Column schema for the data rows below (dtype, observed minimum and maximum, and whether the column contains nulls):

| Column | Dtype | Min | Max | Nullable |
|---|---|---|---|---|
| hexsha | stringlengths | 40 | 40 | no |
| size | int64 | 1 | 1.03M | no |
| ext | stringclasses | 10 values | | no |
| lang | stringclasses | 1 value | | no |
| max_stars_repo_path | stringlengths | 3 | 239 | no |
| max_stars_repo_name | stringlengths | 5 | 130 | no |
| max_stars_repo_head_hexsha | stringlengths | 40 | 78 | no |
| max_stars_repo_licenses | sequencelengths | 1 | 10 | no |
| max_stars_count | int64 | 1 | 191k | yes |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24 | 24 | yes |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24 | 24 | yes |
| max_issues_repo_path | stringlengths | 3 | 239 | no |
| max_issues_repo_name | stringlengths | 5 | 130 | no |
| max_issues_repo_head_hexsha | stringlengths | 40 | 78 | no |
| max_issues_repo_licenses | sequencelengths | 1 | 10 | no |
| max_issues_count | int64 | 1 | 67k | yes |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24 | 24 | yes |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24 | 24 | yes |
| max_forks_repo_path | stringlengths | 3 | 239 | no |
| max_forks_repo_name | stringlengths | 5 | 130 | no |
| max_forks_repo_head_hexsha | stringlengths | 40 | 78 | no |
| max_forks_repo_licenses | sequencelengths | 1 | 10 | no |
| max_forks_count | int64 | 1 | 105k | yes |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24 | 24 | yes |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24 | 24 | yes |
| content | stringlengths | 1 | 1.03M | no |
| avg_line_length | float64 | 1 | 958k | no |
| max_line_length | int64 | 1 | 1.03M | no |
| alphanum_fraction | float64 | 0 | 1 | no |
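The listing above is the column schema for the rows that follow: per-file Git metadata (hexsha, repository, path, licenses, star/issue/fork event data) plus the file content and simple quality statistics. As a rough illustration of working with such a schema (the dataset path below is a placeholder, not an actual dataset name), the stats columns can be used to filter rows with the `datasets` library:

```python
# Illustrative sketch only: "user/python-code-dump" is a placeholder path, not a
# real dataset; the column names match the schema listed above.
from datasets import load_dataset

ds = load_dataset("user/python-code-dump", split="train")

# Keep files that look like reasonably formatted Python source.
filtered = ds.filter(
    lambda row: row["ext"] == "py"
    and row["max_line_length"] <= 120
    and row["avg_line_length"] <= 60
    and row["alphanum_fraction"] >= 0.5
)

print(filtered.num_rows)
print(filtered[0]["max_stars_repo_name"], filtered[0]["hexsha"])
```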
7941ca02292fe3ca47bb22618c5c7a4b82c83a41 | 4,201 | py | Python | keystone/common/sql/migrate_repo/versions/009_normalize_identity.py | ioram7/keystone | 81b5ad22cc8b85d622a332e62e5c4cb63155b654 | ["Apache-2.0"] | null | null | null | keystone/common/sql/migrate_repo/versions/009_normalize_identity.py | ioram7/keystone | 81b5ad22cc8b85d622a332e62e5c4cb63155b654 | ["Apache-2.0"] | null | null | null | keystone/common/sql/migrate_repo/versions/009_normalize_identity.py | ioram7/keystone | 81b5ad22cc8b85d622a332e62e5c4cb63155b654 | ["Apache-2.0"] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column, MetaData, String, Table, Text, types
from sqlalchemy.orm import sessionmaker
# SQLite doesn't support dropping columns, so copy the data to a new table instead.
def downgrade_user_table_with_copy(meta, migrate_engine):
maker = sessionmaker(bind=migrate_engine)
session = maker()
session.execute("ALTER TABLE user RENAME TO orig_user;")
user_table = Table(
'user',
meta,
Column('id', String(64), primary_key=True),
Column('name', String(64), unique=True, nullable=False),
Column('extra', Text()))
user_table.create(migrate_engine, checkfirst=True)
orig_user_table = Table('orig_user', meta, autoload=True)
for user in session.query(orig_user_table):
session.execute("insert into user (id, name, extra) "
"values ( :id, :name, :extra);",
{'id': user.id,
'name': user.name,
'extra': user.extra})
session.execute("drop table orig_user;")
session.close()
def downgrade_tenant_table_with_copy(meta, migrate_engine):
maker = sessionmaker(bind=migrate_engine)
session = maker()
session.execute("ALTER TABLE tenant RENAME TO orig_tenant;")
tenant_table = Table(
'tenant',
meta,
Column('id', String(64), primary_key=True),
Column('name', String(64), unique=True, nullable=False),
Column('extra', Text()))
tenant_table.create(migrate_engine, checkfirst=True)
orig_tenant_table = Table('orig_tenant', meta, autoload=True)
for tenant in session.query(orig_tenant_table):
session.execute("insert into tenant (id, name, extra) "
"values ( :id, :name, :extra);",
{'id': tenant.id,
'name': tenant.name,
'extra': tenant.extra})
session.execute("drop table orig_tenant;")
session.close()
def downgrade_user_table_with_column_drop(meta, migrate_engine):
user_table = Table('user', meta, autoload=True)
user_table.drop_column(Column('password', String(128)))
user_table.drop_column(Column('enabled', types.Boolean,
default=True))
def downgrade_tenant_table_with_column_drop(meta, migrate_engine):
tenant_table = Table('tenant', meta, autoload=True)
tenant_table.drop_column(Column('description', Text()))
tenant_table.drop_column(Column('enabled', types.Boolean))
def upgrade_user_table(meta, migrate_engine):
user_table = Table('user', meta, autoload=True)
user_table.create_column(Column('password', String(128)))
user_table.create_column(Column('enabled', types.Boolean,
default=True))
def upgrade_tenant_table(meta, migrate_engine):
tenant_table = Table('tenant', meta, autoload=True)
tenant_table.create_column(Column('description', Text()))
tenant_table.create_column(Column('enabled', types.Boolean))
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
upgrade_user_table(meta, migrate_engine)
upgrade_tenant_table(meta, migrate_engine)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
if migrate_engine.name == 'sqlite':
downgrade_user_table_with_copy(meta, migrate_engine)
downgrade_tenant_table_with_copy(meta, migrate_engine)
else:
downgrade_user_table_with_column_drop(meta, migrate_engine)
downgrade_tenant_table_with_column_drop(meta, migrate_engine)
| 37.176991 | 75 | 0.677458 |
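The comment in the file above notes that SQLite cannot drop columns, so the downgrade path renames the table, recreates it without the dropped columns, copies the rows across, and drops the renamed original. A minimal standalone sketch of that same pattern, using only the sqlite3 standard library rather than the migration helpers above (table and column names are illustrative):

```python
# Minimal sketch of the rename -> recreate -> copy -> drop pattern that the
# migration above uses because SQLite cannot drop columns directly.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE user (id TEXT PRIMARY KEY, name TEXT, password TEXT)")
conn.execute("INSERT INTO user VALUES ('u1', 'alice', 'secret')")

# "Drop" the password column by copying the surviving columns to a new table.
conn.execute("ALTER TABLE user RENAME TO orig_user")
conn.execute("CREATE TABLE user (id TEXT PRIMARY KEY, name TEXT)")
conn.execute("INSERT INTO user (id, name) SELECT id, name FROM orig_user")
conn.execute("DROP TABLE orig_user")

print(conn.execute("SELECT * FROM user").fetchall())  # [('u1', 'alice')]
```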
7941ca2f56affdacd097a5e003117b6cea905853 | 11,976 | py | Python | tests/test_subjects/test_asyncsubject.py | AlexMost/RxPY | 05cb14c72806dc41e243789c05f498dede11cebd | ["ECL-2.0", "Apache-2.0"] | null | null | null | tests/test_subjects/test_asyncsubject.py | AlexMost/RxPY | 05cb14c72806dc41e243789c05f498dede11cebd | ["ECL-2.0", "Apache-2.0"] | null | null | null | tests/test_subjects/test_asyncsubject.py | AlexMost/RxPY | 05cb14c72806dc41e243789c05f498dede11cebd | ["ECL-2.0", "Apache-2.0"] | 1 | 2021-11-04T11:13:49.000Z | 2021-11-04T11:13:49.000Z |
from nose.tools import assert_raises, raises
from rx import Observable, Observer
from rx.testing import TestScheduler, ReactiveTest, is_prime, MockDisposable
from rx.disposables import Disposable, SerialDisposable
from rx.subjects import AsyncSubject
from rx.internal.exceptions import DisposedException
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class RxException(Exception):
pass
# Helper function for raising exceptions within lambdas
def _raise(ex):
raise RxException(ex)
def test_infinite():
subject = [None]
subscription = [None]
subscription1 = [None]
subscription2 = [None]
subscription3 = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 1),
on_next(110, 2),
on_next(220, 3),
on_next(270, 4),
on_next(340, 5),
on_next(410, 6),
on_next(520, 7),
on_next(630, 8),
on_next(710, 9),
on_next(870, 10),
on_next(940, 11),
on_next(1020, 12)
)
results1 = scheduler.create_observer()
results2 = scheduler.create_observer()
results3 = scheduler.create_observer()
def action1(scheduler, state=None):
subject[0] = AsyncSubject()
scheduler.schedule_absolute(100, action1)
def action2(scheduler, state=None):
subscription[0] = xs.subscribe(subject[0])
scheduler.schedule_absolute(200, action2)
def action3(scheduler, state=None):
subscription[0].dispose()
scheduler.schedule_absolute(1000, action3)
def action4(scheduler, state=None):
subscription1[0] = subject[0].subscribe(results1)
scheduler.schedule_absolute(300, action4)
def action5(scheduler, state=None):
subscription2[0] = subject[0].subscribe(results2)
scheduler.schedule_absolute(400, action5)
def action6(scheduler, state=None):
subscription3[0] = subject[0].subscribe(results3)
scheduler.schedule_absolute(900, action6)
def action7(scheduler, state=None):
subscription1[0].dispose()
scheduler.schedule_absolute(600, action7)
def action8(scheduler, state=None):
subscription2[0].dispose()
scheduler.schedule_absolute(700, action8)
def action9(scheduler, state=None):
subscription1[0].dispose()
scheduler.schedule_absolute(800, action9)
def action10(scheduler, state=None):
subscription3[0].dispose()
scheduler.schedule_absolute(950, action10)
scheduler.start()
results1.messages.assert_equal()
results2.messages.assert_equal()
results3.messages.assert_equal()
def test_finite():
subject = [None]
subscription = [None]
subscription1 = [None]
subscription2 = [None]
subscription3 = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 1),
on_next(110, 2),
on_next(220, 3),
on_next(270, 4),
on_next(340, 5),
on_next(410, 6),
on_next(520, 7),
on_completed(630),
on_next(640, 9),
on_completed(650),
on_error(660, 'ex')
)
results1 = scheduler.create_observer()
results2 = scheduler.create_observer()
results3 = scheduler.create_observer()
def action1(scheduler, state=None):
subject[0] = AsyncSubject()
scheduler.schedule_absolute(100, action1)
def action2(scheduler, state=None):
subscription[0] = xs.subscribe(subject[0])
scheduler.schedule_absolute(200, action2)
def action3(scheduler, state=None):
subscription[0].dispose()
scheduler.schedule_absolute(1000, action3)
def action4(scheduler, state=None):
subscription1[0] = subject[0].subscribe(results1)
scheduler.schedule_absolute(300, action4)
def action5(scheduler, state=None):
subscription2[0] = subject[0].subscribe(results2)
scheduler.schedule_absolute(400, action5)
def action6(scheduler, state=None):
subscription3[0] = subject[0].subscribe(results3)
scheduler.schedule_absolute(900, action6)
def action7(scheduler, state=None):
subscription1[0].dispose()
scheduler.schedule_absolute(600, action7)
def action8(scheduler, state=None):
subscription2[0].dispose()
scheduler.schedule_absolute(700, action8)
def action9(scheduler, state=None):
subscription1[0].dispose()
scheduler.schedule_absolute(800, action9)
def action10(scheduler, state=None):
subscription3[0].dispose()
scheduler.schedule_absolute(950, action10)
scheduler.start()
results1.messages.assert_equal()
results2.messages.assert_equal(on_next(630, 7), on_completed(630))
results3.messages.assert_equal(on_next(900, 7), on_completed(900))
def test_error():
subject = [None]
subscription = [None]
subscription1 = [None]
subscription2 = [None]
subscription3 = [None]
ex = 'ex'
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(70, 1),
on_next(110, 2),
on_next(220, 3),
on_next(270, 4),
on_next(340, 5),
on_next(410, 6),
on_next(520, 7),
on_error(630, ex),
on_next(640, 9),
on_completed(650),
on_error(660, 'ex2')
)
results1 = scheduler.create_observer()
results2 = scheduler.create_observer()
results3 = scheduler.create_observer()
def action(scheduler, state=None):
subject[0] = AsyncSubject()
scheduler.schedule_absolute(100, action)
def action1(scheduler, state=None):
subscription[0] = xs.subscribe(subject[0])
scheduler.schedule_absolute(200, action1)
def action2(scheduler, state=None):
subscription[0].dispose()
scheduler.schedule_absolute(1000, action2)
def action3(scheduler, state=None):
subscription1[0] = subject[0].subscribe(results1)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state=None):
subscription2[0] = subject[0].subscribe(results2)
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state=None):
subscription3[0] = subject[0].subscribe(results3)
scheduler.schedule_absolute(900, action5)
def action6(scheduler, state=None):
subscription1[0].dispose()
scheduler.schedule_absolute(600, action6)
def action7(scheduler, state=None):
subscription2[0].dispose()
scheduler.schedule_absolute(700, action7)
def action8(scheduler, state=None):
subscription1[0].dispose()
scheduler.schedule_absolute(800, action8)
def action9(scheduler, state=None):
subscription3[0].dispose()
scheduler.schedule_absolute(950, action9)
scheduler.start()
results1.messages.assert_equal()
results2.messages.assert_equal(on_error(630, ex))
results3.messages.assert_equal(on_error(900, ex))
def test_canceled():
subject = [None]
subscription = [None]
subscription1 = [None]
subscription2 = [None]
subscription3 = [None]
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_completed(630),
on_next(640, 9),
on_completed(650),
on_error(660, 'ex')
)
results1 = scheduler.create_observer()
results2 = scheduler.create_observer()
results3 = scheduler.create_observer()
def action1(scheduler, state=None):
subject[0] = AsyncSubject()
scheduler.schedule_absolute(100, action1)
def action2(scheduler, state=None):
subscription[0] = xs.subscribe(subject[0])
scheduler.schedule_absolute(200, action2)
def action3(scheduler, state=None):
subscription[0].dispose()
scheduler.schedule_absolute(1000, action3)
def action4(scheduler, state=None):
subscription1[0] = subject[0].subscribe(results1)
scheduler.schedule_absolute(300, action4)
def action5(scheduler, state=None):
subscription2[0] = subject[0].subscribe(results2)
scheduler.schedule_absolute(400, action5)
def action6(scheduler, state=None):
subscription3[0] = subject[0].subscribe(results3)
scheduler.schedule_absolute(900, action6)
def action7(scheduler, state=None):
subscription1[0].dispose()
scheduler.schedule_absolute(600, action7)
def action8(scheduler, state=None):
subscription2[0].dispose()
scheduler.schedule_absolute(700, action8)
def action9(scheduler, state=None):
subscription1[0].dispose()
scheduler.schedule_absolute(800, action9)
def action10(scheduler, state=None):
subscription3[0].dispose()
scheduler.schedule_absolute(950, action10)
scheduler.start()
results1.messages.assert_equal()
results2.messages.assert_equal(on_completed(630))
results3.messages.assert_equal(on_completed(900))
def test_subject_disposed():
subject = [None]
subscription1 = [None]
subscription2 = [None]
subscription3 = [None]
scheduler = TestScheduler()
results1 = scheduler.create_observer()
results2 = scheduler.create_observer()
results3 = scheduler.create_observer()
def action1(scheduler, state=None):
subject[0] = AsyncSubject()
scheduler.schedule_absolute(100, action1)
def action2(scheduler, state=None):
subscription1[0] = subject[0].subscribe(results1)
scheduler.schedule_absolute(200, action2)
def action3(scheduler, state=None):
subscription2[0] = subject[0].subscribe(results2)
scheduler.schedule_absolute(300, action3)
def action4(scheduler, state=None):
subscription3[0] = subject[0].subscribe(results3)
scheduler.schedule_absolute(400, action4)
def action5(scheduler, state=None):
subscription1[0].dispose()
scheduler.schedule_absolute(500, action5)
def action6(scheduler, state=None):
subject[0].dispose()
scheduler.schedule_absolute(600, action6)
def action7(scheduler, state=None):
subscription2[0].dispose()
scheduler.schedule_absolute(700, action7)
def action8(scheduler, state=None):
subscription3[0].dispose()
scheduler.schedule_absolute(800, action8)
def action9(scheduler, state=None):
subject[0].on_next(1)
scheduler.schedule_absolute(150, action9)
def action10(scheduler, state=None):
subject[0].on_next(2)
scheduler.schedule_absolute(250, action10)
def action11(scheduler, state=None):
subject[0].on_next(3)
scheduler.schedule_absolute(350, action11)
def action12(scheduler, state=None):
subject[0].on_next(4)
scheduler.schedule_absolute(450, action12)
def action13(scheduler, state=None):
subject[0].on_next(5)
scheduler.schedule_absolute(550, action13)
@raises(DisposedException)
def action14(scheduler, state=None):
subject[0].on_next(6)
scheduler.schedule_absolute(650, action14)
@raises(DisposedException)
def action15(scheduler, state=None):
subject[0].on_completed()
scheduler.schedule_absolute(750, action15)
@raises(DisposedException)
def action16(scheduler, state=None):
subject[0].on_error('ex')
scheduler.schedule_absolute(850, action16)
@raises(DisposedException)
def action17(scheduler, state=None):
subject[0].subscribe(None)
scheduler.schedule_absolute(950, action17)
scheduler.start()
results1.messages.assert_equal()
results2.messages.assert_equal()
results3.messages.assert_equal()
if __name__ == '__main__':
    import unittest  # not imported at module level; needed for the direct-run entry point
    unittest.main()
| 30.629156 | 76 | 0.672512 |
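The tests above drive RxPY's AsyncSubject through a TestScheduler, but the property they assert is compact: the subject remembers only the last value it saw and emits it to observers only when the source completes (or it forwards the error or completion alone). A dependency-free sketch of that behaviour follows; it is an illustration of the semantics, not the RxPY implementation itself:

```python
# Simplified illustration of AsyncSubject semantics: latest value only,
# delivered on completion; late subscribers get the cached result.
class TinyAsyncSubject:
    def __init__(self):
        self.observers = []
        self.value = None
        self.has_value = False
        self.done = False

    def subscribe(self, on_next, on_completed):
        if self.done:
            if self.has_value:
                on_next(self.value)
            on_completed()
        else:
            self.observers.append((on_next, on_completed))

    def on_next(self, value):
        if not self.done:
            self.value, self.has_value = value, True

    def on_completed(self):
        self.done = True
        for on_next, on_completed in self.observers:
            if self.has_value:
                on_next(self.value)
            on_completed()
        self.observers.clear()


subject = TinyAsyncSubject()
received = []
subject.subscribe(received.append, lambda: received.append("done"))
subject.on_next(1)
subject.on_next(7)
subject.on_completed()
print(received)  # [7, 'done'] -- only the final value, delivered on completion
```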
7941ca7196c438b5a8a5a44a7e20ecce35ee8c63 | 1,159 | py | Python | nz_django/day6/model_form_demo/front/migrations/0001_initial.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | ["Apache-2.0"] | null | null | null | nz_django/day6/model_form_demo/front/migrations/0001_initial.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | ["Apache-2.0"] | 27 | 2020-02-12T07:55:58.000Z | 2022-03-12T00:19:09.000Z | nz_django/day6/model_form_demo/front/migrations/0001_initial.py | gaohj/nzflask_bbs | 36a94c380b78241ed5d1e07edab9618c3e8d477b | ["Apache-2.0"] | 2 | 2020-02-18T01:54:55.000Z | 2020-02-21T11:36:28.000Z |
# Generated by Django 2.0 on 2020-02-25 07:32
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('page', models.IntegerField()),
('price', models.FloatField(validators=[django.core.validators.MaxValueValidator(limit_value=1000)])),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=100)),
('password', models.CharField(max_length=30)),
('telephone', models.CharField(max_length=11, validators=[django.core.validators.RegexValidator('1[3-9]\\d{9}')])),
],
),
]
| 34.088235 | 131 | 0.582399 |
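The migration above attaches MaxValueValidator(limit_value=1000) to `price` and RegexValidator('1[3-9]\d{9}') to `telephone`. The sketch below restates those two constraints in plain Python, using `re` directly rather than Django's validator classes, so the accepted and rejected values are easy to see:

```python
# Plain-Python restatement of the constraints in the migration above;
# no Django runtime is required for this sketch.
import re

PHONE_PATTERN = re.compile(r"1[3-9]\d{9}")
PRICE_LIMIT = 1000

def is_valid(telephone: str, price: float) -> bool:
    # Django's RegexValidator uses re.search semantics by default; the pattern
    # requires an 11-digit mobile number starting with 13-19.
    return bool(PHONE_PATTERN.search(telephone)) and price <= PRICE_LIMIT

print(is_valid("13912345678", 99.5))   # True
print(is_valid("12012345678", 99.5))   # False -- second digit not in 3-9
print(is_valid("13912345678", 2000))   # False -- exceeds MaxValueValidator
```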
7941ccda2f94efa176a95a3ca15e713b203b9779 | 8,221 | py | Python | sdk/python/pulumi_azure_native/customerinsights/v20170101/get_connector.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/customerinsights/v20170101/get_connector.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null | sdk/python/pulumi_azure_native/customerinsights/v20170101/get_connector.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetConnectorResult',
'AwaitableGetConnectorResult',
'get_connector',
]
@pulumi.output_type
class GetConnectorResult:
"""
The connector resource format.
"""
def __init__(__self__, connector_id=None, connector_name=None, connector_properties=None, connector_type=None, created=None, description=None, display_name=None, id=None, is_internal=None, last_modified=None, name=None, state=None, tenant_id=None, type=None):
if connector_id and not isinstance(connector_id, int):
raise TypeError("Expected argument 'connector_id' to be a int")
pulumi.set(__self__, "connector_id", connector_id)
if connector_name and not isinstance(connector_name, str):
raise TypeError("Expected argument 'connector_name' to be a str")
pulumi.set(__self__, "connector_name", connector_name)
if connector_properties and not isinstance(connector_properties, dict):
raise TypeError("Expected argument 'connector_properties' to be a dict")
pulumi.set(__self__, "connector_properties", connector_properties)
if connector_type and not isinstance(connector_type, str):
raise TypeError("Expected argument 'connector_type' to be a str")
pulumi.set(__self__, "connector_type", connector_type)
if created and not isinstance(created, str):
raise TypeError("Expected argument 'created' to be a str")
pulumi.set(__self__, "created", created)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_internal and not isinstance(is_internal, bool):
raise TypeError("Expected argument 'is_internal' to be a bool")
pulumi.set(__self__, "is_internal", is_internal)
if last_modified and not isinstance(last_modified, str):
raise TypeError("Expected argument 'last_modified' to be a str")
pulumi.set(__self__, "last_modified", last_modified)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="connectorId")
def connector_id(self) -> int:
"""
ID of the connector.
"""
return pulumi.get(self, "connector_id")
@property
@pulumi.getter(name="connectorName")
def connector_name(self) -> Optional[str]:
"""
Name of the connector.
"""
return pulumi.get(self, "connector_name")
@property
@pulumi.getter(name="connectorProperties")
def connector_properties(self) -> Mapping[str, Any]:
"""
The connector properties.
"""
return pulumi.get(self, "connector_properties")
@property
@pulumi.getter(name="connectorType")
def connector_type(self) -> str:
"""
Type of connector.
"""
return pulumi.get(self, "connector_type")
@property
@pulumi.getter
def created(self) -> str:
"""
The created time.
"""
return pulumi.get(self, "created")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description of the connector.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
Display name of the connector.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isInternal")
def is_internal(self) -> Optional[bool]:
"""
If this is an internal connector.
"""
return pulumi.get(self, "is_internal")
@property
@pulumi.getter(name="lastModified")
def last_modified(self) -> str:
"""
The last modified time.
"""
return pulumi.get(self, "last_modified")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> str:
"""
State of connector.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The hub name.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetConnectorResult(GetConnectorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConnectorResult(
connector_id=self.connector_id,
connector_name=self.connector_name,
connector_properties=self.connector_properties,
connector_type=self.connector_type,
created=self.created,
description=self.description,
display_name=self.display_name,
id=self.id,
is_internal=self.is_internal,
last_modified=self.last_modified,
name=self.name,
state=self.state,
tenant_id=self.tenant_id,
type=self.type)
def get_connector(connector_name: Optional[str] = None,
hub_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConnectorResult:
"""
The connector resource format.
:param str connector_name: The name of the connector.
:param str hub_name: The name of the hub.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['connectorName'] = connector_name
__args__['hubName'] = hub_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:customerinsights/v20170101:getConnector', __args__, opts=opts, typ=GetConnectorResult).value
return AwaitableGetConnectorResult(
connector_id=__ret__.connector_id,
connector_name=__ret__.connector_name,
connector_properties=__ret__.connector_properties,
connector_type=__ret__.connector_type,
created=__ret__.created,
description=__ret__.description,
display_name=__ret__.display_name,
id=__ret__.id,
is_internal=__ret__.is_internal,
last_modified=__ret__.last_modified,
name=__ret__.name,
state=__ret__.state,
tenant_id=__ret__.tenant_id,
type=__ret__.type)
| 34.542017 | 263 | 0.634716 |
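The generated module above only defines the result type and the `get_connector` lookup. A hypothetical usage sketch follows; the resource names are placeholders, and the call only resolves inside a Pulumi program whose Azure subscription already contains the referenced Customer Insights hub and connector:

```python
# Hypothetical usage sketch; "my-connector", "my-hub" and "my-resource-group"
# are placeholders, and this must run inside a Pulumi program.
import pulumi_azure_native.customerinsights.v20170101 as customerinsights

connector = customerinsights.get_connector(
    connector_name="my-connector",
    hub_name="my-hub",
    resource_group_name="my-resource-group",
)

# The result mirrors the GetConnectorResult properties defined above.
print(connector.connector_type, connector.state)
```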
7941cd0e0369fdb139f70dfc106604a172155464 | 285 | py | Python | conftest.py | testinggg-art/Cyberbrain | e38c74c174e23aa386d005b03f09b30aa1b3a0ae | ["MIT"] | null | null | null | conftest.py | testinggg-art/Cyberbrain | e38c74c174e23aa386d005b03f09b30aa1b3a0ae | ["MIT"] | null | null | null | conftest.py | testinggg-art/Cyberbrain | e38c74c174e23aa386d005b03f09b30aa1b3a0ae | ["MIT"] | null | null | null |
import sys
# test_outside_func.py is ignored because it has code in global scope, and is always
# executed if not ignored.
collect_ignore = ["test/test_outside_func.py", "test/test_generator.py"]
if sys.version_info[:2] < (3, 8):
collect_ignore.append("test/test_block_py38.py")
| 31.666667 | 84 | 0.754386 |
7941cd222382d46946f904ece31cbd55d3b2512b | 21,013 | py | Python | examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py | techthiyanes/adapter-transformers | 04aeaf63c4c54856d416925258393d9e06866b46 | ["Apache-2.0"] | null | null | null | examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py | techthiyanes/adapter-transformers | 04aeaf63c4c54856d416925258393d9e06866b46 | ["Apache-2.0"] | null | null | null | examples/pytorch/speech-recognition/run_speech_recognition_seq2seq.py | techthiyanes/adapter-transformers | 04aeaf63c4c54856d416925258393d9e06866b46 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence speech recognition.
"""
# You can also adapt this script on your own sequence to sequence speech
# recognition task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import torch
from datasets import DatasetDict, load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForSpeechSeq2Seq,
AutoProcessor,
AutoTokenizer,
HfArgumentParser,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.19.0")
require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
feature_extractor_name: Optional[str] = field(
default=None, metadata={"help": "feature extractor name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
freeze_feature_encoder: bool = field(
default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: str = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
audio_column_name: str = field(
default="audio",
metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
)
text_column_name: str = field(
default="text",
metadata={"help": "The name of the dataset column containing the text data. Defaults to 'text'"},
)
max_duration_in_seconds: float = field(
default=20.0,
metadata={
"help": "Truncate audio files that are longer than `max_duration_in_seconds` seconds to 'max_duration_in_seconds`"
},
)
min_duration_in_seconds: float = field(
default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"}
)
preprocessing_only: bool = field(
default=False,
metadata={
"help": "Whether to only do data preprocessing and skip training. "
"This is especially useful when data preprocessing errors out in distributed training due to timeout. "
"In this case, one should run the preprocessing in a non-distributed setup with `preprocessing_only=True` "
"so that the cached datasets can consequently be loaded in distributed training"
},
)
train_split_name: str = field(
default="train",
metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
},
)
eval_split_name: str = field(
default="test",
metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
},
)
do_lower_case: bool = field(
default=True,
metadata={"help": "Whether the target text should be lower cased."},
)
@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
"""
Data collator that will dynamically pad the inputs received.
Args:
processor ([`Wav2Vec2Processor`])
            The processor used for processing the data.
decoder_start_token_id (`int`)
The begin-of-sentence of the decoder.
"""
processor: Any
decoder_start_token_id: int
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
# different padding methods
input_features = [{"input_values": feature["input_values"]} for feature in features]
label_features = [{"input_ids": feature["labels"]} for feature in features]
batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")
labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")
# replace padding with -100 to ignore loss correctly
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
# if bos token is appended in previous tokenization step,
        # cut bos token here as it's appended later anyway
if (labels[:, 0] == self.decoder_start_token_id).all().cpu().item():
labels = labels[:, 1:]
batch["labels"] = labels
return batch
def main():
# 1. Parse input arguments
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# 2. Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s", training_args)
    # 3. Detecting last checkpoint and eventually continue from last checkpoint
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# 4. Load dataset
raw_datasets = DatasetDict()
if training_args.do_train:
raw_datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.train_split_name,
use_auth_token=True if model_args.use_auth_token else None,
)
if training_args.do_eval:
raw_datasets["eval"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=data_args.eval_split_name,
use_auth_token=True if model_args.use_auth_token else None,
)
if data_args.audio_column_name not in next(iter(raw_datasets.values())).column_names:
raise ValueError(
f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--audio_column_name` to the correct audio column - one of "
f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
)
if data_args.text_column_name not in next(iter(raw_datasets.values())).column_names:
raise ValueError(
f"--text_column_name {data_args.text_column_name} not found in dataset '{data_args.dataset_name}'. "
"Make sure to set `--text_column_name` to the correct text column - one of "
f"{', '.join(next(iter(raw_datasets.values())).column_names)}."
)
# 5. Load pretrained model, tokenizer, and feature extractor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
feature_extractor = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name if model_args.feature_extractor_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSpeechSeq2Seq.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
    # 6. Resample speech dataset if necessary
dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
if dataset_sampling_rate != feature_extractor.sampling_rate:
raw_datasets = raw_datasets.cast_column(
data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
)
# 7. Preprocessing the datasets.
# We need to read the audio files as arrays and tokenize the targets.
max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate
min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate
audio_column_name = data_args.audio_column_name
num_workers = data_args.preprocessing_num_workers
text_column_name = data_args.text_column_name
model_input_name = feature_extractor.model_input_names[0]
do_lower_case = data_args.do_lower_case
if data_args.max_train_samples is not None:
raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples))
if data_args.max_eval_samples is not None:
raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples))
def prepare_dataset(batch):
# process audio
sample = batch[audio_column_name]
inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"])
# process audio length
batch[model_input_name] = inputs.input_values[0]
batch["input_length"] = len(batch["input_values"])
# process targets
input_str = batch[text_column_name].lower() if do_lower_case else batch[text_column_name]
batch["labels"] = tokenizer(input_str).input_ids
return batch
with training_args.main_process_first(desc="dataset map pre-processing"):
vectorized_datasets = raw_datasets.map(
prepare_dataset,
remove_columns=next(iter(raw_datasets.values())).column_names,
num_proc=data_args.preprocessing_num_workers,
desc="preprocess train dataset",
)
# filter data that is shorter than min_input_length or longer than
# max_input_length
def is_audio_in_length_range(length):
return length > min_input_length and length < max_input_length
vectorized_datasets = vectorized_datasets.filter(
is_audio_in_length_range,
num_proc=num_workers,
input_columns=["input_length"],
)
# for large datasets it is advised to run the preprocessing on a
    # single machine first with `args.preprocessing_only` since there will most likely
# be a timeout when running the script in distributed mode.
# In a second step `args.preprocessing_only` can then be set to `False` to load the
# cached dataset
if data_args.preprocessing_only:
cache = {k: v.cache_files for k, v in vectorized_datasets.items()}
logger.info(f"Data preprocessing finished. Files cached at {cache}.")
return
# 8. Load Metric
metric = load_metric("wer")
def compute_metrics(pred):
pred_ids = pred.predictions
pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id
pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
# we do not want to group tokens when computing the metrics
label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True)
wer = metric.compute(predictions=pred_str, references=label_str)
return {"wer": wer}
# 9. Create a single speech processor
if is_main_process(training_args.local_rank):
# save feature extractor, tokenizer and config
feature_extractor.save_pretrained(training_args.output_dir)
tokenizer.save_pretrained(training_args.output_dir)
config.save_pretrained(training_args.output_dir)
processor = AutoProcessor.from_pretrained(training_args.output_dir)
# 10. Define data collator
data_collator = DataCollatorSpeechSeq2SeqWithPadding(
processor=processor, decoder_start_token_id=model.config.decoder_start_token_id
)
# 11. Initialize Trainer
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=vectorized_datasets["train"] if training_args.do_train else None,
eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None,
tokenizer=feature_extractor,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
# 12. Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the feature extractor too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples
if data_args.max_train_samples is not None
else len(vectorized_datasets["train"])
)
metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"]))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# 13. Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(
metric_key_prefix="eval", max_length=model.config.max_length, num_beams=model.config.num_beams
)
max_eval_samples = (
data_args.max_eval_samples if data_args.max_eval_samples is not None else len(vectorized_datasets["eval"])
)
metrics["eval_samples"] = min(max_eval_samples, len(vectorized_datasets["eval"]))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# 14. Write Training Stats
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "speech recognition"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results
if __name__ == "__main__":
main()
| 41.282908 | 126 | 0.695046 |
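The script above is driven entirely by HfArgumentParser; besides plain command-line flags it also accepts a single JSON file of arguments (the `parse_json_file` branch near the top of `main`). A hedged sketch of driving it that way: the model identifier and output directory are placeholders, the dataset/config/split names are only examples, and the script is assumed to be saved locally under its usual file name.

```python
# Hypothetical launcher for the example script above. Argument names match the
# dataclasses defined in the script; checkpoint and output paths are placeholders.
import json
import subprocess

args = {
    "model_name_or_path": "your-org/your-speech-encoder-decoder",  # placeholder
    "dataset_name": "librispeech_asr",      # example dataset
    "dataset_config_name": "clean",
    "train_split_name": "train.100",
    "eval_split_name": "validation",
    "output_dir": "./seq2seq-asr-demo",     # placeholder
    "do_train": True,
    "do_eval": True,
    "predict_with_generate": True,
}

with open("args.json", "w") as f:
    json.dump(args, f, indent=2)

subprocess.run(
    ["python", "run_speech_recognition_seq2seq.py", "args.json"], check=True
)
```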
7941cef42bc9fabf52923f41cff0efd8032e3f4b | 32,499 | py | Python | tests/utilities/test_build_client_schema.py | hspedro/graphql-core | 2b27e641d51789f532f989d3e125e04b33d24564 | ["MIT"] | null | null | null | tests/utilities/test_build_client_schema.py | hspedro/graphql-core | 2b27e641d51789f532f989d3e125e04b33d24564 | ["MIT"] | null | null | null | tests/utilities/test_build_client_schema.py | hspedro/graphql-core | 2b27e641d51789f532f989d3e125e04b33d24564 | ["MIT"] | null | null | null |
from pytest import raises # type: ignore
from graphql import graphql_sync
from graphql.pyutils import dedent
from graphql.type import (
GraphQLArgument,
GraphQLBoolean,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLFloat,
GraphQLID,
GraphQLInt,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
assert_enum_type,
)
from graphql.utilities import (
build_schema,
build_client_schema,
introspection_from_schema,
print_schema,
)
def cycle_introspection(sdl_string):
"""Test that the client side introspection gives the same result.
    This function does a full cycle: it builds an in-memory GraphQLSchema from a string
    with the contents of the SDL, produces a client-side representation of the schema
    using "build_client_schema", and then returns that schema printed as SDL.
"""
options = dict(directive_is_repeatable=True)
server_schema = build_schema(sdl_string)
initial_introspection = introspection_from_schema(server_schema, **options)
client_schema = build_client_schema(initial_introspection)
# If the client then runs the introspection query against the client-side schema,
# it should get a result identical to what was returned by the server
second_introspection = introspection_from_schema(client_schema, **options)
# If the client then runs the introspection query against the client-side
# schema, it should get a result identical to what was returned by the server.
assert initial_introspection == second_introspection
return print_schema(client_schema)
def describe_type_system_build_schema_from_introspection():
def builds_a_simple_schema():
sdl = dedent(
'''
"""Simple schema"""
schema {
query: Simple
}
"""This is a simple type"""
type Simple {
"""This is a string field"""
string: String
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_without_the_query_type():
sdl = dedent(
"""
type Query {
foo: String
}
"""
)
schema = build_schema(sdl)
introspection = introspection_from_schema(schema)
del introspection["__schema"]["queryType"]
client_schema = build_client_schema(introspection)
assert client_schema.query_type is None
assert print_schema(client_schema) == sdl
def builds_a_simple_schema_with_all_operation_types():
sdl = dedent(
'''
schema {
query: QueryType
mutation: MutationType
subscription: SubscriptionType
}
"""This is a simple mutation type"""
type MutationType {
"""Set the string field"""
string: String
}
"""This is a simple query type"""
type QueryType {
"""This is a string field"""
string: String
}
"""This is a simple subscription type"""
type SubscriptionType {
"""This is a string field"""
string: String
}
'''
)
assert cycle_introspection(sdl) == sdl
def uses_built_in_scalars_when_possible():
sdl = dedent(
"""
scalar CustomScalar
type Query {
int: Int
float: Float
string: String
boolean: Boolean
id: ID
custom: CustomScalar
}
"""
)
assert cycle_introspection(sdl) == sdl
schema = build_schema(sdl)
introspection = introspection_from_schema(schema)
client_schema = build_client_schema(introspection)
# Built-ins are used
assert client_schema.get_type("Int") is GraphQLInt
assert client_schema.get_type("Float") is GraphQLFloat
assert client_schema.get_type("String") is GraphQLString
assert client_schema.get_type("Boolean") is GraphQLBoolean
assert client_schema.get_type("ID") is GraphQLID
# Custom are built
custom_scalar = schema.get_type("CustomScalar")
assert client_schema.get_type("CustomScalar") is not custom_scalar
def includes_standard_types_only_if_they_are_used():
schema = build_schema(
"""
type Query {
foo: String
}
"""
)
introspection = introspection_from_schema(schema)
client_schema = build_client_schema(introspection)
assert client_schema.get_type("Int") is None
assert client_schema.get_type("Float") is None
assert client_schema.get_type("ID") is None
def builds_a_schema_with_a_recursive_type_reference():
sdl = dedent(
"""
schema {
query: Recur
}
type Recur {
recur: Recur
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_a_circular_type_reference():
sdl = dedent(
"""
type Dog {
bestFriend: Human
}
type Human {
bestFriend: Dog
}
type Query {
dog: Dog
human: Human
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_an_interface():
sdl = dedent(
'''
type Dog implements Friendly {
bestFriend: Friendly
}
interface Friendly {
"""The best friend of this friendly thing"""
bestFriend: Friendly
}
type Human implements Friendly {
bestFriend: Friendly
}
type Query {
friendly: Friendly
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_an_interface_hierarchy():
sdl = dedent(
'''
type Dog implements Friendly & Named {
bestFriend: Friendly
name: String
}
interface Friendly implements Named {
"""The best friend of this friendly thing"""
bestFriend: Friendly
name: String
}
type Human implements Friendly & Named {
bestFriend: Friendly
name: String
}
interface Named {
name: String
}
type Query {
friendly: Friendly
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_an_implicit_interface():
sdl = dedent(
'''
type Dog implements Friendly {
bestFriend: Friendly
}
interface Friendly {
"""The best friend of this friendly thing"""
bestFriend: Friendly
}
type Query {
dog: Dog
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_a_union():
sdl = dedent(
"""
type Dog {
bestFriend: Friendly
}
union Friendly = Dog | Human
type Human {
bestFriend: Friendly
}
type Query {
friendly: Friendly
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_complex_field_values():
sdl = dedent(
"""
type Query {
string: String
listOfString: [String]
nonNullString: String!
nonNullListOfString: [String]!
nonNullListOfNonNullString: [String!]!
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_field_arguments():
sdl = dedent(
'''
type Query {
"""A field with a single arg"""
one(
"""This is an int arg"""
intArg: Int
): String
"""A field with a two args"""
two(
"""This is an list of int arg"""
listArg: [Int]
"""This is a required arg"""
requiredArg: Boolean!
): String
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_default_value_on_custom_scalar_field():
sdl = dedent(
"""
scalar CustomScalar
type Query {
testField(testArg: CustomScalar = "default"): String
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_an_enum():
food_enum = GraphQLEnumType(
"Food",
{
"VEGETABLES": GraphQLEnumValue(
1, description="Foods that are vegetables."
),
"FRUITS": GraphQLEnumValue(2),
"OILS": GraphQLEnumValue(3, deprecation_reason="Too fatty."),
},
description="Varieties of food stuffs",
)
schema = GraphQLSchema(
GraphQLObjectType(
"EnumFields",
{
"food": GraphQLField(
food_enum,
args={
"kind": GraphQLArgument(
food_enum, description="what kind of food?"
)
},
description="Repeats the arg you give it",
)
},
)
)
introspection = introspection_from_schema(schema)
client_schema = build_client_schema(introspection)
second_introspection = introspection_from_schema(client_schema)
assert second_introspection == introspection
# It's also an Enum type on the client.
client_food_enum = assert_enum_type(client_schema.get_type("Food"))
# Client types do not get server-only values, so they are set to None
# rather than using the integers defined in the "server" schema.
values = {
name: value.to_kwargs() for name, value in client_food_enum.values.items()
}
assert values == {
"VEGETABLES": {
"value": None,
"description": "Foods that are vegetables.",
"deprecation_reason": None,
"extensions": None,
"ast_node": None,
},
"FRUITS": {
"value": None,
"description": None,
"deprecation_reason": None,
"extensions": None,
"ast_node": None,
},
"OILS": {
"value": None,
"description": None,
"deprecation_reason": "Too fatty.",
"extensions": None,
"ast_node": None,
},
}
def builds_a_schema_with_an_input_object():
sdl = dedent(
'''
"""An input address"""
input Address {
"""What street is this address?"""
street: String!
"""The city the address is within?"""
city: String!
"""The country (blank will assume USA)."""
country: String = "USA"
}
type Query {
"""Get a geocode from an address"""
geocode(
"""The address to lookup"""
address: Address
): String
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_field_arguments_with_default_values():
sdl = dedent(
"""
input Geo {
lat: Float
lon: Float
}
type Query {
defaultInt(intArg: Int = 30): String
defaultList(listArg: [Int] = [1, 2, 3]): String
defaultObject(objArg: Geo = {lat: 37.485, lon: -122.148}): String
defaultNull(intArg: Int = null): String
noDefault(intArg: Int): String
}
"""
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_custom_directives():
sdl = dedent(
'''
"""This is a custom directive"""
directive @customDirective repeatable on FIELD
type Query {
string: String
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_without_directives():
sdl = dedent(
"""
type Query {
foo: String
}
"""
)
schema = build_schema(sdl)
introspection = introspection_from_schema(schema)
del introspection["__schema"]["directives"]
client_schema = build_client_schema(introspection)
assert schema.directives
assert client_schema.directives == []
assert print_schema(client_schema) == sdl
def builds_a_schema_aware_of_deprecation():
sdl = dedent(
'''
enum Color {
"""So rosy"""
RED
"""So grassy"""
GREEN
"""So calming"""
BLUE
"""So sickening"""
MAUVE @deprecated(reason: "No longer in fashion")
}
type Query {
"""This is a shiny string field"""
shinyString: String
"""This is a deprecated string field"""
deprecatedString: String @deprecated(reason: "Use shinyString")
color: Color
}
'''
)
assert cycle_introspection(sdl) == sdl
def builds_a_schema_with_empty_deprecation_reasons():
sdl = dedent(
"""
type Query {
someField: String @deprecated(reason: "")
}
enum SomeEnum {
SOME_VALUE @deprecated(reason: "")
}
"""
)
assert cycle_introspection(sdl) == sdl
def can_use_client_schema_for_limited_execution():
schema = build_schema(
"""
scalar CustomScalar
type Query {
foo(custom1: CustomScalar, custom2: CustomScalar): String
}
"""
)
introspection = introspection_from_schema(schema)
client_schema = build_client_schema(introspection)
class Data:
foo = "bar"
unused = "value"
result = graphql_sync(
client_schema,
"query Limited($v: CustomScalar) { foo(custom1: 123, custom2: $v) }",
root_value=Data(),
variable_values={"v": "baz"},
)
assert result.data == {"foo": "bar"}
def can_build_invalid_schema():
schema = build_schema("type Query", assume_valid=True)
introspection = introspection_from_schema(schema)
client_schema = build_client_schema(introspection, assume_valid=True)
assert client_schema.to_kwargs()["assume_valid"] is True
def describe_throws_when_given_invalid_introspection():
dummy_schema = build_schema(
"""
type Query {
foo(bar: String): String
}
interface SomeInterface {
foo: String
}
union SomeUnion = Query
enum SomeEnum { FOO }
input SomeInputObject {
foo: String
}
directive @SomeDirective on QUERY
"""
)
def throws_when_introspection_is_missing_schema_property():
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
build_client_schema(None) # type: ignore
assert str(exc_info.value) == (
"Invalid or incomplete introspection result. Ensure that you"
" are passing the 'data' attribute of an introspection response"
" and no 'errors' were returned alongside: None."
)
with raises(TypeError) as exc_info:
# noinspection PyTypeChecker
build_client_schema({})
assert str(exc_info.value) == (
"Invalid or incomplete introspection result. Ensure that you"
" are passing the 'data' attribute of an introspection response"
" and no 'errors' were returned alongside: {}."
)
def throws_when_referenced_unknown_type():
introspection = introspection_from_schema(dummy_schema)
introspection["__schema"]["types"] = [
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] != "Query"
]
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == (
"Invalid or incomplete schema, unknown type: Query."
" Ensure that a full introspection query is used"
" in order to build a client schema."
)
def throws_when_missing_definition_for_one_of_the_standard_scalars():
schema = build_schema(
"""
type Query {
foo: Float
}
"""
)
introspection = introspection_from_schema(schema)
introspection["__schema"]["types"] = [
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] != "Float"
]
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value).endswith(
"Invalid or incomplete schema, unknown type: Float."
" Ensure that a full introspection query is used"
" in order to build a client schema."
)
def throws_when_type_reference_is_missing_name():
introspection = introspection_from_schema(dummy_schema)
assert introspection["__schema"]["queryType"]["name"] == "Query"
del introspection["__schema"]["queryType"]["name"]
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == "Unknown type reference: {}."
def throws_when_missing_kind():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
)
assert query_type_introspection["kind"] == "OBJECT"
del query_type_introspection["kind"]
with raises(
TypeError,
match=r"^Invalid or incomplete introspection result\."
" Ensure that a full introspection query is used"
r" in order to build a client schema: {'name': 'Query', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_interfaces():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
)
assert query_type_introspection["interfaces"] == []
del query_type_introspection["interfaces"]
with raises(
TypeError,
match="^Query interfaces cannot be resolved."
" Introspection result missing interfaces:"
r" {'kind': 'OBJECT', 'name': 'Query', .*}\.$",
):
build_client_schema(introspection)
def legacy_support_for_interfaces_with_null_as_interfaces_field():
introspection = introspection_from_schema(dummy_schema)
some_interface_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "SomeInterface"
)
assert some_interface_introspection["interfaces"] == []
some_interface_introspection["interfaces"] = None
client_schema = build_client_schema(introspection)
assert print_schema(client_schema) == print_schema(dummy_schema)
def throws_when_missing_fields():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
)
assert query_type_introspection["fields"]
del query_type_introspection["fields"]
with raises(
TypeError,
match="^Query fields cannot be resolved."
" Introspection result missing fields:"
r" {'kind': 'OBJECT', 'name': 'Query', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_field_args():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
)
assert query_type_introspection["fields"][0]["args"]
del query_type_introspection["fields"][0]["args"]
with raises(
TypeError,
match="^Query fields cannot be resolved."
r" Introspection result missing field args: {'name': 'foo', .*}\.$",
):
build_client_schema(introspection)
def throws_when_output_type_is_used_as_an_arg_type():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
)
assert (
query_type_introspection["fields"][0]["args"][0]["type"]["name"]
== "String"
)
query_type_introspection["fields"][0]["args"][0]["type"][
"name"
] = "SomeUnion"
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value).startswith(
"Query fields cannot be resolved."
" Introspection must provide input type for arguments,"
" but received: SomeUnion."
)
def throws_when_output_type_is_used_as_an_input_value_type():
introspection = introspection_from_schema(dummy_schema)
input_object_type_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "SomeInputObject"
)
assert (
input_object_type_introspection["inputFields"][0]["type"]["name"]
== "String"
)
input_object_type_introspection["inputFields"][0]["type"][
"name"
] = "SomeUnion"
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value).startswith(
"SomeInputObject fields cannot be resolved."
" Introspection must provide input type for input fields,"
" but received: SomeUnion."
)
def throws_when_input_type_is_used_as_a_field_type():
introspection = introspection_from_schema(dummy_schema)
query_type_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Query"
)
assert query_type_introspection["fields"][0]["type"]["name"] == "String"
query_type_introspection["fields"][0]["type"]["name"] = "SomeInputObject"
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value).startswith(
"Query fields cannot be resolved."
" Introspection must provide output type for fields,"
" but received: SomeInputObject."
)
def throws_when_missing_possible_types():
introspection = introspection_from_schema(dummy_schema)
some_union_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "SomeUnion"
)
assert some_union_introspection["possibleTypes"]
del some_union_introspection["possibleTypes"]
with raises(
TypeError,
match="^Introspection result missing possibleTypes:"
r" {'kind': 'UNION', 'name': 'SomeUnion', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_enum_values():
introspection = introspection_from_schema(dummy_schema)
some_enum_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "SomeEnum"
)
assert some_enum_introspection["enumValues"]
del some_enum_introspection["enumValues"]
with raises(
TypeError,
match="^Introspection result missing enumValues:"
r" {'kind': 'ENUM', 'name': 'SomeEnum', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_input_fields():
introspection = introspection_from_schema(dummy_schema)
some_input_object_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "SomeInputObject"
)
assert some_input_object_introspection["inputFields"]
del some_input_object_introspection["inputFields"]
with raises(
TypeError,
match="^Introspection result missing inputFields:"
r" {'kind': 'INPUT_OBJECT', 'name': 'SomeInputObject', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_directive_locations():
introspection = introspection_from_schema(dummy_schema)
some_directive_introspection = introspection["__schema"]["directives"][0]
assert some_directive_introspection["name"] == "SomeDirective"
assert some_directive_introspection["locations"] == ["QUERY"]
del some_directive_introspection["locations"]
with raises(
TypeError,
match="^Introspection result missing directive locations:"
r" {'name': 'SomeDirective', .*}\.$",
):
build_client_schema(introspection)
def throws_when_missing_directive_args():
introspection = introspection_from_schema(dummy_schema)
some_directive_introspection = introspection["__schema"]["directives"][0]
assert some_directive_introspection["name"] == "SomeDirective"
assert some_directive_introspection["args"] == []
del some_directive_introspection["args"]
with raises(
TypeError,
match="^Introspection result missing directive args:"
r" {'name': 'SomeDirective', .*}\.$",
):
build_client_schema(introspection)
def describe_very_deep_decorators_are_not_supported():
def fails_on_very_deep_lists_more_than_7_levels():
schema = build_schema(
"""
type Query {
foo: [[[[[[[[String]]]]]]]]
}
"""
)
introspection = introspection_from_schema(schema)
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == (
"Query fields cannot be resolved."
" Decorated type deeper than introspection query."
)
def fails_on_a_very_deep_non_null_more_than_7_levels():
schema = build_schema(
"""
type Query {
foo: [[[[String!]!]!]!]
}
"""
)
introspection = introspection_from_schema(schema)
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == (
"Query fields cannot be resolved."
" Decorated type deeper than introspection query."
)
def succeeds_on_deep_types_less_or_equal_7_levels():
# e.g., fully non-null 3D matrix
sdl = dedent(
"""
type Query {
foo: [[[String!]!]!]!
}
"""
)
assert cycle_introspection(sdl) == sdl
def describe_prevents_infinite_recursion_on_invalid_introspection():
def recursive_interfaces():
sdl = """
type Query {
foo: Foo
}
type Foo {
foo: String
}
"""
schema = build_schema(sdl, assume_valid=True)
introspection = introspection_from_schema(schema)
foo_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Foo"
)
assert foo_introspection["interfaces"] == []
# we need to patch here since invalid interfaces cannot be built with Python
foo_introspection["interfaces"] = [
{"kind": "OBJECT", "name": "Foo", "ofType": None}
]
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == (
"Foo interfaces cannot be resolved."
" Expected Foo to be a GraphQL Interface type."
)
def recursive_union():
sdl = """
type Query {
foo: Foo
}
union Foo
"""
schema = build_schema(sdl, assume_valid=True)
introspection = introspection_from_schema(schema)
foo_introspection = next(
type_
for type_ in introspection["__schema"]["types"]
if type_["name"] == "Foo"
)
assert foo_introspection["kind"] == "UNION"
assert foo_introspection["possibleTypes"] == []
# we need to patch here since invalid unions cannot be built with Python
foo_introspection["possibleTypes"] = [
{"kind": "UNION", "name": "Foo", "ofType": None}
]
with raises(TypeError) as exc_info:
build_client_schema(introspection)
assert str(exc_info.value) == (
"Foo types cannot be resolved."
" Expected Foo to be a GraphQL Object type."
)
| 31.644596 | 88 | 0.525893 |
7941cf9492d448499691f6cc928bf28124e1e587 | 1,059 | py | Python | benchmarks/convnet/nets/overfeat.py | kmaehashi/chainer-benchmark | 7af2005d71253d236f7f239119d7130f22b26bb4 | ["MIT"] | 9 | 2018-04-09T10:26:45.000Z | 2019-07-13T11:31:49.000Z | benchmarks/convnet/nets/overfeat.py | chainer/chainer-benchmark | 8d0c8f5052b5e2a85ad522ff48899ffc9a2bfafb | ["MIT"] | 19 | 2018-04-09T10:35:12.000Z | 2018-08-30T08:49:40.000Z | benchmarks/convnet/nets/overfeat.py | chainer/chainer-benchmark | 8d0c8f5052b5e2a85ad522ff48899ffc9a2bfafb | ["MIT"] | 2 | 2018-04-09T10:26:53.000Z | 2019-03-20T01:35:26.000Z |
import chainer
import chainer.functions as F
import chainer.links as L
class overfeat(chainer.Chain):
insize = 231
def __init__(self):
super(overfeat, self).__init__()
with self.init_scope():
self.conv1 = L.Convolution2D(3, 96, 11, stride=4)
self.conv2 = L.Convolution2D(96, 256, 5, pad=0)
self.conv3 = L.Convolution2D(256, 512, 3, pad=1)
self.conv4 = L.Convolution2D(512, 1024, 3, pad=1)
self.conv5 = L.Convolution2D(1024, 1024, 3, pad=1)
self.fc6 = L.Linear(1024 * 6 * 6, 3072)
self.fc7 = L.Linear(3072, 4096)
self.fc8 = L.Linear(4096, 1000)
def forward(self, x):
h = F.max_pooling_2d(F.relu(self.conv1(x)), 2, stride=2)
h = F.max_pooling_2d(F.relu(self.conv2(h)), 2, stride=2)
h = F.relu(self.conv3(h))
h = F.relu(self.conv4(h))
h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
h = F.relu(self.fc6(h))
h = F.relu(self.fc7(h))
return self.fc8(h)
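# A minimal usage sketch for the model above, assuming chainer and numpy are
# installed; the zero-filled batch and the names below are illustrative only.
# With chainer's default pooling behaviour, conv1..conv5 plus the pooling
# layers reduce a (1, 3, 231, 231) input to (1, 1024, 6, 6), which is exactly
# the 1024 * 6 * 6 expected by fc6.
if __name__ == '__main__':
    import numpy as np

    model = overfeat()
    x = np.zeros((1, 3, overfeat.insize, overfeat.insize), dtype=np.float32)
    y = model.forward(x)
    print(y.shape)  # (1, 1000), raw class scores from fc8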
| 35.3 | 66 | 0.564684 |
7941d09a5f29369cad0714aab7050d6fd1a58a9b | 4,362 | py | Python | contrib/devtools/logprint-scanner.py | Gh0st-N1njA/PactumCoin | bf9b86e9f4015b9edce0a662b3b0ea3e243d866f | ["MIT"] | null | null | null | contrib/devtools/logprint-scanner.py | Gh0st-N1njA/PactumCoin | bf9b86e9f4015b9edce0a662b3b0ea3e243d866f | ["MIT"] | null | null | null | contrib/devtools/logprint-scanner.py | Gh0st-N1njA/PactumCoin | bf9b86e9f4015b9edce0a662b3b0ea3e243d866f | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The PCTM developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import os, sys
from subprocess import check_output
def countRelevantCommas(line):
openParensPosStack = []
openParensPos = 0
charCounter = 0
numRelevantCommas = 0
firstOpenParensIndex = line.find("(")
for char in line:
if char == '(':
openParensPosStack.append(charCounter)
if char == ')':
openParensPosStack.pop()
if char == "," and openParensPosStack[-1] == firstOpenParensIndex:
numRelevantCommas += 1
charCounter += 1
return numRelevantCommas
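# Illustration of the comma-counting rule above (hypothetical input): for
#     countRelevantCommas('(a, f(b, c), d)')
# only the two commas at the outermost parenthesis level are counted, so
# commas inside nested calls never inflate the argument count that is later
# compared against the number of %-format specifiers.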
if __name__ == "__main__":
out = check_output("git rev-parse --show-toplevel", shell=True, universal_newlines=True)
srcDir = out.rstrip() + "/src/"
filelist = [os.path.join(dp, f) for dp, dn, filenames in os.walk(srcDir) for f in filenames if os.path.splitext(f)[1] == '.cpp' or os.path.splitext(f)[1] == '.h' ]
incorrectInstanceCounter = 0
for file in filelist:
f = open(file,"r", encoding="utf-8")
data = f.read()
rows = data.split("\n")
count = 0
full_data = []
lineCounter = 1
tempLine = ""
tempCount = 0
for row in rows:
# Collapse multiple lines into one
tempLine += row
# Line contains LogPrint or LogPrintf
if tempLine.find("LogPrint") != -1:
if tempLine.count("(") == tempLine.count(")"):
havePercents = tempLine.count('%') > 0
if havePercents:
# This line of code has a format specifier that requires checking number of associated arguments
# Determine the number of arguments provided, see if that matches the number of format specifiers
# Count the number of commas after the format specifier string. Check to see if it matches the number of format specifiers.
# Assumes quotes are not escaped in the specifier string and there are no percent signs when specifying the debug level.
# First, determine the position of the comma after the format specifier section, named commaAfterEndSpecifierStringIndex
firstSpecifierIndex = tempLine.find('%')
startSpecifierStringIndex = tempLine.rfind('"',firstSpecifierIndex)
endSpecifierStringIndex = tempLine.find('"',firstSpecifierIndex)
commaAfterEndSpecifierStringIndex = tempLine.find(',',endSpecifierStringIndex)
# Count the number of commas after the specifier string
line = "(" + tempLine[commaAfterEndSpecifierStringIndex:-1]
numCommas = countRelevantCommas(line)
# Determine number of extra percents after specifier string
numExtraPercents = tempLine.count('%', commaAfterEndSpecifierStringIndex)
# Subtract extra from total count. This is the number of expected specifiers
# ignore %%
numPercents = tempLine.count('%') - numExtraPercents - 2*tempLine.count('%%')
if numPercents != numCommas:
print("Incorrect number of arguments for LogPrint(f) statement found.")
print(str(file) + ":" + str(lineCounter - tempCount))
print("Line = " + tempLine)
print("numRelevantCommas = " + str(numCommas) + ", numRelevantPercents = " + str(numPercents))
print("")
incorrectInstanceCounter += 1
# Done with this multiline, clear tempLine
tempLine = ""
tempCount = 0
else:
tempCount += 1
else:
# No LogPrint, clear tempLine
tempLine = ""
tempCount = 0
lineCounter += 1
print("# of incorrect instances: " + str(incorrectInstanceCounter))
sys.exit(incorrectInstanceCounter)
| 42.349515 | 167 | 0.568088 |
7941d10480099da01f8e1544b4dcc77abcc72a85 | 30,741 | py | Python | sahara/tests/integration/tests/base.py | esikachev/scenario | 40a59114c7bac44fea510767a3c07d73649f4caf | [
"Apache-2.0"
] | null | null | null | sahara/tests/integration/tests/base.py | esikachev/scenario | 40a59114c7bac44fea510767a3c07d73649f4caf | [
"Apache-2.0"
] | null | null | null | sahara/tests/integration/tests/base.py | esikachev/scenario | 40a59114c7bac44fea510767a3c07d73649f4caf | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import telnetlib
import time
import fixtures
from keystoneclient.v2_0 import client as keystone_client
from neutronclient.v2_0 import client as neutron_client
from novaclient.v1_1 import client as nova_client
from oslo_utils import excutils
from oslo_utils import uuidutils
from oslotest import base
from saharaclient.api import base as client_base
import saharaclient.client as sahara_client
import six
from swiftclient import client as swift_client
from testtools import testcase
from sahara.tests.integration.configs import config as cfg
import sahara.utils.openstack.images as imgs
from sahara.utils import ssh_remote
logger = logging.getLogger('swiftclient')
logger.setLevel(logging.WARNING)
def errormsg(message):
def decorator(fct):
def wrapper(*args, **kwargs):
try:
fct(*args, **kwargs)
except Exception as e:
with excutils.save_and_reraise_exception():
ITestCase.print_error_log(message, e)
return wrapper
return decorator
def skip_test(config_name, message=''):
def handle(func):
def call(self, *args, **kwargs):
if getattr(self, config_name):
print(
'\n======================================================='
)
print('INFO: ' + message)
print(
'=======================================================\n'
)
else:
return func(self, *args, **kwargs)
return call
return handle
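# Usage sketch for the two decorators above (illustrative names only): a check
# method on the test case below can be wrapped so that any exception is logged
# with a custom message, and the whole check is skipped whenever the named
# boolean attribute is truthy on the test instance, e.g.
#
#     @skip_test('SKIP_CINDER_TEST', message='Cinder testing is disabled')
#     @errormsg('Failure while Cinder testing: ')
#     def cinder_volume_testing(self, cluster_info):
#         ...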
class ITestCase(testcase.WithAttributes, base.BaseTestCase):
def setUp(self):
super(ITestCase, self).setUp()
self.common_config = cfg.ITConfig().common_config
self.plugin_config = self.get_plugin_config()
self._setup_clients()
self._setup_networks()
self._setup_volume_params()
self._setup_flavor()
self._setup_ssh_access()
self._image_id, self._ssh_username = (
self.get_image_id_and_ssh_username())
telnetlib.Telnet(
self.common_config.SAHARA_HOST, self.common_config.SAHARA_PORT
)
def get_plugin_config(self):
raise NotImplementedError
def _setup_ssh_access(self):
if not self.common_config.PATH_TO_SSH_KEY:
self.user_keypair_id = self.rand_name(
self.common_config.USER_KEYPAIR_ID)
self.private_key = self.nova.keypairs.create(
self.user_keypair_id).private_key
else:
self.user_keypair_id = self.common_config.USER_KEYPAIR_ID
self.private_key = open(self.common_config.PATH_TO_SSH_KEY).read()
def _setup_flavor(self):
if not self.common_config.FLAVOR_ID:
self.flavor_id = self.nova.flavors.create(
name=self.rand_name('i-test-flavor'),
ram=1024,
vcpus=1,
disk=10,
ephemeral=10).id
else:
self.flavor_id = self.common_config.FLAVOR_ID
def _setup_networks(self):
self.floating_ip_pool = self.common_config.FLOATING_IP_POOL
self.internal_neutron_net = None
if self.common_config.NEUTRON_ENABLED:
self.internal_neutron_net = self.get_internal_neutron_net_id()
self.floating_ip_pool = (
self.get_floating_ip_pool_id_for_neutron_net())
def _setup_volume_params(self):
self.volumes_per_node = 0
self.volumes_size = 0
if not getattr(self.plugin_config, 'SKIP_CINDER_TEST', False):
self.volumes_per_node = 2
self.volumes_size = 2
def _setup_clients(self):
keystone = keystone_client.Client(
username=self.common_config.OS_USERNAME,
password=self.common_config.OS_PASSWORD,
tenant_name=self.common_config.OS_TENANT_NAME,
auth_url=self.common_config.OS_AUTH_URL)
keystone.management_url = self.common_config.OS_AUTH_URL
tenant_id = [tenant.id for tenant in keystone.tenants.list()
if tenant.name == self.common_config.OS_TENANT_NAME][0]
self.sahara = sahara_client.Client(
version=self.common_config.SAHARA_API_VERSION,
username=self.common_config.OS_USERNAME,
api_key=self.common_config.OS_PASSWORD,
project_name=self.common_config.OS_TENANT_NAME,
auth_url=self.common_config.OS_AUTH_URL,
sahara_url='http://%s:%s/v%s/%s' % (
self.common_config.SAHARA_HOST,
self.common_config.SAHARA_PORT,
self.common_config.SAHARA_API_VERSION,
tenant_id
))
self.nova = nova_client.Client(
username=self.common_config.OS_USERNAME,
api_key=self.common_config.OS_PASSWORD,
project_id=self.common_config.OS_TENANT_NAME,
auth_url=self.common_config.OS_AUTH_URL)
self.neutron = neutron_client.Client(
username=self.common_config.OS_USERNAME,
password=self.common_config.OS_PASSWORD,
tenant_name=self.common_config.OS_TENANT_NAME,
auth_url=self.common_config.OS_AUTH_URL)
# ------------------------Methods for object creation--------------------------
def create_node_group_template(self, name, plugin_config, description,
node_processes, node_configs,
volumes_per_node=0, volumes_size=0,
floating_ip_pool=None, **kwargs):
data = self.sahara.node_group_templates.create(
name, plugin_config.PLUGIN_NAME, plugin_config.HADOOP_VERSION,
self.flavor_id, description, volumes_per_node, volumes_size,
node_processes, node_configs, floating_ip_pool, **kwargs)
node_group_template_id = data.id
return node_group_template_id
def create_cluster_template(self, name, plugin_config, description,
cluster_configs, node_groups,
anti_affinity=None, net_id=None):
for node_group in node_groups:
for key, value in node_group.items():
if value is None:
del node_group[key]
data = self.sahara.cluster_templates.create(
name, plugin_config.PLUGIN_NAME, plugin_config.HADOOP_VERSION,
description, cluster_configs, node_groups, anti_affinity, net_id)
cluster_template_id = data.id
return cluster_template_id
def create_cluster(self, name, plugin_config, cluster_template_id,
description, cluster_configs,
node_groups=None, anti_affinity=None,
net_id=None, is_transient=False):
self.cluster_id = None
data = self.sahara.clusters.create(
name, plugin_config.PLUGIN_NAME, plugin_config.HADOOP_VERSION,
cluster_template_id, self._image_id, is_transient,
description, cluster_configs, node_groups,
self.user_keypair_id, anti_affinity, net_id)
self.cluster_id = data.id
return self.cluster_id
def get_cluster_info(self, plugin_config):
node_ip_list_with_node_processes = (
self.get_cluster_node_ip_list_with_node_processes(self.cluster_id))
try:
node_info = self.get_node_info(node_ip_list_with_node_processes,
plugin_config)
except Exception as e:
with excutils.save_and_reraise_exception():
print(
'\nFailure during check of node process deployment '
'on cluster node: ' + six.text_type(e)
)
        # For example, this method returns:
# {
# 'node_info': {
# 'tasktracker_count': 3,
# 'node_count': 6,
# 'namenode_ip': '172.18.168.242',
# 'datanode_count': 3
# },
# 'cluster_id': 'bee5c6a1-411a-4e88-95fc-d1fbdff2bb9d',
# 'node_ip_list': {
# '172.18.168.153': ['tasktracker', 'datanode'],
# '172.18.168.208': ['secondarynamenode', 'oozie'],
# '172.18.168.93': ['tasktracker'],
# '172.18.168.101': ['tasktracker', 'datanode'],
# '172.18.168.242': ['namenode', 'jobtracker'],
# '172.18.168.167': ['datanode']
# },
# 'plugin_config': <oslo_config.cfg.GroupAttr object at 0x215d9d>
# }
return {
'cluster_id': self.cluster_id,
'node_ip_list': node_ip_list_with_node_processes,
'node_info': node_info,
'plugin_config': plugin_config
}
# --------Helper methods for cluster info obtaining and its processing---------
def poll_cluster_state(self, cluster_id):
data = self.sahara.clusters.get(cluster_id)
timeout = self.common_config.CLUSTER_CREATION_TIMEOUT * 60
try:
with fixtures.Timeout(timeout, gentle=True):
while True:
status = str(data.status)
if status == 'Active':
break
if status == 'Error':
self.fail('Cluster state == \'Error\'.')
time.sleep(10)
data = self.sahara.clusters.get(cluster_id)
except fixtures.TimeoutException:
self.fail("Cluster did not return to 'Active' state "
"within %d minutes." %
self.common_config.CLUSTER_CREATION_TIMEOUT)
return status
def get_cluster_node_ip_list_with_node_processes(self, cluster_id):
data = self.sahara.clusters.get(cluster_id)
node_groups = data.node_groups
node_ip_list_with_node_processes = {}
for node_group in node_groups:
instances = node_group['instances']
for instance in instances:
node_ip = instance['management_ip']
node_ip_list_with_node_processes[node_ip] = node_group[
'node_processes']
# For example:
# node_ip_list_with_node_processes = {
# '172.18.168.181': ['tasktracker'],
# '172.18.168.94': ['secondarynamenode'],
# '172.18.168.208': ['namenode', 'jobtracker'],
# '172.18.168.93': ['tasktracker', 'datanode'],
# '172.18.168.44': ['tasktracker', 'datanode'],
# '172.18.168.233': ['datanode']
# }
return node_ip_list_with_node_processes
def put_file_to_hdfs(self, namenode_ip, remote_path, data):
tmp_file_path = '/tmp/%s' % uuidutils.generate_uuid()[:8]
self.open_ssh_connection(namenode_ip)
self.write_file_to(tmp_file_path, data)
self.execute_command(
'sudo su - -c "hadoop dfs -copyFromLocal %s %s" %s' % (
tmp_file_path, remote_path, self.plugin_config.HADOOP_USER))
self.execute_command('rm -fr %s' % tmp_file_path)
self.close_ssh_connection()
def try_telnet(self, host, port):
try:
telnetlib.Telnet(host, port)
except Exception as e:
with excutils.save_and_reraise_exception():
print(
'\nTelnet has failed: ' + six.text_type(e) +
' NODE IP: %s, PORT: %s. Passed %s minute(s).'
% (host, port, self.common_config.TELNET_TIMEOUT)
)
def get_node_info(self, node_ip_list_with_node_processes, plugin_config):
tasktracker_count = 0
datanode_count = 0
timeout = self.common_config.TELNET_TIMEOUT * 60
with fixtures.Timeout(timeout, gentle=True):
accessible = False
proc_with_ports = plugin_config.HADOOP_PROCESSES_WITH_PORTS
while not accessible:
accessible = True
for node_ip, processes in six.iteritems(
node_ip_list_with_node_processes):
try:
self.try_telnet(node_ip, '22')
except Exception:
accessible = False
for process in processes:
if process in proc_with_ports:
try:
self.try_telnet(node_ip,
proc_with_ports[process])
except Exception:
print('Connection attempt. NODE PROCESS: %s, '
'PORT: %s.' % (
process, proc_with_ports[process]))
accessible = False
if not accessible:
time.sleep(1)
for node_ip, processes in six.iteritems(
node_ip_list_with_node_processes):
if plugin_config.PROCESS_NAMES['tt'] in processes:
tasktracker_count += 1
if plugin_config.PROCESS_NAMES['dn'] in processes:
datanode_count += 1
if plugin_config.PROCESS_NAMES['nn'] in processes:
namenode_ip = node_ip
return {
'namenode_ip': namenode_ip,
'tasktracker_count': tasktracker_count,
'datanode_count': datanode_count,
'node_count': len(node_ip_list_with_node_processes)
}
def await_active_workers_for_namenode(self, node_info, plugin_config):
self.open_ssh_connection(node_info['namenode_ip'])
timeout = self.common_config.HDFS_INITIALIZATION_TIMEOUT * 60
try:
with fixtures.Timeout(timeout, gentle=True):
while True:
active_tasktracker_count = self.execute_command(
'sudo -u %s bash -lc "hadoop job -list-active-trackers'
'" | grep "^tracker_" | wc -l'
% plugin_config.HADOOP_USER)[1]
try:
active_tasktracker_count = int(
active_tasktracker_count)
except ValueError:
active_tasktracker_count = -1
active_datanode_count = self.execute_command(
'sudo -u %s bash -lc "hadoop dfsadmin -report" | '
'grep -e "Datanodes available:.*" '
'-e "Live datanodes.*" | grep -o "[0-9]*" | head -1'
% plugin_config.HADOOP_USER)[1]
try:
active_datanode_count = int(active_datanode_count)
except ValueError:
active_datanode_count = -1
if (active_tasktracker_count ==
node_info['tasktracker_count'] and
active_datanode_count ==
node_info['datanode_count']):
break
time.sleep(10)
except fixtures.TimeoutException:
self.fail(
'Tasktracker or datanode cannot be started within '
'%s minute(s) for namenode.'
% self.common_config.HDFS_INITIALIZATION_TIMEOUT
)
finally:
self.close_ssh_connection()
def await_active_tasktracker(self, node_info, plugin_config):
self.open_ssh_connection(node_info['namenode_ip'])
for i in range(self.common_config.HDFS_INITIALIZATION_TIMEOUT * 6):
time.sleep(10)
active_tasktracker_count = self.execute_command(
'sudo -u %s bash -lc "hadoop job -list-active-trackers" '
'| grep "^tracker_" | wc -l'
% plugin_config.HADOOP_USER)[1]
active_tasktracker_count = int(active_tasktracker_count)
if (active_tasktracker_count == node_info['tasktracker_count']):
break
else:
self.fail(
'Tasktracker or datanode cannot be started within '
'%s minute(s) for namenode.'
% self.common_config.HDFS_INITIALIZATION_TIMEOUT)
self.close_ssh_connection()
@errormsg("Failure while event log testing: ")
def _test_event_log(self, cluster_id):
cluster = self.sahara.clusters.get(cluster_id)
events = self.sahara.events.list(cluster_id)
invalid_steps = []
if not events:
events = []
for step in cluster.provision_progress:
if not step['successful']:
invalid_steps.append(step)
if len(invalid_steps) > 0 or len(events) > 0:
events_info = "\n".join(six.text_type(e) for e in events)
invalid_steps_info = "\n".join(six.text_type(e)
for e in invalid_steps)
steps_info = "\n".join(six.text_type(e)
for e in cluster.provision_progress)
self.fail(
"Issues with event log work: "
"\n Not removed events: \n\n {events}"
"\n Incomplete steps: \n\n {invalid_steps}"
"\n All steps: \n\n {steps}".format(
events=events_info,
steps=steps_info,
invalid_steps=invalid_steps_info))
# --------------------------------Remote---------------------------------------
def connect_to_swift(self):
return swift_client.Connection(
authurl=self.common_config.OS_AUTH_URL,
user=self.common_config.OS_USERNAME,
key=self.common_config.OS_PASSWORD,
tenant_name=self.common_config.OS_TENANT_NAME,
auth_version=self.common_config.SWIFT_AUTH_VERSION
)
def open_ssh_connection(self, host):
ssh_remote._connect(host, self._ssh_username, self.private_key)
@staticmethod
def execute_command(cmd):
return ssh_remote._execute_command(cmd, get_stderr=True)
@staticmethod
def write_file_to(remote_file, data):
ssh_remote._write_file_to(remote_file, data)
@staticmethod
def read_file_from(remote_file):
return ssh_remote._read_file_from(remote_file)
@staticmethod
def close_ssh_connection():
ssh_remote._cleanup()
def transfer_helper_conf_file_to_node(self, file_name):
file = open('sahara/tests/integration/tests/resources/%s' % file_name
).read()
try:
self.write_file_to(file_name, file)
except Exception as e:
with excutils.save_and_reraise_exception():
print(
'\nFailure while conf file transferring '
'to cluster node: ' + six.text_type(e)
)
def transfer_helper_script_to_node(self, script_name, parameter_list=None):
script = open('sahara/tests/integration/tests/resources/%s'
% script_name).read()
if parameter_list:
for parameter, value in parameter_list.items():
script = script.replace(
'%s=""' % parameter, '%s=%s' % (parameter, value))
try:
self.write_file_to('script.sh', script)
except Exception as e:
with excutils.save_and_reraise_exception():
print(
'\nFailure while helper script transferring '
'to cluster node: ' + six.text_type(e)
)
self.execute_command('chmod 777 script.sh')
def transfer_helper_script_to_nodes(self, node_ip_list, script_name,
parameter_list=None):
for node_ip in node_ip_list:
self.open_ssh_connection(node_ip)
self.transfer_helper_script_to_node(script_name, parameter_list)
self.close_ssh_connection()
# -------------------------------Helper methods--------------------------------
def get_image_id_and_ssh_username(self):
def print_error_log(parameter, value):
print(
'\nImage with %s "%s" was found in image list but it was '
'possibly not registered for Sahara. Please, make sure image '
'was correctly registered.' % (parameter, value)
)
def try_get_image_id_and_ssh_username(parameter, value):
try:
return image.id, image.metadata[imgs.PROP_USERNAME]
except KeyError:
with excutils.save_and_reraise_exception():
print_error_log(parameter, value)
images = self.nova.images.list()
# If plugin_config.IMAGE_ID is not None then find corresponding image
# and return its ID and username. If image not found then handle error
if self.plugin_config.IMAGE_ID:
for image in images:
if image.id == self.plugin_config.IMAGE_ID:
return try_get_image_id_and_ssh_username(
'ID', self.plugin_config.IMAGE_ID)
self.fail(
'\n\nImage with ID "%s" not found in image list. Please, make '
'sure you specified right image ID.\n' %
self.plugin_config.IMAGE_ID)
# If plugin_config.IMAGE_NAME is not None then find corresponding image
# and return its ID and username. If image not found then handle error
if self.plugin_config.IMAGE_NAME:
for image in images:
if image.name == self.plugin_config.IMAGE_NAME:
return try_get_image_id_and_ssh_username(
'name', self.plugin_config.IMAGE_NAME)
self.fail(
'\n\nImage with name "%s" not found in image list. Please, '
'make sure you specified right image name.\n'
% self.plugin_config.IMAGE_NAME)
# If plugin_config.IMAGE_TAG is not None then find corresponding image
# and return its ID and username. If image not found then handle error
if self.plugin_config.IMAGE_TAG:
for image in images:
if (image.metadata.get(imgs.PROP_TAG + '%s'
% self.plugin_config.IMAGE_TAG)) and (
image.metadata.get(imgs.PROP_TAG + str(
self.plugin_config.PLUGIN_NAME))):
return try_get_image_id_and_ssh_username(
'tag', self.plugin_config.IMAGE_TAG
)
self.fail(
'\n\nImage with tag "%s" not found in list of registered '
'images for Sahara. Please, make sure tag "%s" was added to '
'image and image was correctly registered.\n'
% (self.plugin_config.IMAGE_TAG, self.plugin_config.IMAGE_TAG)
)
# If plugin_config.IMAGE_ID, plugin_config.IMAGE_NAME and
# plugin_config.IMAGE_TAG are None then image is chosen
# by tag "sahara_i_tests". If image has tag "sahara_i_tests"
# (at the same time image ID, image name and image tag were not
# specified in configuration file of integration tests) then return
# its ID and username. Found image will be chosen as image for tests.
# If image with tag "sahara_i_tests" not found then handle error
for image in images:
if (image.metadata.get(imgs.PROP_TAG + 'sahara_i_tests')) and (
image.metadata.get(imgs.PROP_TAG + str(
self.plugin_config.PLUGIN_NAME))):
try:
return image.id, image.metadata[imgs.PROP_USERNAME]
except KeyError:
with excutils.save_and_reraise_exception():
print(
'\nNone of parameters of image (ID, name, tag)'
' was specified in configuration file of '
'integration tests. That is why there was '
'attempt to choose image by tag '
'"sahara_i_tests" and image with such tag '
'was found in image list but it was possibly '
'not registered for Sahara. Please, make '
'sure image was correctly registered.'
)
self.fail(
'\n\nNone of parameters of image (ID, name, tag) was specified in '
'configuration file of integration tests. That is why there was '
'attempt to choose image by tag "sahara_i_tests" but image with '
'such tag not found in list of registered images for Sahara. '
'Please, make sure image was correctly registered. Please, '
'specify one of parameters of image (ID, name or tag) in '
'configuration file of integration tests.\n'
)
def get_floating_ip_pool_id_for_neutron_net(self):
# Find corresponding floating IP pool by its name and get its ID.
# If pool not found then handle error
try:
floating_ip_pool = self.neutron.list_networks(
name=self.common_config.FLOATING_IP_POOL)
floating_ip_pool_id = floating_ip_pool['networks'][0]['id']
return floating_ip_pool_id
except IndexError:
with excutils.save_and_reraise_exception():
raise Exception(
'\nFloating IP pool \'%s\' not found in pool list. '
'Please, make sure you specified right floating IP pool.'
% self.common_config.FLOATING_IP_POOL
)
def get_internal_neutron_net_id(self):
# Find corresponding internal Neutron network by its name and get
# its ID. If network not found then handle error
try:
internal_neutron_net = self.neutron.list_networks(
name=self.common_config.INTERNAL_NEUTRON_NETWORK)
internal_neutron_net_id = internal_neutron_net['networks'][0]['id']
return internal_neutron_net_id
except IndexError:
with excutils.save_and_reraise_exception():
raise Exception(
'\nInternal Neutron network \'%s\' not found in network '
'list. Please, make sure you specified right network name.'
% self.common_config.INTERNAL_NEUTRON_NETWORK
)
def delete_objects(self, cluster_id=None,
cluster_template_id=None,
node_group_template_id_list=None):
if not self.common_config.RETAIN_CLUSTER_AFTER_TEST:
if cluster_id:
try:
self.sahara.clusters.delete(cluster_id)
except client_base.APIException:
# cluster in deleting state or deleted
pass
try:
# waiting roughly for 300 seconds for cluster to terminate
with fixtures.Timeout(300, gentle=True):
while True:
try:
self.sahara.clusters.get(cluster_id)
except client_base.APIException:
# Cluster is finally deleted
break
time.sleep(5)
except fixtures.TimeoutException:
self.fail('Cluster failed to terminate in 300 seconds: '
'%s' % cluster_id)
if cluster_template_id:
self.sahara.cluster_templates.delete(cluster_template_id)
if node_group_template_id_list:
for node_group_template_id in node_group_template_id_list:
self.sahara.node_group_templates.delete(
node_group_template_id
)
@staticmethod
def delete_swift_container(swift, container):
objects = [obj['name'] for obj in swift.get_container(container)[1]]
for obj in objects:
swift.delete_object(container, obj)
swift.delete_container(container)
@staticmethod
def print_error_log(message, exception=None):
print(
'\n\n!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!* '
'ERROR LOG *!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*'
'!*!\n'
)
print(message + six.text_type(exception))
print(
'\n!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!* END OF '
'ERROR LOG *!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*'
'!*!\n\n'
)
def capture_error_log_from_cluster_node(self, log_file):
print(
'\n\n!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!* CAPTURED ERROR '
'LOG FROM CLUSTER NODE *!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*'
'!*!\n'
)
print(self.read_file_from(log_file))
print(
'\n!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!* END OF CAPTURED ERROR '
'LOG FROM CLUSTER NODE *!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*!*'
'!*!\n\n'
)
@staticmethod
def rand_name(name):
return '%s-%s' % (name, uuidutils.generate_uuid()[:8])
def tearDown(self):
if not self.common_config.PATH_TO_SSH_KEY:
self.nova.keypairs.delete(self.user_keypair_id)
if not self.common_config.FLAVOR_ID:
self.nova.flavors.delete(self.flavor_id)
super(ITestCase, self).tearDown()
| 42.226648 | 79 | 0.560489 |
7941d295aa85e160e8382d424eaa3e3f6df7164c | 2,846 | py | Python | nbr/models.py | johnmwangi/Nbr_Hood | 7a4be73db001560a2a17f37166a544381fe07581 | ["MIT"] | null | null | null | nbr/models.py | johnmwangi/Nbr_Hood | 7a4be73db001560a2a17f37166a544381fe07581 | ["MIT"] | 3 | 2021-03-19T01:03:21.000Z | 2022-01-13T01:16:52.000Z | nbr/models.py | johnmwangi/Nbr_Hood | 7a4be73db001560a2a17f37166a544381fe07581 | ["MIT"] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Location(models.Model):
name = models.CharField(max_length=100,null=True)
def __str__(self):
return self.name
class Hood(models.Model):
hood_photo = models.ImageField(upload_to='hoods/')
hood_name = models.CharField(max_length=100, null=True)
occupants_count = models.PositiveIntegerField(default=0)
location = models.ForeignKey(Location, on_delete=models.CASCADE, null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
@classmethod
def get_hoods(cls):
hoods = Hood.objects.all()
return hoods
@classmethod
def search_hood(cls,hood_search):
hoods = cls.objects.filter(id__icontains = hood_search)
return hoods
class Meta:
ordering = ['hood_name']
class Business(models.Model):
b_photo = models.ImageField(upload_to='business/',null=True)
b_name = models.CharField(max_length=100, blank=True, null=True)
b_description = models.TextField(max_length=200, blank=True, null=True)
b_email = models.CharField(max_length=100, blank=True, null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE,null=True)
hood = models.ForeignKey(Hood, on_delete=models.CASCADE, related_name='biz',null=True)
@classmethod
def get_business(cls):
business = Business.objects.all()
return business
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE,null=True)
profile_photo= models.ImageField(upload_to='profiles/',null=True)
bio= models.CharField(max_length=240, null=True)
email = models.CharField(max_length=100, blank=True, null=True)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
    @receiver(post_save, sender=User)
    def update_user_profile(sender, instance, created, **kwargs):
        # create_user_profile (connected above) already creates the Profile for
        # a newly saved User; creating it again here would violate the
        # one-to-one constraint, so only re-save the profile on later updates.
        if not created:
            instance.profile.save()
@classmethod
def get_profile(cls):
profile = Profile.objects.all()
return profile
class Meta:
ordering = ['user']
class Join(models.Model):
    user_id = models.OneToOneField(User, on_delete=models.CASCADE)
    hood_id = models.ForeignKey(Hood, on_delete=models.CASCADE)
    def __str__(self):
        return self.user_id.username
class Posts(models.Model):
title = models.CharField(max_length = 300)
content = models.TextField()
    posted_by = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
    hood = models.ForeignKey(Hood, on_delete=models.CASCADE)
def save_posts(self):
self.save()
def delete_posts(self):
self.delete()
def __str__(self):
return self.title
| 29.957895 | 90 | 0.711525 |
7941d2ba01ba9a99ddc42ff4ed2404d541fdfbfc | 162 | py | Python | app/gpt/gptauto.py | toratoradora/sukima | 456fb160b5455ed9f3a1eac648cd1ad1c9fcc2f1 | ["BSD-2-Clause"] | null | null | null | app/gpt/gptauto.py | toratoradora/sukima | 456fb160b5455ed9f3a1eac648cd1ad1c9fcc2f1 | ["BSD-2-Clause"] | null | null | null | app/gpt/gptauto.py | toratoradora/sukima | 456fb160b5455ed9f3a1eac648cd1ad1c9fcc2f1 | ["BSD-2-Clause"] | null | null | null |
class GPTAuto:
def __init__(self, model_name='generic'):
self.model_name = model_name
def generate(self, args):
return 'Not implemented'
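# A minimal subclass sketch (hypothetical backend): the class above acts as a
# base/placeholder, so concrete models are expected to override generate()
# with real inference logic.
#
#     class EchoModel(GPTAuto):
#         def __init__(self):
#             super().__init__(model_name='echo')
#
#         def generate(self, args):
#             return str(args)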
| 23.142857 | 45 | 0.660494 |
7941d31624033994a517c40e2ab0ec5b9de9c537 | 15,881 | py | Python | app/enquiries/tests/test_auth.py | uktrade/enquiry-mgmt-tool | 547a195cb9296ad0b1dbc5c8435bfeeae9755bb4 | ["MIT"] | 3 | 2020-04-03T12:16:56.000Z | 2020-12-09T18:18:49.000Z | app/enquiries/tests/test_auth.py | uktrade/enquiry-mgmt-tool | 547a195cb9296ad0b1dbc5c8435bfeeae9755bb4 | ["MIT"] | 173 | 2020-03-18T09:31:50.000Z | 2022-03-26T04:19:41.000Z | app/enquiries/tests/test_auth.py | uktrade/enquiry-mgmt-tool | 547a195cb9296ad0b1dbc5c8435bfeeae9755bb4 | ["MIT"] | null | null | null |
import datetime
from collections.abc import Mapping, Sequence
import mohawk
import pytest
from django.urls import reverse, path
from freezegun import freeze_time
from rest_framework import status
from rest_framework.response import Response
from rest_framework.test import APIRequestFactory
from rest_framework.views import APIView
from app.enquiries import auth
from app.enquiries.tests.factories import OwnerFactory
HAWK_CREDENTIALS = {
"test-id-without-scope": {
"key": "test-key-without-scope",
"scopes": (),
},
"test-id-with-scope": {
"key": "test-key-with-scope",
"scopes": ("enquiries",),
},
"test-id-with-multiple-scopes": {
"key": "test-key-with-multiple-scopes",
"scopes": ("enquiries", "views"),
},
}
class HawkViewWithoutScope(auth.HawkResponseSigningMixin, APIView):
"""View using Hawk authentication."""
authentication_classes = (auth.HawkAuthentication,)
permission_classes = ()
def get(self, request):
"""Simple test view with fixed response."""
return Response({"content": "hawk-test-view-without-scope"})
class HawkViewWithScope(auth.HawkResponseSigningMixin, APIView):
"""View using Hawk authentication."""
authentication_classes = (auth.HawkAuthentication,)
permission_classes = (auth.HawkScopePermission,)
required_hawk_scope = "enquiries"
def get(self, request):
"""Simple test view with fixed response."""
return Response({"content": "hawk-test-view-with-scope"})
class PaasIPView(APIView):
"""View using PaaS IP Authentication."""
authentication_classes = (auth.PaaSIPAuthentication,)
permission_classes = ()
def get(self, request):
"""Simple test view with fixed response."""
return Response({"content": "paas-ip-test-view"})
urlpatterns = [
path(
"test-hawk-without-scope/",
HawkViewWithoutScope.as_view(),
name="test-hawk-without-scope",
),
path(
"test-hawk-with-scope/",
HawkViewWithScope.as_view(),
name="test-hawk-with-scope",
),
path(
"test-paas-ip/",
PaasIPView.as_view(),
name="test-paas-ip",
),
]
def _url():
return "http://testserver" + reverse("test-hawk-without-scope")
def _url_incorrect_domain():
return "http://incorrect" + reverse("test-hawk-without-scope")
def _url_incorrect_path():
return "http://testserver" + reverse("test-hawk-without-scope") + "incorrect/"
def _url_with_scope():
return "http://testserver" + reverse("test-hawk-with-scope")
def _auth_sender(
key_id="test-id-without-scope",
secret_key="test-key-without-scope",
url=_url,
method="GET",
content="",
content_type="",
):
credentials = {
"id": key_id,
"key": secret_key,
"algorithm": "sha256",
}
return mohawk.Sender(
credentials,
url(),
method,
content=content,
content_type=content_type,
)
def identity(value):
"""Pass through a single argument unchanged."""
return value
def resolve_data(data, value_resolver=identity):
"""
Recursively resolve callables in data structures.
Given a value:
- if it's a callable, resolve it
- if it's a sequence, resolve each of the sequence's values
- if it's a dict, resolve each value of the dict
The resolved value is returned.
Used in parametrised tests.
"""
if isinstance(data, Mapping):
return {
key: resolve_data(value, value_resolver=value_resolver)
for key, value in data.items()
}
if isinstance(data, Sequence) and not isinstance(data, (str, bytes)):
return [resolve_data(value, value_resolver=value_resolver) for value in data]
if callable(data):
return value_resolver(data())
return value_resolver(data)
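# A small illustration of resolve_data with made-up values: callables are
# invoked wherever they appear in the structure, so
#     resolve_data({'a': lambda: 1, 'b': [lambda: 2, 3]})
# evaluates to {'a': 1, 'b': [2, 3]}; the parametrised tests below rely on this
# to defer building Hawk Authorization headers until the test actually runs.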
@pytest.mark.django_db
@pytest.mark.urls("app.enquiries.tests.test_auth")
class TestHawkAuthentication:
"""Tests Hawk authentication when using HawkAuthentication."""
@pytest.mark.parametrize(
"get_kwargs,expected_json",
(
(
# If the Authorization header isn't passed
{
"content_type": "",
},
{"detail": "Authentication credentials were not provided."},
),
(
# If the Authorization header generated from an incorrect ID
{
"content_type": "",
"HTTP_AUTHORIZATION": lambda: _auth_sender(key_id="incorrect").request_header,
},
{"detail": "Incorrect authentication credentials."},
),
(
# If the Authorization header generated from an incorrect secret
{
"content_type": "",
"HTTP_AUTHORIZATION":
lambda: _auth_sender(secret_key="incorrect").request_header,
},
{"detail": "Incorrect authentication credentials."},
),
(
# If the Authorization header generated from an incorrect domain
{
"content_type": "",
"HTTP_AUTHORIZATION":
lambda: _auth_sender(url=_url_incorrect_domain).request_header,
},
{"detail": "Incorrect authentication credentials."},
),
(
# If the Authorization header generated from an incorrect path
{
"content_type": "",
"HTTP_AUTHORIZATION":
lambda: _auth_sender(url=_url_incorrect_path).request_header,
},
{"detail": "Incorrect authentication credentials."},
),
(
# If the Authorization header generated from an incorrect method
{
"content_type": "",
"HTTP_AUTHORIZATION": lambda: _auth_sender(method="POST").request_header,
},
{"detail": "Incorrect authentication credentials."},
),
(
# If the Authorization header generated from an incorrect
# content-type
{
"content_type": "",
"HTTP_AUTHORIZATION":
lambda: _auth_sender(content_type="incorrect").request_header,
},
{"detail": "Incorrect authentication credentials."},
),
(
# If the Authorization header generated from incorrect content
{
"content_type": "",
"HTTP_AUTHORIZATION": lambda: _auth_sender(content="incorrect").request_header,
},
{"detail": "Incorrect authentication credentials."},
),
),
)
def test_401_returned(self, api_client, get_kwargs, expected_json):
"""If the request isn't properly Hawk-authenticated, then a 401 is
returned
"""
resolved_get_kwargs = resolve_data(get_kwargs)
response = api_client.get(
_url(),
**resolved_get_kwargs,
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
assert response.json() == expected_json
def test_if_61_seconds_in_past_401_returned(self, api_client):
"""If the Authorization header is generated 61 seconds in the past, then a
401 is returned
"""
past = datetime.datetime.now() - datetime.timedelta(seconds=61)
with freeze_time(past):
auth = _auth_sender().request_header
response = api_client.get(
reverse("test-hawk-without-scope"),
content_type="",
HTTP_AUTHORIZATION=auth,
)
assert response.status_code == status.HTTP_401_UNAUTHORIZED
assert response.json() == {"detail": "Incorrect authentication credentials."}
@pytest.mark.usefixtures("local_memory_cache")
def test_if_authentication_reused_401_returned(self, api_client, settings):
"""If the Authorization header is reused, then a 401 is returned"""
settings.HAWK_CREDENTIALS = HAWK_CREDENTIALS
auth = _auth_sender().request_header
response_1 = api_client.get(
_url(),
content_type="",
HTTP_AUTHORIZATION=auth,
)
assert response_1.status_code == status.HTTP_200_OK
response_2 = api_client.get(
_url(),
content_type="",
HTTP_AUTHORIZATION=auth,
)
assert response_2.status_code == status.HTTP_401_UNAUTHORIZED
assert response_2.json() == {"detail": "Incorrect authentication credentials."}
def test_returned_object_with_authentication_3_ips(self, api_client, settings):
"""If the Authorization and X-Forwarded-For headers are correct,
with an extra IP address prepended to the X-Forwarded-For then
the correct, and authentic, data is returned
"""
settings.HAWK_CREDENTIALS = HAWK_CREDENTIALS
sender = _auth_sender()
response = api_client.get(
_url(),
content_type="",
HTTP_AUTHORIZATION=sender.request_header,
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"content": "hawk-test-view-without-scope"}
def test_returned_object_with_authentication(self, api_client, settings):
"""If the Authorization and X-Forwarded-For headers are correct, then
the correct, and authentic, data is returned
"""
settings.HAWK_CREDENTIALS = HAWK_CREDENTIALS
sender = _auth_sender()
response = api_client.get(
_url(),
content_type="",
HTTP_AUTHORIZATION=sender.request_header,
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"content": "hawk-test-view-without-scope"}
@pytest.mark.django_db
@pytest.mark.urls("app.enquiries.tests.test_auth")
class TestHawkResponseSigningMixin:
"""Tests Hawk response signing when using HawkResponseMiddleware."""
def test_returned_object_with_authentication(self, api_client, settings):
"""If the Authorization and X-Forwarded-For headers are correct, then
the correct, and authentic, data is returned
"""
settings.HAWK_CREDENTIALS = HAWK_CREDENTIALS
sender = _auth_sender()
response = api_client.get(
_url(),
content_type="",
HTTP_AUTHORIZATION=sender.request_header,
)
assert response.status_code == status.HTTP_200_OK
# Just asserting that accept_response doesn't raise is a bit weak,
# so we also assert that it raises if the header, content, or
# content_type are incorrect
sender.accept_response(
response_header=response["Server-Authorization"],
content=response.content,
content_type=response["Content-Type"],
)
with pytest.raises(mohawk.exc.MacMismatch):
sender.accept_response(
response_header='Hawk mac="incorrect", hash="incorrect"',
content=response.content,
content_type=response["Content-Type"],
)
with pytest.raises(mohawk.exc.MisComputedContentHash):
sender.accept_response(
response_header=response["Server-Authorization"],
content="incorrect",
content_type=response["Content-Type"],
)
with pytest.raises(mohawk.exc.MisComputedContentHash):
sender.accept_response(
response_header=response["Server-Authorization"],
content=response.content,
content_type="incorrect",
)
def test_does_not_sign_non_hawk_requests(self):
"""Test that a 403 is returned if the request is not authenticated using Hawk."""
from rest_framework.test import force_authenticate
factory = APIRequestFactory()
user = OwnerFactory()
view = HawkViewWithScope.as_view()
request = factory.get("/test-hawk-with-scope/")
force_authenticate(request, user=user)
response = view(request)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.data == {
"detail": "You do not have permission to perform this action.",
}
@pytest.mark.django_db
@pytest.mark.urls("app.enquiries.tests.test_auth")
class TestHawkScopePermission:
"""Tests scoped-based permissions using HawkScopePermission."""
def test_denies_access_when_without_the_required_scope(self, api_client, settings):
"""
Test that a 403 is returned if the request is Hawk authenticated but the client doesn't
have the required scope.
"""
settings.HAWK_CREDENTIALS = HAWK_CREDENTIALS
sender = _auth_sender(
key_id="test-id-without-scope",
secret_key="test-key-without-scope",
url=_url_with_scope,
)
response = api_client.get(
_url_with_scope(),
content_type="",
HTTP_AUTHORIZATION=sender.request_header,
)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.json() == {
"detail": "You do not have permission to perform this action.",
}
def test_denies_access_if_not_authenticated_using_hawk(self):
"""Test that a 403 is returned if the request is not authenticated using Hawk."""
from rest_framework.test import force_authenticate
factory = APIRequestFactory()
user = OwnerFactory()
view = HawkViewWithScope.as_view()
request = factory.get("/test-hawk-with-scope/")
force_authenticate(request, user=user)
response = view(request)
assert response.status_code == status.HTTP_403_FORBIDDEN
assert response.data == {
"detail": "You do not have permission to perform this action.",
}
def test_authorises_when_with_the_required_scope(self, api_client, settings):
"""
Test that a 200 is returned if the request is Hawk authenticated and the client has
the required scope.
"""
settings.HAWK_CREDENTIALS = HAWK_CREDENTIALS
sender = _auth_sender(
key_id="test-id-with-scope",
secret_key="test-key-with-scope",
url=_url_with_scope,
)
response = api_client.get(
_url_with_scope(),
content_type="",
HTTP_AUTHORIZATION=sender.request_header,
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"content": "hawk-test-view-with-scope"}
def test_authorises_when_with_one_of_the_required_scopes(self, api_client, settings):
"""
Test that a 200 is returned if the request is Hawk authenticated and the client has
one of the required scope.
"""
settings.HAWK_CREDENTIALS = HAWK_CREDENTIALS
sender = _auth_sender(
key_id="test-id-with-multiple-scopes",
secret_key="test-key-with-multiple-scopes",
url=_url_with_scope,
)
response = api_client.get(
_url_with_scope(),
content_type="",
HTTP_AUTHORIZATION=sender.request_header,
)
assert response.status_code == status.HTTP_200_OK
assert response.json() == {"content": "hawk-test-view-with-scope"}
| 34.300216 | 99 | 0.607707 |
7941d32dbc9a6d8f5f932f59da624b6f97dc0965 | 24,953 | py | Python | modules/tools/prediction/data_pipelines/cruiseMLP_train.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | ["Apache-2.0"] | 27 | 2019-04-06T02:27:14.000Z | 2021-11-27T13:47:06.000Z | modules/tools/prediction/data_pipelines/cruiseMLP_train.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | ["Apache-2.0"] | 7 | 2021-03-10T18:14:25.000Z | 2022-02-27T04:46:46.000Z | modules/tools/prediction/data_pipelines/cruiseMLP_train.py | seeclong/apollo | 99c8afb5ebcae2a3c9359a156a957ff03944b27b | ["Apache-2.0"] | 38 | 2019-04-15T10:58:37.000Z | 2022-01-27T08:52:39.000Z |
###############################################################################
# Copyright 2018 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
@requirement:
pytorch 0.4.1
"""
import os
import h5py
import numpy as np
import logging
import argparse
import proto.cruise_model_pb2
from proto.cruise_model_pb2 import TensorParameter, InputParameter,\
Conv1dParameter, DenseParameter, ActivationParameter, MaxPool1dParameter,\
AvgPool1dParameter, LaneFeatureConvParameter, ObsFeatureFCParameter,\
ClassifyParameter, RegressParameter, CruiseModelParameter
from cruise_models import FullyConn_NN, FCNN_CNN1D
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader, sampler
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
from common.configure import parameters
# TODO(panjiacheng): the data-loader part needs to be modified.
# Constants
dim_input = parameters['cruise_mlp']['dim_input']
dim_hidden_1 = parameters['cruise_mlp']['dim_hidden_1']
dim_hidden_2 = parameters['cruise_mlp']['dim_hidden_2']
dim_output = parameters['cruise_mlp']['dim_output']
# Setup
cuda_is_available = torch.cuda.is_available()
logging.basicConfig(filename='training.log', level=logging.INFO)
def load_Conv1dParameter(model, key, stride=1):
model_pb = Conv1dParameter()
model_pb.shape.extend(list(model.state_dict()[key+'.weight'].shape))
model_pb.use_bias = True
kernel_param = TensorParameter()
kernel_param.shape.extend(list(model.state_dict()[key+'.weight'].shape))
kernel_param.data.extend(
list(model.state_dict()[key+'.weight'].numpy().reshape(-1)))
model_pb.kernel.CopyFrom(kernel_param)
bias_param = TensorParameter()
bias_param.shape.extend(list(model.state_dict()[key+'.bias'].shape))
bias_param.data.extend(
list(model.state_dict()[key+'.bias'].numpy().reshape(-1)))
model_pb.bias.CopyFrom(bias_param)
model_pb.stride = stride
return model_pb
def load_DenseParameter(model, key):
model_pb = DenseParameter()
model_pb.use_bias = True
weights_param = TensorParameter()
weights_param.shape.extend(
list(model.state_dict()[key+'.weight'].numpy().T.shape))
weights_param.data.extend(
list(model.state_dict()[key+'.weight'].numpy().T.reshape(-1)))
model_pb.weights.CopyFrom(weights_param)
bias_param = TensorParameter()
bias_param.shape.extend(
list(model.state_dict()[key+'.bias'].numpy().shape))
bias_param.data.extend(list(model.state_dict()[key+'.bias'].numpy()))
model_pb.bias.CopyFrom(bias_param)
model_pb.units = model_pb.bias.shape[0]
return model_pb
def save_FCNN_CNN1D(model, filename):
model_pb = CruiseModelParameter()
lane_feature_conv = LaneFeatureConvParameter()
lane_feature_conv.conv1d_0.CopyFrom(
load_Conv1dParameter(model, 'lane_feature_conv.0', stride=1))
lane_feature_conv.activation_1.activation = 'relu'
lane_feature_conv.conv1d_2.CopyFrom(
load_Conv1dParameter(model, 'lane_feature_conv.2', stride=2))
lane_feature_conv.activation_3.activation = 'relu'
lane_feature_conv.conv1d_4.CopyFrom(
load_Conv1dParameter(model, 'lane_feature_conv.4', stride=2))
lane_feature_maxpool = MaxPool1dParameter()
lane_feature_maxpool.kernel_size = 3
lane_feature_maxpool.stride = 3
lane_feature_avgpool = AvgPool1dParameter()
lane_feature_avgpool.kernel_size = 3
lane_feature_avgpool.stride = 3
obs_feature_fc = ObsFeatureFCParameter()
obs_feature_fc.linear_0.CopyFrom(
load_DenseParameter(model, 'obs_feature_fc.0'))
obs_feature_fc.activation_1.activation = 'sigmoid'
obs_feature_fc.linear_3.CopyFrom(
load_DenseParameter(model, 'obs_feature_fc.3'))
obs_feature_fc.activation_4.activation = 'sigmoid'
classify = ClassifyParameter()
classify.linear_0.CopyFrom(load_DenseParameter(model, 'classify.0'))
classify.activation_1.activation = 'sigmoid'
classify.linear_3.CopyFrom(load_DenseParameter(model, 'classify.3'))
classify.activation_4.activation = 'sigmoid'
classify.linear_6.CopyFrom(load_DenseParameter(model, 'classify.6'))
classify.activation_7.activation = 'sigmoid'
classify.linear_9.CopyFrom(load_DenseParameter(model, 'classify.9'))
classify.activation_10.activation = 'sigmoid'
regress = RegressParameter()
regress.linear_0.CopyFrom(load_DenseParameter(model, 'regress.0'))
regress.activation_1.activation = 'relu'
regress.linear_3.CopyFrom(load_DenseParameter(model, 'regress.3'))
regress.activation_4.activation = 'relu'
regress.linear_6.CopyFrom(load_DenseParameter(model, 'regress.6'))
regress.activation_7.activation = 'relu'
regress.linear_9.CopyFrom(load_DenseParameter(model, 'regress.9'))
regress.activation_10.activation = 'relu'
model_pb.lane_feature_conv.CopyFrom(lane_feature_conv)
model_pb.lane_feature_maxpool.CopyFrom(lane_feature_maxpool)
model_pb.lane_feature_avgpool.CopyFrom(lane_feature_avgpool)
model_pb.obs_feature_fc.CopyFrom(obs_feature_fc)
model_pb.classify.CopyFrom(classify)
model_pb.regress.CopyFrom(regress)
with open(filename, 'wb') as params_file:
params_file.write(model_pb.SerializeToString())
'''
Custom defined loss function that lumps the loss of classification and
of regression together.
'''
def loss_fn(c_pred, r_pred, target, balance):
loss_C = nn.BCEWithLogitsLoss(
pos_weight=torch.FloatTensor([balance]).cuda()) # nn.BCELoss()
loss_R = nn.MSELoss()
#loss = loss_C(c_pred, target[:,0].view(target.shape[0],1))
loss = 4 * loss_C(c_pred, target[:, 0].view(target.shape[0], 1)) + \
loss_R(((target[:, 2] > 0.0) * (target[:, 2] <= 3.0)).float().view(target.shape[0], 1) * r_pred +
((target[:, 2] <= 0.0) + (target[:, 2] > 3.0)).float().view(
target.shape[0], 1) * target[:, 2].view(target.shape[0], 1),
target[:, 2].view(target.shape[0], 1))
#loss_R((target[:,1] < 10.0).float().view(target.shape[0],1) * r_pred + \
# (target[:,1] >= 10.0).float().view(target.shape[0],1) * target[:,1].view(target.shape[0],1), \
# target[:,1].view(target.shape[0],1))
return loss
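# Shape note (illustrative, not in the original): c_pred and r_pred are
# (batch, 1) tensors and target is (batch, dim_output), with target[:, 0] the
# binary class label and target[:, 2] the regression target; the regression
# term above is only active where 0 < target[:, 2] <= 3.0.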
# ========================================================================
# Helper functions
'''
Get the full path of all files under the directory: 'dirName'
'''
def getListOfFiles(dirName):
listOfFiles = os.listdir(dirName)
allFiles = list()
for entry in listOfFiles:
fullPath = os.path.join(dirName, entry)
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
else:
allFiles.append(fullPath)
return allFiles
'''
Print the distribution of data labels.
'''
def print_dist(label):
unique_labels = np.unique(label)
for l in unique_labels:
print ('Label = {}: {}%'.format(l, np.sum(label == l)/len(label)*100))
# ========================================================================
# ========================================================================
# Data Loading and preprocessing (Non Data-Loader case)
def load_data(filename):
'''
Load the data from h5 file to the numpy format.
(Only for non data-loader case)
'''
if not (os.path.exists(filename)):
logging.error("file: {}, does not exist".format(filename))
os._exit(1)
if os.path.splitext(filename)[1] != '.h5':
logging.error("file: {} is not an hdf5 file".format(filename))
os._exit(1)
samples = dict()
h5_file = h5py.File(filename, 'r')
for key in h5_file.keys():
samples[key] = h5_file[key][:]
print("load file success")
return samples['data']
def load_npy_data(dir):
'''
Load all .npy files under a certain dir;
merge them together into one;
return.
'''
def data_preprocessing(data):
'''
Preprocess the data.
(Only for non data-loader case)
- separate input X and output y
- process output label from {-1,0,1,2,3,4} to {0,1}
- Take out only those meaningful features
- shuffle data
'''
# Various input features separation
X_obs_old_features = data[:, 0:23]
X_surround_obs = data[:, -dim_output-8:-dim_output]
X_obs_now = data[:, 23:32]
X_obs_hist_5 = data[:, 23:68]
X_lane = data[:, 68:-dim_output-8]
# mask out those that don't have any history
# mask5 = (data[:,53] != 100)
X = np.concatenate((X_obs_old_features, X_obs_hist_5, X_lane), axis=1)
# X = X[mask5, :]
y = data[:, -dim_output:]
# y = y[mask5, :]
# Binary classification
y[:, 0] = (y[:, 0] > 0).astype(float)
#y[:, 0] = np.logical_and((y[:, 0] > 0), (y[:, 1] < 1.0))
# Random shuffling
X_new, X_dummy, y_new, y_dummy = train_test_split(
X, y, test_size=0.0, random_state=233)
return X_new, y_new # , X_dummy, y_dummy
# ========================================================================
# ========================================================================
# Data Loading and preprocessing (Data-Loader case)
'''
TODO: implement custom collate_fn to incorporate down-sampling function
for certain labels.
'''
def collate_wDownSample(batch):
return None
'''
If datasets are too large, use Dataloader to load from disk.
'''
class TrainValidDataset(Dataset):
'''
Args:
- root_dir (string): Directory containing all folders with different
dates, each folder containing .cruise.h5 data files.
'''
def __init__(self, list_of_files):
self.list_of_files_ = list_of_files
self.data_size_until_this_file_ = []
self.dataset_size = 0
for file in self.list_of_files_:
with h5py.File(file, 'r') as h5_file:
data_size = h5_file[list(h5_file.keys())[0]].shape[0]
self.dataset_size += data_size
self.data_size_until_this_file_.append(self.dataset_size)
#print ('Total size of dataset: {}'.format(self.data_size_until_this_file_))
def __len__(self):
return self.dataset_size
def __getitem__(self, index):
bin_idx = self.FindBin(index, 0, len(
self.data_size_until_this_file_)-1)
with h5py.File(self.list_of_files_[bin_idx], 'r') as h5_file:
idx_offset = self.data_size_until_this_file_[bin_idx] - \
h5_file[list(h5_file.keys())[0]].shape[0]
data = h5_file[list(h5_file.keys())[0]][index-idx_offset]
label = data[-dim_output:]
label[0] = (label[0] > 0.0).astype(float)
return data[:-dim_output], label
# Binary search to expedite the data-loading process.
def FindBin(self, index, start, end):
if (start == end):
return start
mid = int((start+end)/2.0)
if (self.data_size_until_this_file_[mid] <= index):
return self.FindBin(index, mid+1, end)
else:
return self.FindBin(index, start, mid)
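    # Illustrative note (not in the original): with cumulative sizes
    # [100, 250, 400], FindBin(180, 0, 2) returns 1, i.e. global sample 180
    # lives in the second file, at in-file offset 180 - 100 = 80.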
# ========================================================================
# ========================================================================
# Data training and validation
'''
Train the data. (vanilla version without dataloader)
'''
def train_vanilla(train_X, train_y, model, optimizer, epoch, batch_size=2048, balance=1.0):
model.train()
loss_history = []
logging.info('Epoch: {}'.format(epoch+1))
print ('Epoch: {}.'.format(epoch+1))
num_of_data = train_X.shape[0]
num_of_batch = int(num_of_data / batch_size) + 1
pred_y = None
for i in range(num_of_batch):
optimizer.zero_grad()
X = train_X[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
y = train_y[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
c_pred, r_pred = model(X)
loss = loss_fn(c_pred, r_pred, y, balance)
loss_history.append(loss.data)
loss.backward()
optimizer.step()
c_pred = c_pred.data.cpu().numpy()
c_pred = c_pred.reshape(c_pred.shape[0], 1)
pred_y = np.concatenate((pred_y, c_pred), axis=0) if pred_y is not None \
else c_pred
if (i > 0) and (i % 100 == 0):
logging.info('Step: {}, train_loss: {}'.format(
i, np.mean(loss_history[-100:])))
print ("Step: {}, training loss: {}".format(
i, np.mean(loss_history[-100:])))
pred_y = (pred_y > 0.0)
train_y = train_y.data.cpu().numpy()
training_accuracy = sklearn.metrics.accuracy_score(
train_y[:, 0], pred_y.reshape(-1))
train_loss = np.mean(loss_history)
logging.info('Training loss: {}'.format(train_loss))
logging.info('Training Accuracy: {}.'.format(training_accuracy))
print ('Training Loss: {}. Training Accuracy: {}'
.format(train_loss, training_accuracy))
'''
Validation (vanilla version without dataloader)
'''
def validate_vanilla(valid_X, valid_y, model, batch_size=2048, balance=1.0, pos_label=1.0):
model.eval()
loss_history = []
num_of_data = valid_X.shape[0]
num_of_batch = int(num_of_data / batch_size) + 1
pred_y = None
for i in range(num_of_batch):
X = valid_X[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
y = valid_y[i*batch_size: min(num_of_data, (i+1)*batch_size), ]
c_pred, r_pred = model(X)
valid_loss = loss_fn(c_pred, r_pred, y, balance)
loss_history.append(valid_loss.data)
c_pred = c_pred.data.cpu().numpy()
c_pred = c_pred.reshape(c_pred.shape[0], 1)
pred_y = np.concatenate((pred_y, c_pred), axis=0) if pred_y is not None \
else c_pred
valid_y = valid_y.data.cpu().numpy()
valid_auc = sklearn.metrics.roc_auc_score(
valid_y[:, 0], pred_y.reshape(-1))
pred_y = (pred_y > 0.0)
valid_accuracy = sklearn.metrics.accuracy_score(
valid_y[:, 0], pred_y.reshape(-1))
valid_precision = sklearn.metrics.precision_score(
valid_y[:, 0], pred_y.reshape(-1), pos_label=pos_label)
valid_recall = sklearn.metrics.recall_score(
valid_y[:, 0], pred_y.reshape(-1), pos_label=pos_label)
logging.info('Validation loss: {}. Accuracy: {}.\
Precision: {}. Recall: {}. AUC: {}.'
.format(np.mean(loss_history), valid_accuracy, valid_precision,
valid_recall, valid_auc))
print ('Validation loss: {}. Accuracy: {}.\
Precision: {}. Recall: {}. AUC: {}.'
.format(np.mean(loss_history), valid_accuracy, valid_precision,
valid_recall, valid_auc))
return np.mean(loss_history)
'''
Train the data. (using dataloader)
'''
def train_dataloader(train_loader, model, optimizer, epoch, balance=1.0):
model.train()
loss_history = []
train_correct_class = 0
total_size = 0
logging.info('Epoch: {}'.format(epoch))
for i, (inputs, targets) in enumerate(train_loader):
total_size += targets.shape[0]
optimizer.zero_grad()
if cuda_is_available:
X = (inputs).float().cuda()
y = (targets).float().cuda()
c_pred, r_pred = model(X)
loss = loss_fn(c_pred, r_pred, y, balance)
#loss.data[0].cpu().numpy()
loss_history.append(loss.data)
loss.backward()
optimizer.step()
train_correct_class += \
np.sum((c_pred.data.cpu().numpy() > 0.5).astype(float) ==
y[:, 0].data.cpu().numpy().reshape(c_pred.data.cpu().numpy().shape[0], 1))
#if i > 100:
# break
if i % 100 == 0:
logging.info('Step: {}, train_loss: {}'.format(
i, np.mean(loss_history[-100:])))
print ("Step: {}, training loss: {}".format(
i, np.mean(loss_history[-100:])))
train_loss = np.mean(loss_history)
logging.info('Training loss: {}'.format(train_loss))
print ('Epoch: {}. Training Loss: {}'.format(epoch, train_loss))
'''
Validation (using dataloader)
'''
def validate_dataloader(valid_loader, model, balance=1.0):
model.eval()
loss_history = []
valid_correct_class = 0.0
total_size = 0
for i, (X, y) in enumerate(valid_loader):
total_size += y.shape[0]
if cuda_is_available:
X = X.float().cuda()
y = y.float().cuda()
c_pred, r_pred = model(X)
valid_loss = loss_fn(c_pred, r_pred, y, balance)
loss_history.append(valid_loss.data)
valid_correct_class += \
np.sum((c_pred.data.cpu().numpy() > 0.5).astype(float) ==
y[:, 0].data.cpu().numpy().reshape(c_pred.data.cpu().numpy().shape[0], 1))
valid_classification_accuracy = valid_correct_class / total_size
logging.info('Validation loss: {}. Validation classification accuracy: {}'
.format(np.mean(loss_history), valid_classification_accuracy))
print ('Validation loss: {}. Classification accuracy: {}.'
.format(np.mean(loss_history), valid_classification_accuracy))
return valid_loss
# ========================================================================
# ========================================================================
# Main function:
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='train neural network based on feature files and save parameters')
parser.add_argument('train_file', type=str, help='training data (h5)')
parser.add_argument('valid_file', type=str, help='validation data (h5)')
parser.add_argument('-n', '--network-structure', type=int, default=1,
help='Specify which network to use:\n \
\t 0: Fully connected neural network.\n \
\t 1: 1D-CNN for lane feature extraction.')
parser.add_argument('-d', '--data-loader', action='store_true',
help='Use the dataloader (when memory size is smaller than dataset size)')
parser.add_argument('-s', '--save-path', type=str, default='./',
help='Specify the directory to save trained models.')
parser.add_argument('-g', '--go', action='store_true',
help='It is training lane-follow (go) cases.')
parser.add_argument('-b', '--balance', type=float, default=1.0,
help='Specify the weight for positive predictions.')
#parser.add_argument('-g', '--gpu_num', type=int, default=0, \
# help='Specify which GPU to use.')
args = parser.parse_args()
#os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' #specifies the same order as nvidia-smi
#os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_num)
if not args.data_loader:
# Load from file and print out general information of the data.
train_file = args.train_file
valid_file = args.valid_file
train_data = load_data(train_file)
valid_data = load_data(valid_file)
print ('Data loaded successfully.')
classes_train = np.asarray(train_data[:, -dim_output])
print ('Total number of training samples: {}'.format(len(classes_train)))
print ('Training set distribution:')
print_dist(classes_train)
classes_valid = np.asarray(valid_data[:, -dim_output])
print ('Total number of validation samples: {}'.format(len(classes_valid)))
print ('Validation set distribution:')
print_dist(classes_valid)
# Data preprocessing
X_train, y_train = data_preprocessing(train_data)
X_valid, y_valid = data_preprocessing(valid_data)
# Model declaration
model = None
if args.network_structure == 0:
model = FullyConn_NN()
elif args.network_structure == 1:
model = FCNN_CNN1D()
print ("The model used is: ")
print (model)
learning_rate = 6.561e-4
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.3, patience=2, min_lr=1e-8, verbose=1, mode='min')
# CUDA set-up:
cuda_is_available = torch.cuda.is_available()
if (cuda_is_available):
print ("Using CUDA to speed up training.")
model.cuda()
X_train = Variable(torch.FloatTensor(X_train).cuda())
X_valid = Variable(torch.FloatTensor(X_valid).cuda())
y_train = Variable(torch.FloatTensor(y_train).cuda())
y_valid = Variable(torch.FloatTensor(y_valid).cuda())
# Model training:
pos_label = 1.0
if args.go:
pos_label = 0.0
best_valid_loss = float('+inf')
for epoch in range(50):
train_vanilla(X_train, y_train, model, optimizer,
epoch, balance=args.balance)
valid_loss = validate_vanilla(
X_valid, y_valid, model, balance=args.balance, pos_label=pos_label)
scheduler.step(valid_loss)
if valid_loss < best_valid_loss:
best_valid_loss = valid_loss
torch.save(model.state_dict(), args.save_path + 'cruise_model{}_epoch{}_valloss{:.6f}.pt'
.format(args.network_structure, epoch+1, valid_loss))
else:
train_dir = args.train_file
valid_dir = args.valid_file
# Data preprocessing (training data balancing).
list_of_training_files = getListOfFiles(train_dir)
list_of_validation_files = getListOfFiles(valid_dir)
classes_train = []
for file in list_of_training_files:
with h5py.File(file, 'r') as h5_file:
data = h5_file[list(h5_file.keys())[0]][:, -2]
classes_train.append(data.tolist())
# "Flattening" the list of lists
classes_train = [item for sublist in classes_train for item in sublist]
classes_train = np.asarray(classes_train)
print ('Total number of training samples: {}'.format(len(classes_train)))
print ('Training set distribution:')
print_dist(classes_train)
classes_valid = []
for file in list_of_validation_files:
with h5py.File(file, 'r') as h5_file:
data = h5_file[list(h5_file.keys())[0]][:, -2]
classes_valid.append(data.tolist())
# "Flattening" the list of lists
classes_valid = [item for sublist in classes_valid for item in sublist]
classes_valid = np.asarray(classes_valid)
print ('Total number of validation samples: {}'.format(len(classes_valid)))
print ('Validation set distribution:')
print_dist(classes_valid)
#class_weights = class_weight.compute_class_weight('balanced', np.unique(classes_train), classes_train)
#weights = [class_weights[int(i+1)] for i in classes_train]
#weights = torch.DoubleTensor(weights)
#train_sampler = sampler.WeightedRandomSampler(weights, int(len(weights)/1), replacement=True)
model = FCNN_CNN1D()
learning_rate = 6.561e-4
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
optimizer, factor=0.3, patience=2, min_lr=1e-8, verbose=1, mode='min')
if (cuda_is_available):
print ('Using CUDA to speed up training.')
model.cuda()
train_dataset = TrainValidDataset(list_of_training_files)
valid_dataset = TrainValidDataset(list_of_validation_files)
train_loader = DataLoader(train_dataset, batch_size=1024, num_workers=8,
pin_memory=True, shuffle=True) # sampler=train_sampler)
valid_loader = DataLoader(
valid_dataset, batch_size=1024, num_workers=8, pin_memory=True)
for epoch in range(100):
train_dataloader(train_loader, model, optimizer, epoch)
valid_loss = validate_dataloader(valid_loader, model)
scheduler.step(valid_loss)
# ========================================================================
| 36.374636 | 111 | 0.625135 |
7941d3f85c65a9427a9c5a3bb6de94f55fab2624 | 5,157 | py | Python | support/blink1_ctypes.py | fullphat/redsquare | cd0114e605a90930210e61ea31a88f8fbc1325b9 | [
"MIT"
] | 1 | 2020-05-25T21:32:35.000Z | 2020-05-25T21:32:35.000Z | support/blink1_ctypes.py | fullphat/redsquare | cd0114e605a90930210e61ea31a88f8fbc1325b9 | [
"MIT"
] | null | null | null | support/blink1_ctypes.py | fullphat/redsquare | cd0114e605a90930210e61ea31a88f8fbc1325b9 | [
"MIT"
] | null | null | null | """
blink1_ctypes.py -- blink(1) Python library
Uses ctypes wrapper around blink1-lib C library (which in turn wraps HIDAPI)
Make sure you have the blink1-lib shared library in the same directory
as blink1_ctypes.py or in your LD_LIBRARY_PATH
Based on Stephen Youndt's script on how to wrap the C library
2013, Tod E. Kurt, http://thingm.com/
"""
import time
from ctypes import *
from ctypes.util import find_library
import inspect, os
import glob
# Find the blink1-lib C library
localpath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
libname = find_library("blink1")
if libname is None:
libname = find_library("Blink1")
if libname is None:
libname = find_library("blink1-lib")
if libname is None:
pathlist = glob.glob(os.path.join(localpath, '[Bb]link1-lib.so')) # unix
if pathlist: libname = pathlist[-1]
if libname is None:
pathlist = glob.glob(os.path.join(localpath, 'blink1-lib.dll')) # windows
if pathlist: libname = pathlist[-1]
if libname is None:
pathlist = glob.glob(os.path.join(localpath, 'lib[Bb]link1*')) # mac
if pathlist: libname = pathlist[-1]
# If we found the library, load it
if not libname:
raise ImportError("no blink1-lib shared library found")
libblink1 = CDLL(libname)
enumerate = libblink1.blink1_enumerate
enumerate.restype = c_int
enumerateByVidPid = libblink1.blink1_enumerateByVidPid
enumerateByVidPid.restype = c_int
enumerateByVidPid.argtypes = [c_int, c_int]
getCachedPath = libblink1.blink1_getCachedPath
getCachedPath.restype = c_char_p
getCachedPath.argtypes = [c_int]
getCachedSerial = libblink1.blink1_getCachedSerial
getCachedSerial.restype = c_char_p
getCachedSerial.argtypes = [c_int]
getCachedCount = libblink1.blink1_getCachedCount
getCachedCount.restype = c_int
open = libblink1.blink1_open
open.restype = c_void_p
openByPath = libblink1.blink1_openByPath
openByPath.restype = c_void_p
openByPath.argtypes = [c_char_p]
openBySerial = libblink1.blink1_openBySerial
openBySerial.restype = c_void_p
openBySerial.argtypes = [c_char_p]
openById = libblink1.blink1_openById
openById.restype = c_void_p
openById.argtypes = [c_int]
close = libblink1.blink1_close
close.argtypes = [c_void_p]
#
getVersion = libblink1.blink1_getVersion
getVersion.restype = c_int
getVersion.argtypes = [c_void_p]
fadeToRGB = libblink1.blink1_fadeToRGB
fadeToRGB.restype = c_int
fadeToRGB.argtypes = [c_void_p, c_ushort, c_ubyte, c_ubyte, c_ubyte]
fadeToRGBN = libblink1.blink1_fadeToRGBN
fadeToRGBN.restype = c_int
fadeToRGBN.argtypes = [c_void_p, c_ushort, c_ubyte, c_ubyte, c_ubyte, c_ubyte]
setRGB = libblink1.blink1_setRGB
setRGB.restype = c_int
setRGB.argtypes = [c_void_p, c_ubyte, c_ubyte, c_ubyte]
serverdown = libblink1.blink1_serverdown
serverdown.restype = c_int
serverdown.argtypes = [c_void_p, c_ubyte, c_ushort]
play = libblink1.blink1_play
play.restype = c_int
play.argtypes = [c_void_p, c_ubyte, c_ubyte]
writePatternLine = libblink1.blink1_writePatternLine
writePatternLine.restype = c_int
writePatternLine.argtypes = [c_void_p, c_ushort, c_ubyte, c_ubyte, c_ubyte, c_ubyte]
readPatternLine = libblink1.blink1_readPatternLine
readPatternLine.restype = c_int
readPatternLine.argtypes = [c_void_p, c_void_p,c_void_p,c_void_p,c_void_p,c_void_p]
enableDegamma = libblink1.blink1_enableDegamma
disableDegamma = libblink1.blink1_disableDegamma
#################################################################################
debug_rw = False
class Blink1:
def __init__(self):
self.dev = None
self.open()
def find(self):
return self.open()
def enumerate(self):
enumerate()
def open(self):
self.close()
self.dev = open()
def open_by_id(self,id):
self.dev = openById(id)
def close(self):
if self.dev != None:
close(self.dev)
self.dev = None
def notfound(self):
return None # fixme what to do here
def fade_to_rgbn(self, fade_millis, red,green,blue, ledn):
"""
Command blink(1) to fade to RGB color
"""
return fadeToRGBN( self.dev, fade_millis, red,green,blue, ledn)
def fade_to_rgb(self, fade_millis, red,green,blue):
"""
Command blink(1) to fade to RGB color
"""
return self.fade_to_rgbn( fade_millis, red,green,blue, 0)
    def playloop(self, play, startpos, endpos, count):
        """
        Start (play=1) or stop (play=0) a sub-loop of the stored pattern.
        Note: assumes the shared library exports blink1_playloop; if it does
        not, the call is skipped.
        """
        playloop_fn = getattr(libblink1, 'blink1_playloop', None)
        if playloop_fn is not None:
            playloop_fn(self.dev, play, startpos, endpos, count)
    def play(self, play, startpos):
        """
        Start (play=1) or stop (play=0) playing the stored pattern from
        position 'startpos'.
        """
        libblink1.blink1_play(self.dev, play, startpos)
def get_version(self):
"""
Get blink(1) firmware version
"""
return str(getVersion(self.dev))
def get_serialnumber(self):
'''
Get blink(1) serial number
'''
sernum = getCachedSerial(0)
if not sernum : sernum = '00000000'
return sernum
def get_serialnumbers(self): # FIXME:
seriallist = []
for i in range(0, getCachedCount()):
seriallist.append( getCachedSerial(i) )
return seriallist
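# Example usage (a sketch, not part of the original module; assumes a
# blink(1) device is plugged in and the shared library was found above):
#
#   b = Blink1()
#   print(b.get_version())
#   b.fade_to_rgb(500, 255, 0, 0)   # fade to red over 500 ms
#   time.sleep(1)
#   b.fade_to_rgb(500, 0, 0, 0)     # fade back to off
#   b.close()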
| 28.65 | 85 | 0.696529 |
7941d4c41897eefbb29ff935d1b88637e441b89e | 7,302 | py | Python | venv/lib/python3.6/site-packages/sqlalchemy/dialects/mysql/mysqldb.py | tchengatcincoai/cryptocoin-prices-compare | f295fecc7213a877bf717af0eb98414e9137b554 | [
"MIT"
] | 78 | 2017-08-19T03:46:13.000Z | 2020-02-19T04:29:45.000Z | desktop/core/ext-py/SQLAlchemy-1.2.0b3/lib/sqlalchemy/dialects/mysql/mysqldb.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 5 | 2017-08-21T16:33:08.000Z | 2018-06-21T18:37:18.000Z | desktop/core/ext-py/SQLAlchemy-1.2.0b3/lib/sqlalchemy/dialects/mysql/mysqldb.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 43 | 2018-02-05T23:23:46.000Z | 2021-07-28T22:51:42.000Z |
# mysql/mysqldb.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+mysqldb
:name: MySQL-Python
:dbapi: mysqldb
:connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://sourceforge.net/projects/mysql-python
.. _mysqldb_unicode:
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
Py3K Support
------------
Currently, MySQLdb only runs on Python 2 and development has been stopped.
`mysqlclient`_ is a fork of MySQLdb and provides Python 3 support as well
as some bugfixes.
.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python
Using MySQLdb with Google Cloud SQL
-----------------------------------
Google Cloud SQL now recommends use of the MySQLdb dialect. Connect
using a URL like the following::
mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>
Server Side Cursors
-------------------
The mysqldb dialect supports server-side cursors. See :ref:`mysql_ss_cursors`.
"""
from .base import (MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer)
from .base import TEXT
from ... import sql
from ... import util
import re
class MySQLExecutionContext_mysqldb(MySQLExecutionContext):
@property
def rowcount(self):
if hasattr(self, '_rowcount'):
return self._rowcount
else:
return self.cursor.rowcount
class MySQLCompiler_mysqldb(MySQLCompiler):
pass
class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer):
pass
class MySQLDialect_mysqldb(MySQLDialect):
driver = 'mysqldb'
supports_unicode_statements = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
supports_native_decimal = True
default_paramstyle = 'format'
execution_ctx_cls = MySQLExecutionContext_mysqldb
statement_compiler = MySQLCompiler_mysqldb
preparer = MySQLIdentifierPreparer_mysqldb
def __init__(self, server_side_cursors=False, **kwargs):
super(MySQLDialect_mysqldb, self).__init__(**kwargs)
self.server_side_cursors = server_side_cursors
@util.langhelpers.memoized_property
def supports_server_side_cursors(self):
try:
cursors = __import__('MySQLdb.cursors').cursors
self._sscursor = cursors.SSCursor
return True
except (ImportError, AttributeError):
return False
@classmethod
def dbapi(cls):
return __import__('MySQLdb')
def do_executemany(self, cursor, statement, parameters, context=None):
rowcount = cursor.executemany(statement, parameters)
if context is not None:
context._rowcount = rowcount
def _check_unicode_returns(self, connection):
# work around issue fixed in
# https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8
# specific issue w/ the utf8_bin collation and unicode returns
has_utf8_bin = self.server_version_info > (5, ) and \
connection.scalar(
"show collation where %s = 'utf8' and %s = 'utf8_bin'"
% (
self.identifier_preparer.quote("Charset"),
self.identifier_preparer.quote("Collation")
))
if has_utf8_bin:
additional_tests = [
sql.collate(sql.cast(
sql.literal_column(
"'test collated returns'"),
TEXT(charset='utf8')), "utf8_bin")
]
else:
additional_tests = []
return super(MySQLDialect_mysqldb, self)._check_unicode_returns(
connection, additional_tests)
def create_connect_args(self, url):
opts = url.translate_connect_args(database='db', username='user',
password='passwd')
opts.update(url.query)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'connect_timeout', int)
util.coerce_kw_type(opts, 'read_timeout', int)
util.coerce_kw_type(opts, 'client_flag', int)
util.coerce_kw_type(opts, 'local_infile', int)
# Note: using either of the below will cause all strings to be
# returned as Unicode, both in raw SQL operations and with column
# types like String and MSString.
util.coerce_kw_type(opts, 'use_unicode', bool)
util.coerce_kw_type(opts, 'charset', str)
# Rich values 'cursorclass' and 'conv' are not supported via
# query string.
ssl = {}
keys = ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']
for key in keys:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts['ssl'] = ssl
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
client_flag = opts.get('client_flag', 0)
if self.dbapi is not None:
try:
CLIENT_FLAGS = __import__(
self.dbapi.__name__ + '.constants.CLIENT'
).constants.CLIENT
client_flag |= CLIENT_FLAGS.FOUND_ROWS
except (AttributeError, ImportError):
self.supports_sane_rowcount = False
opts['client_flag'] = client_flag
return [[], opts]
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile(r'[.\-]')
for n in r.split(dbapi_con.get_server_info()):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def _extract_error_code(self, exception):
return exception.args[0]
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
try:
# note: the SQL here would be
# "SHOW VARIABLES LIKE 'character_set%%'"
cset_name = connection.connection.character_set_name
except AttributeError:
util.warn(
"No 'character_set_name' can be detected with "
"this MySQL-Python version; "
"please upgrade to a recent version of MySQL-Python. "
"Assuming latin1.")
return 'latin1'
else:
return cset_name()
_isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
'READ COMMITTED', 'REPEATABLE READ',
'AUTOCOMMIT'])
def _set_isolation_level(self, connection, level):
if level == 'AUTOCOMMIT':
connection.autocommit(True)
else:
connection.autocommit(False)
super(MySQLDialect_mysqldb, self)._set_isolation_level(connection,
level)
dialect = MySQLDialect_mysqldb
| 33.040724 | 95 | 0.618461 |
7941d5639f8d2aa0213dc65774928f1e91249cec | 1,254 | py | Python | mbrl/third_party/pytorch_sac/agent/critic.py | pecey/mbrl-lib | ebca518b35a1370dbaede2a1c96fcde714bc5489 | [
"MIT"
] | 592 | 2021-04-20T04:30:18.000Z | 2022-03-30T13:34:54.000Z | mbrl/third_party/pytorch_sac/agent/critic.py | pecey/mbrl-lib | ebca518b35a1370dbaede2a1c96fcde714bc5489 | [
"MIT"
] | 57 | 2021-04-21T17:20:05.000Z | 2022-03-28T15:31:45.000Z | mbrl/third_party/pytorch_sac/agent/critic.py | pecey/mbrl-lib | ebca518b35a1370dbaede2a1c96fcde714bc5489 | [
"MIT"
] | 76 | 2021-04-20T15:50:14.000Z | 2022-03-25T19:05:25.000Z |
import torch
from torch import nn
from mbrl.third_party.pytorch_sac import utils
class DoubleQCritic(nn.Module):
"""Critic network, employes double Q-learning."""
def __init__(self, obs_dim, action_dim, hidden_dim, hidden_depth):
super().__init__()
self.Q1 = utils.mlp(obs_dim + action_dim, hidden_dim, 1, hidden_depth)
self.Q2 = utils.mlp(obs_dim + action_dim, hidden_dim, 1, hidden_depth)
self.outputs = dict()
self.apply(utils.weight_init)
def forward(self, obs, action):
assert obs.size(0) == action.size(0)
obs_action = torch.cat([obs, action], dim=-1)
q1 = self.Q1(obs_action)
q2 = self.Q2(obs_action)
self.outputs["q1"] = q1
self.outputs["q2"] = q2
return q1, q2
def log(self, logger, step):
for k, v in self.outputs.items():
logger.log_histogram(f"train_critic/{k}_hist", v, step)
assert len(self.Q1) == len(self.Q2)
for i, (m1, m2) in enumerate(zip(self.Q1, self.Q2)):
assert type(m1) == type(m2)
if type(m1) is nn.Linear:
logger.log_param(f"train_critic/q1_fc{i}", m1, step)
logger.log_param(f"train_critic/q2_fc{i}", m2, step)
| 30.585366 | 78 | 0.606858 |
7941d58023e4359b860a8ca02acfd8a6374c3557 | 3,016 | py | Python | dmonscikit/dmonscikitdbscan.py | dice-project/DICE-Anomaly-Detection-Tool | a5eeacb9e888348adbe97be0c26a500f2f03ec6f | [
"Apache-2.0"
] | 4 | 2017-02-06T15:33:06.000Z | 2018-05-08T01:43:03.000Z | dmonscikit/dmonscikitdbscan.py | dice-project/DICE-Anomaly-Detection-Tool | a5eeacb9e888348adbe97be0c26a500f2f03ec6f | [
"Apache-2.0"
] | null | null | null | dmonscikit/dmonscikitdbscan.py | dice-project/DICE-Anomaly-Detection-Tool | a5eeacb9e888348adbe97be0c26a500f2f03ec6f | [
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
import os
import pandas as pd
##############################################################################
# Generate sample data
# centers = [[1, 1], [-1, -1], [1, -1]]
# X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
# random_state=0)
# print labels_true
# print X
dataDir = os.path.join(os.path.dirname(os.path.abspath('')), 'data')
data = pd.read_csv(os.path.join(dataDir, 'Final_Merge.csv'))
print data.shape
# kmeans_model = KMeans(n_clusters=5, random_state=1)
# good_columns = data._get_numeric_data()
X = StandardScaler().fit_transform(data)
# print X
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.9, min_samples=10).fit(X)
# core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
#
# core_samples_mask[db.core_sample_indices_] = True
print type(db)
print db.leaf_size
print db.algorithm
print db.eps
print db.min_samples
print db.n_jobs
labels = db.labels_
print labels
# print X[labels == -1]
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
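# A noise count can be reported the same way (illustrative sketch):
# n_noise_ = list(labels).count(-1)
# print('Estimated number of noise points: %d' % n_noise_)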
# print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
# print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
# print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
# print("Adjusted Rand Index: %0.3f"
# % metrics.adjusted_rand_score(labels_true, labels))
# print("Adjusted Mutual Information: %0.3f"
# % metrics.adjusted_mutual_info_score(labels_true, labels))
# print("Silhouette Coefficient: %0.3f"
# % metrics.silhouette_score(X, labels))
# ##############################################################################
# # Plot result
# import matplotlib.pyplot as plt
#
# # Black removed and is used for noise instead.
# unique_labels = set(labels)
# colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
# for k, col in zip(unique_labels, colors):
# if k == -1:
# # Black used for noise.
# col = 'k'
#
# class_member_mask = (labels == k)
#
# xy = X[class_member_mask & core_samples_mask]
# plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
# markeredgecolor='k', markersize=14)
#
# xy = X[class_member_mask & ~core_samples_mask]
# plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
# markeredgecolor='k', markersize=6)
#
# plt.title('Estimated number of clusters: %d' % n_clusters_)
# plt.show()
| 30.16 | 80 | 0.623674 |
7941d5c82199f6fd04f65d3d6c085ae095c8c9aa | 2,814 | py | Python | var/spack/repos/builtin/packages/rocm-device-libs/package.py | keerthana4708/spack | b3deaed2b32fe410b40537d969460f084dddadbb | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/rocm-device-libs/package.py | keerthana4708/spack | b3deaed2b32fe410b40537d969460f084dddadbb | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/rocm-device-libs/package.py | keerthana4708/spack | b3deaed2b32fe410b40537d969460f084dddadbb | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2022-02-22T12:42:18.000Z | 2022-02-22T12:42:18.000Z |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RocmDeviceLibs(CMakePackage):
"""set of AMD specific device-side language runtime libraries"""
homepage = "https://github.com/RadeonOpenCompute/ROCm-Device-Libs"
git = "https://github.com/RadeonOpenCompute/ROCm-Device-Libs.git"
url = "https://github.com/RadeonOpenCompute/ROCm-Device-Libs/archive/rocm-4.5.0.tar.gz"
maintainers = ['srekolam', 'arjun-raj-kuppala', 'haampie']
version('master', branch='amd-stg-open')
version('4.5.2', sha256='50e9e87ecd6b561cad0d471295d29f7220e195528e567fcabe2ec73838979f61')
version('4.5.0', sha256='78412fb10ceb215952b5cc722ed08fa82501b5848d599dc00744ae1bdc196f77')
version('4.3.1', sha256='a7291813168e500bfa8aaa5d1dccf5250764ddfe27535def01b51eb5021d4592')
version('4.3.0', sha256='055a67e63da6491c84cd45865500043553fb33c44d538313dd87040a6f3826f2')
version('4.2.0', sha256='34a2ac39b9bb7cfa8175cbab05d30e7f3c06aaffce99eed5f79c616d0f910f5f')
version('4.1.0', sha256='f5f5aa6bfbd83ff80a968fa332f80220256447c4ccb71c36f1fbd2b4a8e9fc1b')
version('4.0.0', sha256='d0aa495f9b63f6d8cf8ac668f4dc61831d996e9ae3f15280052a37b9d7670d2a')
version('3.10.0', sha256='bca9291385d6bdc91a8b39a46f0fd816157d38abb1725ff5222e6a0daa0834cc')
version('3.9.0', sha256='c99f45dacf5967aef9a31e3731011b9c142446d4a12bac69774998976f2576d7')
version('3.8.0', sha256='e82cc9a8eb7d92de02cabb856583e28f17a05c8cf9c97aec5275608ef1a38574')
version('3.7.0', sha256='b3a114180bf184b3b829c356067bc6a98021d52c1c6f9db6bc57272ebafc5f1d')
version('3.5.0', sha256='dce3a4ba672c4a2da4c2260ee4dc96ff6dd51877f5e7e1993cb107372a35a378')
variant('build_type', default='Release', values=("Release", "Debug", "RelWithDebInfo"), description='CMake build type')
depends_on('cmake@3:', type='build')
depends_on('zlib', type='link', when='@3.9.0:')
depends_on('texinfo', type='link', when='@3.9.0:')
# Make sure llvm is not built with rocm-device-libs (that is, it's already
# built with rocm-device-libs as an external project).
depends_on('llvm-amdgpu ~rocm-device-libs')
for ver in ['3.5.0', '3.7.0', '3.8.0', '3.9.0', '3.10.0', '4.0.0', '4.1.0',
'4.2.0', '4.3.0', '4.3.1', '4.5.0', '4.5.2', 'master']:
depends_on('rocm-cmake@' + ver, type='build', when='@' + ver)
depends_on('llvm-amdgpu@' + ver, when='@' + ver)
def cmake_args(self):
spec = self.spec
return [
self.define('LLVM_DIR', spec['llvm-amdgpu'].prefix),
self.define('CMAKE_C_COMPILER', spec['llvm-amdgpu'].prefix.bin.clang)
]
| 51.163636 | 123 | 0.712154 |
7941d6045c4b9e330b505217ea29784c96ff6a30 | 2,710 | py | Python | elasticutils/contrib/django/tests/__init__.py | peopledoc/elasticutils | 17e3f97b0ea36a7159ce806f48a662a3f8d678b3 | [
"BSD-3-Clause"
] | 59 | 2015-01-04T07:39:01.000Z | 2019-12-27T14:10:32.000Z | elasticutils/contrib/django/tests/__init__.py | tictail/elasticutils | de2987ff0bc2ee15cdfea13899d9c5e1347d75f0 | [
"BSD-3-Clause"
] | 11 | 2015-01-03T17:39:07.000Z | 2019-02-19T16:42:53.000Z | elasticutils/contrib/django/tests/__init__.py | tictail/elasticutils | de2987ff0bc2ee15cdfea13899d9c5e1347d75f0 | [
"BSD-3-Clause"
] | 21 | 2015-01-05T13:14:02.000Z | 2022-02-16T06:44:42.000Z |
# We need to put these in a separate module so they're easy to import
# on a test-by-test basis so that we can skip django-requiring tests
# if django isn't installed.
from elasticutils.contrib.django import MappingType, Indexable
_model_cache = []
def reset_model_cache():
del _model_cache[0:]
class Meta(object):
def __init__(self, db_table):
self.db_table = db_table
class SearchQuerySet(object):
# Yes. This is kind of crazy, but ... whatever.
def __init__(self, model):
self.model = model
self.steps = []
def get(self, pk):
pk = int(pk)
return [m for m in _model_cache if m.id == pk][0]
def filter(self, id__in=None):
self.steps.append(('filter', id__in))
return self
def order_by(self, *fields):
self.steps.append(('order_by', fields))
return self
def values_list(self, *args, **kwargs):
self.steps.append(('values_list', args, kwargs.pop('flat', False)))
return self
def __iter__(self):
order_by = None
values_list = None
objs = _model_cache
        for mem in self.steps:
            if mem[0] == 'filter':
                objs = [obj for obj in objs if obj.id in mem[1]]
            elif mem[0] == 'order_by':
                order_by = mem[1][0]
            elif mem[0] == 'values_list':
                values_list = (mem[1], mem[2])
        if order_by:
            objs.sort(key=lambda obj: getattr(obj, order_by))
if values_list:
# Note: Hard-coded to just id and flat
objs = [obj.id for obj in objs]
return iter(objs)
class Manager(object):
def get_query_set(self):
return SearchQuerySet(self)
def get(self, pk):
return self.get_query_set().get(pk)
def filter(self, *args, **kwargs):
return self.get_query_set().filter(*args, **kwargs)
def order_by(self, *args, **kwargs):
return self.get_query_set().order_by(*args, **kwargs)
def values_list(self, *args, **kwargs):
return self.get_query_set().values_list(*args, **kwargs)
class FakeModel(object):
_meta = Meta('fake')
objects = Manager()
def __init__(self, **kw):
self._doc = kw
for key in kw:
setattr(self, key, kw[key])
_model_cache.append(self)
class FakeDjangoMappingType(MappingType, Indexable):
@classmethod
def get_model(cls):
return FakeModel
@classmethod
def extract_document(cls, obj_id, obj=None):
if obj is None:
raise ValueError('I\'m a dumb mock object and I have no idea '
'what to do with these args.')
return obj._doc
| 25.809524 | 75 | 0.593727 |
7941d667ee65669cbeb144fb621a256dcef07879 | 4,206 | py | Python | setup.py | smacken/mockportfolio | bb7ef73ba19be52e22adf5b22e30fd1e9e0e585f | [
"MIT"
] | null | null | null | setup.py | smacken/mockportfolio | bb7ef73ba19be52e22adf5b22e30fd1e9e0e585f | [
"MIT"
] | 297 | 2019-10-21T17:26:33.000Z | 2021-07-19T17:19:23.000Z | setup.py | smacken/trading-mock-portfolio | bb7ef73ba19be52e22adf5b22e30fd1e9e0e585f | [
"MIT"
] | null | null | null |
''' mock portfolio '''
import os.path
# import codecs # To use a consistent encoding
import setuptools
import re
import time
here = os.path.abspath(os.path.dirname(__file__))
now = str(round(time.time()))
with open("README.md", "r") as f:
desc = f.read()
desc = desc.split("<!-- content -->")[-1]
desc = re.sub("<[^<]+?>", "", desc) # Remove html
def get_property(prop, project):
''' get a project property: from init '''
result = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop), open(project + '/__init__.py').read())
return result.group(1)
# Package name
pname = 'mockportfolio'
# Generate links
gurl = 'https://github.com/smacken/' + 'trading-mock-portfolio'
setuptools.setup(
name=pname,
version=get_property('__version__', pname) + f'.{now}',
description='Portfolio Engine',
long_description=desc,
long_description_content_type="text/markdown",
# The project's main homepage.
url=gurl,
# Author details
author='Scott Mackenzie',
author_email='[email protected]',
# Choose your license
license='GPLv3+',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Financial and Insurance Industry',
# Indicate which Topics are covered by the package
'Topic :: Software Development',
'Topic :: Office/Business :: Financial',
# Pick your license as you wish (should match "license" above)
('License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)'),
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
# Operating Systems on which it runs
'Operating System :: OS Independent',
],
# What does your project relate to?
keywords=['trading', 'development'],
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=setuptools.find_packages(exclude=['docs', 'docs2', 'samples']),
# packages=['mockportfolio', '],
# List run-time dependencies here.
# These will be installed by pip when your
# project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
# install_requires=['six'],
# List additional groups of dependencies here
# (e.g. development dependencies).
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require={},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={'sample': ['package_data.dat'],},
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={'console_scripts': ['sample=sample:main',],},
# scripts=['tools/bt-run.py'],
)
| 35.644068 | 107 | 0.651213 |
7941d728ba3052ec7d68f3352b7aae2b6daea405 | 4,128 | py | Python | san/transform.py | Singularity-DAO/sanpy | 51cffce07bd36548cb1d7eb8dc40136cf075729d | [
"MIT"
] | 1 | 2022-03-21T22:38:43.000Z | 2022-03-21T22:38:43.000Z | san/transform.py | Singularity-DAO/sanpy | 51cffce07bd36548cb1d7eb8dc40136cf075729d | [
"MIT"
] | null | null | null | san/transform.py | Singularity-DAO/sanpy | 51cffce07bd36548cb1d7eb8dc40136cf075729d | [
"MIT"
] | null | null | null | """
In order to have metrics, which require different order, we need to have transform
functions, which reorder or make different dictionaries in general.
"""
import operator
import pandas as pd
from san.pandas_utils import convert_to_datetime_idx_df
from functools import reduce
from collections import OrderedDict
from san.graphql import execute_gql
from san.sanbase_graphql_helper import QUERY_MAPPING
QUERY_PATH_MAP = {
'eth_top_transactions': ['ethTopTransactions'],
'eth_spent_over_time': ['ethSpentOverTime'],
'token_top_transactions': ['tokenTopTransactions'],
'get_metric': ['timeseriesData'],
'topic_search': ['chartData']
}
def path_to_data(idx, query, data):
"""
With this function we jump straight onto the key from the dataframe,
that we want and start from there. We use our future starting points from the QUERY_PATH_MAP.
"""
return reduce(
operator.getitem, [
'query_' + str(idx), ] + QUERY_PATH_MAP[query], data)
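# Illustrative example (not in the original): path_to_data(0, 'get_metric', data)
# is equivalent to data['query_0']['timeseriesData'].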
def transform_query_result(idx, query, data):
"""
If there is a transforming function for this query, then the result is
passed for it for another transformation
"""
if query in QUERY_PATH_MAP:
result = path_to_data(idx, query, data)
elif query in QUERY_MAPPING:
result = data['query_' + str(idx)]
else:
result = path_to_data(idx, 'get_metric', data)
if query + '_transform' in globals():
result = globals()[query + '_transform'](result)
return convert_to_datetime_idx_df(result)
def eth_top_transactions_transform(data):
return list(map(lambda column: {
'datetime': column['datetime'],
'fromAddress': column['fromAddress']['address'],
'fromAddressIsExchange': column['fromAddress']['isExchange'],
'toAddress': column['toAddress']['address'],
'toAddressIsExchange': column['toAddress']['isExchange'],
'trxHash': column['trxHash'],
'trxValue': column['trxValue']
}, data))
def top_transfers_transform(data):
return list(map(lambda column: {
'datetime': column['datetime'],
'fromAddress': column['fromAddress']['address'],
'toAddress': column['toAddress']['address'],
'trxHash': column['trxHash'],
'trxValue': column['trxValue']
}, data))
def news_transform(data):
result = list(map(lambda column: OrderedDict({
'datetime': column['datetime'],
'title': column['title'],
'description': column['description'],
'sourceName': column['sourceName'],
'url': column['url']
}), data))
return result
def token_top_transactions_transform(data):
return list(map(lambda column: {
'datetime': column['datetime'],
'fromAddress': column['fromAddress']['address'],
'fromAddressIsExchange': column['fromAddress']['isExchange'],
'toAddress': column['toAddress']['address'],
'toAddressIsExchange': column['toAddress']['isExchange'],
'trxHash': column['trxHash'],
'trxValue': column['trxValue']
}, data))
def emerging_trends_transform(data):
result = []
for column in data:
for i in range(0, len(column['topWords'])):
result.append({
'datetime': column['datetime'],
'score': column['topWords'][i]['score'],
'word': column['topWords'][i]['word']
})
result.sort(key=lambda elem: elem['datetime'])
return result
def top_social_gainers_losers_transform(data):
result = []
for column in data:
for i in range(0, len(column['projects'])):
result.append({
'datetime': column['datetime'],
'slug': column['projects'][i]['slug'],
'change': column['projects'][i]['change'],
'status': column['projects'][i]['status'],
})
result = list(map(lambda column: OrderedDict({
'datetime': column['datetime'],
'slug': column['slug'],
'change': column['change'],
'status': column['status']
}), result))
return result
| 32.25 | 97 | 0.626211 |
7941d736623b035efc0ec533604ca0753310c982 | 2,874 | py | Python | transform/stacked_autoencoder_tf.py | kashefy/transform | bb0855d06c878b5015810bbf6e1f9c7b6b1414a3 | [
"BSD-2-Clause"
] | null | null | null | transform/stacked_autoencoder_tf.py | kashefy/transform | bb0855d06c878b5015810bbf6e1f9c7b6b1414a3 | [
"BSD-2-Clause"
] | null | null | null | transform/stacked_autoencoder_tf.py | kashefy/transform | bb0855d06c878b5015810bbf6e1f9c7b6b1414a3 | [
"BSD-2-Clause"
] | null | null | null |
'''
Created on May 26, 2017
@author: kashefy
'''
from nideep.nets.abstract_net_tf import AbstractNetTF
from autoencoder_tf import Autoencoder as AE
class StackedAutoencoder(AbstractNetTF):
'''
classdocs
'''
def _init_learning_params_scoped(self):
pass
def _init_ops(self):
pass
def _in_op_cur(self):
in_op = None
if len(self.sae) > 0:
in_op = self.sae[-1].representation()
else:
in_op = self.in_op
return in_op
def stack(self, dim):
if not isinstance(dim, (list, tuple)):
dim = [dim]
for d in dim:
self.dims.append(d)
in_op = self._in_op_cur()
# Network Parameters
ae_params = {
'n_nodes' : dim,
'n_input' : int(in_op.get_shape()[-1]),
'prefix' : '%s-%d' % (self.prefix, len(self.sae)+1),
'reuse' : self.reuse,
'do_denoising' : self.do_denoising,
'input_noise_std' : self.input_noise_std,
}
ae = AE(ae_params)
ae.x = in_op
_, _ = ae.build()
if len(self.sae) > 0:
self.sae[-1].decoder(ae.p)
self.sae.append(ae)
if len(self.sae) == 1:
self.enc_in = self.sae[0].enc_in
# Targets (Labels) are the input data.
self._y_true = self.sae[-1].x
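    # Usage sketch (illustrative, not in the original): sae.stack(128)
    # followed by sae.stack(64) greedily adds two autoencoder layers, each
    # new layer reading the previous layer's representation as its input.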
@property
def y_true(self):
return self._y_true
@y_true.setter
def y_true(self, value):
self._y_true = value
def cost(self, name=None):
return self.sae[-1].cost_cross_entropy(self.y_true, name=name)
def vars_new(self):
return self.sae[-1].vars_new()
@property
def representation(self):
return self.sae[-1].representation()
@property
def x(self):
return self.sae[0].x
@property
def p(self):
return self.sae[0].p
@property
def logits(self):
return self.sae[0].logits
@property
def w(self):
w = {}
for ae in self.sae:
for k in ae.w:
w[k] = ae.w[k]
return w
@property
def vars_restored(self):
self._vars_restored = []
for ae in self.sae:
self._vars_restored.extend(ae.vars_restored)
return self._vars_restored
def __init__(self, params):
'''
Constructor
'''
self.dims = []
self.in_op = params['in_op']
params['n_input'] = int(self.in_op.get_shape()[-1])
super(StackedAutoencoder, self).__init__(params)
self.sae = []
self.do_denoising = params.get('do_denoising', False)
self.input_noise_std = params.get('input_noise_std', 0.)
if self.input_noise_std == 0.:
            self.do_denoising = False
| 26.127273 | 70 | 0.533403 |
7941d74b3a31046e7e3b166d5f847511c699425a | 78 | py | Python | dream/game/__init__.py | icyblade/dream | 818e77f1c25e51f8cd966f7aa4eb1bcd4207b208 | [
"MIT"
] | null | null | null | dream/game/__init__.py | icyblade/dream | 818e77f1c25e51f8cd966f7aa4eb1bcd4207b208 | [
"MIT"
] | null | null | null | dream/game/__init__.py | icyblade/dream | 818e77f1c25e51f8cd966f7aa4eb1bcd4207b208 | [
"MIT"
] | null | null | null |
class Game(object):
"""Abstract class of Texas Hold'em game."""
pass
| 15.6 | 47 | 0.628205 |
7941d7629d93b12b7ca31af401c1c19f4609c054 | 6,117 | py | Python | engine/migrations/0001_initial.py | ainterr/scoring_engine | d986eef08dcb819add20ed87d91239f887f62daa | [
"MIT"
] | null | null | null | engine/migrations/0001_initial.py | ainterr/scoring_engine | d986eef08dcb819add20ed87d91239f887f62daa | [
"MIT"
] | 7 | 2016-02-24T21:01:22.000Z | 2017-01-04T03:22:44.000Z | engine/migrations/0001_initial.py | ainterr/scoring_engine | d986eef08dcb819add20ed87d91239f887f62daa | [
"MIT"
] | 2 | 2016-03-04T17:04:48.000Z | 2020-01-30T21:03:49.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-30 19:11
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
],
options={
'verbose_name_plural': 'users',
'verbose_name': 'user',
'abstract': False,
},
),
migrations.CreateModel(
name='Credential',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=20)),
('password', models.CharField(max_length=40)),
('default', models.BooleanField()),
],
),
migrations.CreateModel(
name='Plugin',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Result',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.BooleanField(default=False)),
('plugin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='results', to='engine.Plugin')),
],
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, unique=True)),
('subnet_host', models.PositiveIntegerField()),
('port', models.PositiveIntegerField()),
('plugin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='services', to='engine.Plugin')),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, unique=True)),
('subnet', models.GenericIPAddressField(unique=True)),
('netmask', models.GenericIPAddressField()),
],
),
migrations.AddField(
model_name='result',
name='service',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='results', to='engine.Service'),
),
migrations.AddField(
model_name='result',
name='team',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='results', to='engine.Team'),
),
migrations.AddField(
model_name='credential',
name='services',
field=models.ManyToManyField(related_name='credentials', to='engine.Service'),
),
migrations.AddField(
model_name='credential',
name='team',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='credentials', to='engine.Team'),
),
migrations.AddField(
model_name='user',
name='team',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='users', to='engine.Team'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
| 52.732759 | 329 | 0.614353 |
7941d7bcf6767496d6e77091790a358a3a167815 | 10,355 | py | Python | codes/informed-ID-man-conflict.py | hamedwaezi01/ai | 74257b732eb2457ab98d93e5acd6ce37e8eb1d01 | [
"MIT"
] | 1 | 2020-10-30T08:28:30.000Z | 2020-10-30T08:28:30.000Z | codes/informed-ID-man-conflict.py | hamedwaezi01/ai | 74257b732eb2457ab98d93e5acd6ce37e8eb1d01 | [
"MIT"
] | null | null | null | codes/informed-ID-man-conflict.py | hamedwaezi01/ai | 74257b732eb2457ab98d93e5acd6ce37e8eb1d01 | [
"MIT"
] | null | null | null | '''
Hamed Waezi
AI HW1
IDA*
Heuristic => Manhattan Distance + Linear Conflicts
Cutoff => f(n)
'''
import heapq
import copy
print('Dimensions:')
dim = int(input())
tiles = dim*dim
class Node: # Node is the state
def __init__(self, n, data, pp, blank, g):
self.n = n
self.data = data # A 2D array
self.pp = pp
self.blank = blank
self.hash = None
self.heuristic = None
self.g = g
    def __str__(self,):
        # Return the flat list representation of the board; the output writer in
        # __main__ strips the brackets before writing each state to the file.
        return self.data.__str__()
def __hash__(self,):
if self.hash is not None:
return self.hash
hashBase = 67
hashMode = 1e12+7
self.hash = 1
for i in range(0,self.n):
for j in range(0,self.n):
self.hash = self.hash * hashBase
self.hash = self.hash + self.data[i][j]
self.hash = self.hash % hashMode
self.hash = int(self.hash)
return self.hash
def __gt__(self,other):
return self.f() > other.f()
def __lt__(self, other):
return self.f() < other.f()
def __eq__(self, other):
return self.hash == other.hash
def move(self, pp, direction):
if pp is None:
g = 1
else:
g = pp.g + 1
if direction == 0: # UP
newData = copy.deepcopy(self.data)
newData[self.blank[0]][self.blank[1]] = newData[self.blank[0] - 1][self.blank[1]]
newData[self.blank[0] - 1][self.blank[1]] = 0
temp = Node(n=self.n, data=newData, pp=self, blank=(self.blank[0] - 1, self.blank[1]), g=g)
return temp
elif direction == 1: # DOWN
newData = copy.deepcopy(self.data)
newData[self.blank[0]][self.blank[1]] = newData[self.blank[0] + 1][self.blank[1]]
newData[self.blank[0] + 1][self.blank[1]] = 0
temp = Node(n=self.n, data=newData, pp=self, blank=(self.blank[0] + 1, self.blank[1]), g=g)
return temp
elif direction == 2: # RIGHT
newData = copy.deepcopy(self.data)
newData[self.blank[0]][self.blank[1]] = newData[self.blank[0]][self.blank[1] + 1]
newData[self.blank[0]][self.blank[1] + 1] = 0
temp = Node(n=self.n, data=newData, pp=self, blank=(self.blank[0], self.blank[1] + 1), g=g)
return temp
elif direction == 3: # LEFT
newData = copy.deepcopy(self.data)
newData[self.blank[0]][self.blank[1]] = newData[self.blank[0]] [self.blank[1] - 1]
newData[self.blank[0]] [self.blank[1] - 1] = 0
temp = Node(n=self.n, data=newData, pp=self, blank=(self.blank[0], self.blank[1] - 1), g=g)
return temp
def manhattanDistance(self, curr, goal,): # h-value
return abs(curr[0]- goal // self.n) + abs(curr[1] - goal % self.n) # dX + dY
def linearConflicts(self,index):
conflicts = 0
for i in range(0,self.n):
for j in range(i+1, self.n):
# checking Columns
if self.data[i][index] != 0 and self.data[j][index] != 0 and (self.data[i][index] -1) % self.n == index and (self.data[j][index] - 1) % self.n == index and self.data[i][index] > self.data[j][index]:
conflicts = conflicts + 1
# checking Rows
if self.data[index][i] != 0 and self.data[index][j] != 0 and (self.data[index][i]-1) // self.n == index and (self.data[index][j] - 1) // self.n == index and self.data[index][i] > self.data[index][j]:
conflicts = conflicts + 1
return conflicts
    def f(self,):  # Always call f; the h-value is cached after the first evaluation
        if self.heuristic is not None:
            return self.heuristic + self.g
        heuristic = 0
        conflicts = 0
        for i in range(0, self.n):
            conflicts += self.linearConflicts(i)
            for j in range(0, self.n):
                temp = self.data[i][j] - 1
                if self.data[i][j] != 0:
                    heuristic = heuristic + self.manhattanDistance(curr=(i, j), goal=temp)
        self.conflicts = conflicts
        # Cache the full heuristic (Manhattan distance + 2 * linear conflicts) so that
        # cached and freshly computed calls to f() return the same value.
        self.heuristic = heuristic + conflicts * 2
        return self.heuristic + self.g
class Puzzle: # it is the current puzzle
def countInversions(self,):
dd = []
for i in range (0, self.n):
for j in range(0,self.n):
dd.append(self.root.data[i][j])
inversions = 0
for i in range(0,self.n*self.n-1):
for j in range(i+1, self.n*self.n):
if dd[j] != 0 and dd[i] != 0 and dd[i] > dd[j]:
inversions = inversions + 1
print('# Inversions : '+str(inversions))
return inversions
def isSolvable(self,):
inversions = self.countInversions()
# print(str(inversions)+ ' ' + str((self.root.blank[0] - self.n) % 1))
if self.n % 2 == 1:
return inversions % 2 == 0
else:
return (inversions % 2 == 0 and ((self.root.blank[0] - self.n) % 2 == 1)) or (inversions % 2 == 1 and ((self.root.blank[0] - self.n) % 2 == 0))
def __init__(self, n,): # `n` is the dim of puzzle
self.states = set() # it holds hashes pointing to states
self.root= []
blank = None
self.nodes = []
self.nodesExpanded = 1
self.nodesDeleted = 0
heapq.heapify(self.nodes)
self.n = n
goal = []
for i in range(0,self.n):
temp = []
for j in range(0,self.n):
temp.append(i * self.n + j + 1)
goal.append(temp)
goal[i][j] = 0
goal = Node(n=self.n,data=goal,pp=None,blank=(self.n - 1,self.n - 1), g = 0)
self.goalhash = goal.__hash__()
# print('Input your matrix')
# for i in range(0, self.n):
# temp = input().split()
# temp = list(map(int, temp))
# if len(temp) != self.n:
# raise Exception("Bad Input\n"+"Dimension is: "+str(self.n))
# for j in range(0, self.n):
# if temp[j] == 0:
# blank = (i,j)
# self.root.append(temp)
self.root=[[13,2,10,3],[1,12,8,4],[5,0,9,6],[15,14,11,7]]
blank=(2,1)
#### DEVIL'S CONFIGURATION
#self.root=[[0, 15, 8, 3], [12, 11, 7, 4] ,[14, 10, 5, 6], [9, 13, 2, 1]]
#blank=(0,0)
#####
#self.root=[[3, 4, 8, 12], [7, 5, 10, 14], [0, 1, 6, 15], [2, 9, 13, 11]]
#blank=(2,0)
self.root = Node(n=self.n,data=self.root, pp=None, blank=blank, g=1)
self.solvable = self.isSolvable()
heapq.heappush(self.nodes,self.root)
self.states.add(self.root)
def verify(self, node):
return node.__hash__() == self.goalhash
def __eventLoop__(self,cutoff):
iterations=0
print("Length of states : "+str(len(self.states)) + " & nodes : "+str(len(self.nodes)))
while True:
if len(self.nodes) == 0:
return None, iterations
#print(str(iterations))
bestNode = heapq.heappop(self.nodes)
#print('# expanded : ' + str(self.nodesExpanded) + ' # deleted : '+str(self.nodesDeleted) + ' F: '+str(bestNode.f()))
blank = bestNode.blank
moves = []
if blank[0] > 0:
moves.append(0)
if blank[0] < self.n-1 :
moves.append(1)
if blank[1] > 0 :
moves.append(3)
if blank[1] < self.n-1 :
moves.append(2)
# print('MOVES : '+str(moves))
for i in moves:
newNode = bestNode.move(pp=bestNode, direction=i)
if newNode in self.states or newNode.f() > cutoff:
self.nodesDeleted = self.nodesDeleted + 1
#print ('Expanded : ' + str(self.nodesExpanded) +' deleted : ' + str(self.nodesDeleted) + ' f : ' + str(bestNode.f())+' blank:'+ bestNode.blank.__str__() + ' node :\n'+bestNode.__str__()+' deleting')
del newNode
else:
iterations+=1
self.nodesExpanded = self.nodesExpanded + 1
# if self.nodesExpanded % 5000 == 0:
# print(self.nodesExpanded)
#print ('Expanded : ' + str(self.nodesExpanded) + ' deleted : ' + str(self.nodesDeleted) + ' f : ' + str(bestNode.f())+' blank:'+ bestNode.blank.__str__() + ' node :\n'+bestNode.__str__()+' adding')
# print('f : ' + str(newNode.f()) + ' ' + str(newNode.g))
if self.verify(newNode):
print('Done : ' + str(newNode.f()))
return newNode, iterations
self.states.add(newNode)
heapq.heappush(self.nodes,newNode)
def run(self,):
self.itr = 0
if not self.solvable:
print ('is not solvable')
return None
terminal = None # The leaf of tree which might be the goal
cutoff = 0
iterations=0
while terminal is None:
terminal, iterations=self.__eventLoop__(cutoff)
self.itr += iterations
print(str(self.itr)+' '+ str(self.nodesExpanded)+' '+str(iterations) + ' ' + str(cutoff))
if terminal is not None:
return terminal
cutoff += 1
self.states.clear()
self.nodes.clear()
self.nodes.append(self.root)
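
# --- Illustrative sketch (editor's addition, not part of the original solver) ---
# The module docstring names Manhattan distance plus linear conflicts as the
# heuristic. The hypothetical helper below shows just the Manhattan-distance part
# for a small board, independent of the Node/Puzzle classes above.
def demo_manhattan_sum(board):
    """Sum of Manhattan distances of all non-blank tiles to their goal cells."""
    n = len(board)
    total = 0
    for i in range(n):
        for j in range(n):
            tile = board[i][j]
            if tile != 0:
                goal = tile - 1  # row-major goal index of this tile
                total += abs(i - goal // n) + abs(j - goal % n)
    return total
# Example: demo_manhattan_sum([[1, 2, 3], [4, 5, 6], [0, 7, 8]]) == 2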
if __name__ == '__main__':
fileName = input()
puzzle = Puzzle(n=dim,)
res = puzzle.run()
if res is not None:
f = open(fileName,'w+')
temp = res
f.write(str(dim)+'\n')
badChars = ['[',']']
result = [str(res)]
while temp.pp is not None:
temp = temp.pp
result.append(str(temp))
for i in range(len(result)-1,-1,-1):
temp= result[i].replace('[','')
temp= temp.replace(']','')
f.write(temp+'\n')
f.write('NodesExpanded: '+str(puzzle.nodesExpanded))
f.close()
| 38.069853 | 220 | 0.499083 |
7941d88daf9ae8f320d610a717cdeed4551427bd | 2,065 | py | Python | src/panoptoindexconnector/implementations/debug_implementation.py | Panopto/panopto-index-connector | 8fc5597c42e4dd4ee0001ec8d1b67750cf5eab1f | [
"Apache-2.0"
] | 2 | 2020-09-30T06:42:34.000Z | 2021-11-05T15:19:06.000Z | src/panoptoindexconnector/implementations/debug_implementation.py | Panopto/panopto-index-connector | 8fc5597c42e4dd4ee0001ec8d1b67750cf5eab1f | [
"Apache-2.0"
] | 4 | 2021-08-03T21:29:21.000Z | 2022-03-11T19:51:51.000Z | src/panoptoindexconnector/implementations/debug_implementation.py | Panopto/panopto-index-connector | 8fc5597c42e4dd4ee0001ec8d1b67750cf5eab1f | [
"Apache-2.0"
] | 2 | 2020-09-29T15:17:42.000Z | 2021-08-02T04:04:36.000Z | """
Methods for the connector application to convert and sync content to the target endpoint
Implement these methods for the connector application
"""
# Standard Library Imports
import json
import logging
import os
# Third party
# import requests
# Global constants
DIR = os.path.dirname(os.path.realpath(__file__))
LOG = logging.getLogger(__name__)
#########################################################################
#
# Exported methods to implement
#
#########################################################################
# since this is debug, we'll disable using all the args
# pylint: disable=unused-argument
def convert_to_target(panopto_content, config):
"""
Implement this method to convert to target format
"""
field_mapping = config.field_mapping
LOG.info('Received the following panopto content: %s', json.dumps(panopto_content, indent=2))
target_content = {'id': panopto_content['Id']}
target_content['fields'] = {
field: panopto_content['VideoContent'][key]
for key, field in field_mapping['Metadata'].items()
}
target_content['fields'].update({
field: panopto_content['VideoContent'][key]
for key, field in field_mapping['Info'].items()
})
# Principals
target_content['permissions'] = [
{
'principal': {
'name': principal.get('Username', principal.get('Groupname')),
'type': 'user' if principal.get('Username') else 'group'
},
'readable': True
}
for principal in panopto_content['VideoContent']['Principals']
]
return target_content
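# Illustrative sketch (editor's addition): with a hypothetical config whose
# field_mapping is {'Metadata': {'Title': 'title'}, 'Info': {}} and a minimal
# panopto_content of the shape used above, e.g.
#   {'Id': 'abc', 'VideoContent': {'Title': 'Lecture 1', 'Principals': []}},
# convert_to_target would return
#   {'id': 'abc', 'fields': {'title': 'Lecture 1'}, 'permissions': []}.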
def push_to_target(target_content, config):
"""
Implement this method to push converted content to the target
"""
LOG.info('Would push the following to target: %s', json.dumps(target_content, indent=2))
def delete_from_target(video_id, config):
"""
Implement this method to push converted content to the target
"""
LOG.info('Would delete the following target: %s', video_id)
| 26.139241 | 97 | 0.621792 |
7941d8d7297023314ec46851ba8569eb75c28abc | 112,254 | py | Python | src/metpy/calc/thermo.py | gerritholl/MetPy | 3f08b770485835982989f34aedb87791af250301 | [
"BSD-3-Clause"
] | null | null | null | src/metpy/calc/thermo.py | gerritholl/MetPy | 3f08b770485835982989f34aedb87791af250301 | [
"BSD-3-Clause"
] | null | null | null | src/metpy/calc/thermo.py | gerritholl/MetPy | 3f08b770485835982989f34aedb87791af250301 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2008,2015,2016,2017,2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Contains a collection of thermodynamic calculations."""
import contextlib
import warnings
import numpy as np
import scipy.integrate as si
import scipy.optimize as so
import xarray as xr
from .tools import (_greater_or_close, _less_or_close, _remove_nans, find_bounding_indices,
find_intersections, first_derivative, get_layer)
from .. import constants as mpconsts
from ..cbook import broadcast_indices
from ..interpolate.one_dimension import interpolate_1d
from ..package_tools import Exporter
from ..units import check_units, concatenate, units
from ..xarray import add_vertical_dim_from_xarray, preprocess_and_wrap
exporter = Exporter(globals())
sat_pressure_0c = units.Quantity(6.112, 'millibar')
@exporter.export
@preprocess_and_wrap(wrap_like='temperature', broadcast=('temperature', 'dewpoint'))
@check_units('[temperature]', '[temperature]')
def relative_humidity_from_dewpoint(temperature, dewpoint):
r"""Calculate the relative humidity.
Uses temperature and dewpoint to calculate relative humidity as the ratio of vapor
pressure to saturation vapor pressures.
Parameters
----------
temperature : `pint.Quantity`
Air temperature
dewpoint : `pint.Quantity`
Dewpoint temperature
Returns
-------
`pint.Quantity`
Relative humidity
.. versionchanged:: 1.0
Renamed ``dewpt`` parameter to ``dewpoint``
See Also
--------
saturation_vapor_pressure
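    Examples
    --------
    A minimal, illustrative sketch (the value is approximate):
    >>> from metpy.calc import relative_humidity_from_dewpoint
    >>> from metpy.units import units
    >>> rh = relative_humidity_from_dewpoint(25. * units.degC, 15. * units.degC)
    >>> # rh is dimensionless, roughly 0.54 for these inputs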
"""
e = saturation_vapor_pressure(dewpoint)
e_s = saturation_vapor_pressure(temperature)
return (e / e_s)
@exporter.export
@preprocess_and_wrap(wrap_like='pressure')
@check_units('[pressure]', '[pressure]')
def exner_function(pressure, reference_pressure=mpconsts.P0):
r"""Calculate the Exner function.
.. math:: \Pi = \left( \frac{p}{p_0} \right)^\kappa
    This can be used to calculate potential temperature from temperature (and vice versa),
since:
.. math:: \Pi = \frac{T}{\theta}
Parameters
----------
pressure : `pint.Quantity`
Total atmospheric pressure
reference_pressure : `pint.Quantity`, optional
The reference pressure against which to calculate the Exner function, defaults to
metpy.constants.P0
Returns
-------
`pint.Quantity`
Value of the Exner function at the given pressure
See Also
--------
potential_temperature
temperature_from_potential_temperature
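    Examples
    --------
    A minimal, illustrative sketch (assumes the default reference pressure of 1000 hPa):
    >>> from metpy.calc import exner_function
    >>> from metpy.units import units
    >>> pi_ref = exner_function(1000. * units.hPa)  # 1.0 at the reference pressure
    >>> pi_850 = exner_function(850. * units.hPa)   # roughly 0.95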
"""
return (pressure / reference_pressure).to('dimensionless')**mpconsts.kappa
@exporter.export
@preprocess_and_wrap(wrap_like='temperature', broadcast=('pressure', 'temperature'))
@check_units('[pressure]', '[temperature]')
def potential_temperature(pressure, temperature):
r"""Calculate the potential temperature.
    Uses the Poisson equation to calculate the potential temperature
given `pressure` and `temperature`.
Parameters
----------
pressure : `pint.Quantity`
Total atmospheric pressure
temperature : `pint.Quantity`
Air temperature
Returns
-------
`pint.Quantity`
Potential temperature corresponding to the temperature and pressure
See Also
--------
dry_lapse
Notes
-----
Formula:
.. math:: \Theta = T (P_0 / P)^\kappa
Examples
--------
>>> from metpy.units import units
>>> metpy.calc.potential_temperature(800. * units.mbar, 273. * units.kelvin)
<Quantity(290.972015, 'kelvin')>
"""
return temperature / exner_function(pressure)
@exporter.export
@preprocess_and_wrap(
wrap_like='potential_temperature',
broadcast=('pressure', 'potential_temperature')
)
@check_units('[pressure]', '[temperature]')
def temperature_from_potential_temperature(pressure, potential_temperature):
r"""Calculate the temperature from a given potential temperature.
Uses the inverse of the Poisson equation to calculate the temperature from a
given potential temperature at a specific pressure level.
Parameters
----------
pressure : `pint.Quantity`
Total atmospheric pressure
potential_temperature : `pint.Quantity`
Potential temperature
Returns
-------
`pint.Quantity`
Temperature corresponding to the potential temperature and pressure
See Also
--------
dry_lapse
potential_temperature
Notes
-----
Formula:
.. math:: T = \Theta (P / P_0)^\kappa
Examples
--------
>>> from metpy.units import units
>>> from metpy.calc import temperature_from_potential_temperature
>>> # potential temperature
>>> theta = np.array([ 286.12859679, 288.22362587]) * units.kelvin
>>> p = 850 * units.mbar
>>> T = temperature_from_potential_temperature(p, theta)
.. versionchanged:: 1.0
Renamed ``theta`` parameter to ``potential_temperature``
"""
return potential_temperature * exner_function(pressure)
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
broadcast=('pressure', 'temperature', 'reference_pressure')
)
@check_units('[pressure]', '[temperature]', '[pressure]')
def dry_lapse(pressure, temperature, reference_pressure=None, vertical_dim=0):
r"""Calculate the temperature at a level assuming only dry processes.
This function lifts a parcel starting at ``temperature``, conserving
potential temperature. The starting pressure can be given by ``reference_pressure``.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
Starting temperature
reference_pressure : `pint.Quantity`, optional
Reference pressure; if not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The parcel's resulting temperature at levels given by ``pressure``
See Also
--------
moist_lapse : Calculate parcel temperature assuming liquid saturation processes
parcel_profile : Calculate complete parcel profile
potential_temperature
Notes
-----
Only reliably functions on 1D profiles (not higher-dimension vertical cross sections or
grids) unless reference_pressure is specified.
.. versionchanged:: 1.0
Renamed ``ref_pressure`` parameter to ``reference_pressure``
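    Examples
    --------
    A minimal, illustrative sketch (the value is approximate):
    >>> from metpy.calc import dry_lapse
    >>> from metpy.units import units
    >>> t500 = dry_lapse(500. * units.hPa, 300. * units.kelvin,
    ...                  reference_pressure=1000. * units.hPa)  # roughly 246 K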
"""
if reference_pressure is None:
reference_pressure = pressure[0]
return temperature * (pressure / reference_pressure)**mpconsts.kappa
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
broadcast=('pressure', 'temperature', 'reference_pressure')
)
@check_units('[pressure]', '[temperature]', '[pressure]')
def moist_lapse(pressure, temperature, reference_pressure=None):
r"""Calculate the temperature at a level assuming liquid saturation processes.
This function lifts a parcel starting at `temperature`. The starting pressure can
be given by `reference_pressure`. Essentially, this function is calculating moist
pseudo-adiabats.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure level(s) of interest
temperature : `pint.Quantity`
Starting temperature
reference_pressure : `pint.Quantity`, optional
Reference pressure; if not given, it defaults to the first element of the
pressure array.
Returns
-------
`pint.Quantity`
The resulting parcel temperature at levels given by `pressure`
See Also
--------
dry_lapse : Calculate parcel temperature assuming dry adiabatic processes
parcel_profile : Calculate complete parcel profile
Notes
-----
This function is implemented by integrating the following differential
equation:
.. math:: \frac{dT}{dP} = \frac{1}{P} \frac{R_d T + L_v r_s}
{C_{pd} + \frac{L_v^2 r_s \epsilon}{R_d T^2}}
This equation comes from [Bakhshaii2013]_.
Only reliably functions on 1D profiles (not higher-dimension vertical cross sections or
grids).
.. versionchanged:: 1.0
Renamed ``ref_pressure`` parameter to ``reference_pressure``
"""
def dt(t, p):
t = units.Quantity(t, temperature.units)
p = units.Quantity(p, pressure.units)
rs = saturation_mixing_ratio(p, t)
frac = ((mpconsts.Rd * t + mpconsts.Lv * rs)
/ (mpconsts.Cp_d + (mpconsts.Lv * mpconsts.Lv * rs * mpconsts.epsilon
/ (mpconsts.Rd * t * t)))).to('kelvin')
return (frac / p).magnitude
pressure = np.atleast_1d(pressure)
if reference_pressure is None:
reference_pressure = pressure[0]
if np.isnan(reference_pressure):
return units.Quantity(np.full(pressure.shape, np.nan), temperature.units)
pressure = pressure.to('mbar')
reference_pressure = reference_pressure.to('mbar')
temperature = np.atleast_1d(temperature)
side = 'left'
pres_decreasing = (pressure[0] > pressure[-1])
if pres_decreasing:
# Everything is easier if pressures are in increasing order
pressure = pressure[::-1]
side = 'right'
ref_pres_idx = np.searchsorted(pressure.m, reference_pressure.m, side=side)
ret_temperatures = np.empty((0, temperature.shape[0]))
if _greater_or_close(reference_pressure, pressure.min()):
# Integrate downward in pressure
pres_down = np.append(reference_pressure.m, pressure[(ref_pres_idx - 1)::-1].m)
trace_down = si.odeint(dt, temperature.m.squeeze(), pres_down.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_down[:0:-1]))
if reference_pressure < pressure.max():
# Integrate upward in pressure
pres_up = np.append(reference_pressure.m, pressure[ref_pres_idx:].m)
trace_up = si.odeint(dt, temperature.m.squeeze(), pres_up.squeeze())
ret_temperatures = np.concatenate((ret_temperatures, trace_up[1:]))
if pres_decreasing:
ret_temperatures = ret_temperatures[::-1]
return units.Quantity(ret_temperatures.T.squeeze(), temperature.units)
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]')
def lcl(pressure, temperature, dewpoint, max_iters=50, eps=1e-5):
r"""Calculate the lifted condensation level (LCL) from the starting point.
The starting state for the parcel is defined by `temperature`, `dewpoint`,
and `pressure`. If these are arrays, this function will return a LCL
for every index. This function does work with surface grids as a result.
Parameters
----------
pressure : `pint.Quantity`
Starting atmospheric pressure
temperature : `pint.Quantity`
Starting temperature
dewpoint : `pint.Quantity`
Starting dewpoint
Returns
-------
`pint.Quantity`
LCL pressure
`pint.Quantity`
LCL temperature
Other Parameters
----------------
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired relative error in the calculated value, defaults to 1e-5.
See Also
--------
parcel_profile
Notes
-----
This function is implemented using an iterative approach to solve for the
LCL. The basic algorithm is:
1. Find the dewpoint from the LCL pressure and starting mixing ratio
2. Find the LCL pressure from the starting temperature and dewpoint
3. Iterate until convergence
The function is guaranteed to finish by virtue of the `max_iters` counter.
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``dewpt`` parameter to ``dewpoint``
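    Examples
    --------
    A minimal, illustrative sketch (no output values asserted here):
    >>> from metpy.calc import lcl
    >>> from metpy.units import units
    >>> lcl_pressure, lcl_temperature = lcl(1000. * units.hPa, 30. * units.degC,
    ...                                     20. * units.degC)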
"""
def _lcl_iter(p, p0, w, t):
nonlocal nan_mask
td = globals()['dewpoint'](vapor_pressure(units.Quantity(p, pressure.units), w))
p_new = (p0 * (td / t) ** (1. / mpconsts.kappa)).m
nan_mask = nan_mask | np.isnan(p_new)
return np.where(np.isnan(p_new), p, p_new)
# Handle nans by creating a mask that gets set by our _lcl_iter function if it
# ever encounters a nan, at which point pressure is set to p, stopping iteration.
nan_mask = False
w = mixing_ratio(saturation_vapor_pressure(dewpoint), pressure)
lcl_p = so.fixed_point(_lcl_iter, pressure.m, args=(pressure.m, w, temperature),
xtol=eps, maxiter=max_iters)
lcl_p = np.where(nan_mask, np.nan, lcl_p)
# np.isclose needed if surface is LCL due to precision error with np.log in dewpoint.
# Causes issues with parcel_profile_with_lcl if removed. Issue #1187
lcl_p = units.Quantity(np.where(np.isclose(lcl_p, pressure.m), pressure.m, lcl_p),
pressure.units)
return lcl_p, globals()['dewpoint'](vapor_pressure(lcl_p, w)).to(temperature.units)
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def lfc(pressure, temperature, dewpoint, parcel_temperature_profile=None, dewpoint_start=None,
which='top'):
r"""Calculate the level of free convection (LFC).
This works by finding the first intersection of the ideal parcel path and
the measured parcel temperature. If this intersection occurs below the LCL,
the LFC is determined to be the same as the LCL, based upon the conditions
set forth in [USAF1990]_, pg 4-14, where a parcel must be lifted dry adiabatically
to saturation before it can freely rise.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure
temperature : `pint.Quantity`
Temperature at the levels given by `pressure`
dewpoint : `pint.Quantity`
Dewpoint at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel's temperature profile from which to calculate the LFC. Defaults to the
surface parcel profile.
dewpoint_start: `pint.Quantity`, optional
Dewpoint of the parcel for which to calculate the LFC. Defaults to the surface
dewpoint.
which: str, optional
Pick which LFC to return. Options are 'top', 'bottom', 'wide', 'most_cape', and 'all';
'top' returns the lowest-pressure LFC (default),
'bottom' returns the highest-pressure LFC,
'wide' returns the LFC whose corresponding EL is farthest away,
'most_cape' returns the LFC that results in the most CAPE in the profile.
Returns
-------
`pint.Quantity`
LFC pressure, or array of same if which='all'
`pint.Quantity`
LFC temperature, or array of same if which='all'
See Also
--------
parcel_profile
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
       Renamed ``dewpt``, ``dewpt_start`` parameters to ``dewpoint``, ``dewpoint_start``
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)
pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
if dewpoint_start is None:
dewpoint_start = dewpoint[0]
# The parcel profile and data may have the same first data point.
# If that is the case, ignore that point to get the real first
# intersection for the LFC calculation. Use logarithmic interpolation.
if np.isclose(parcel_temperature_profile[0].to(temperature.units).m, temperature[0].m):
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='increasing', log_x=True)
else:
x, y = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing', log_x=True)
# Compute LCL for this parcel for future comparisons
this_lcl = lcl(pressure[0], parcel_temperature_profile[0], dewpoint_start)
# The LFC could:
# 1) Not exist
# 2) Exist but be equal to the LCL
# 3) Exist and be above the LCL
# LFC does not exist or is LCL
if len(x) == 0:
# Is there any positive area above the LCL?
mask = pressure < this_lcl[0]
if np.all(_less_or_close(parcel_temperature_profile[mask], temperature[mask])):
# LFC doesn't exist
x = units.Quantity(np.nan, pressure.units)
y = units.Quantity(np.nan, temperature.units)
else: # LFC = LCL
x, y = this_lcl
return x, y
# LFC exists. Make sure it is no lower than the LCL
else:
idx = x < this_lcl[0]
# LFC height < LCL height, so set LFC = LCL
if not any(idx):
el_pressure, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='decreasing',
log_x=True)
if np.min(el_pressure) > this_lcl[0]:
x = units.Quantity(np.nan, pressure.units)
y = units.Quantity(np.nan, temperature.units)
else:
x, y = this_lcl
return x, y
# Otherwise, find all LFCs that exist above the LCL
# What is returned depends on which flag as described in the docstring
else:
return _multiple_el_lfc_options(x, y, idx, which, pressure,
parcel_temperature_profile, temperature,
dewpoint, intersect_type='LFC')
def _multiple_el_lfc_options(intersect_pressures, intersect_temperatures, valid_x,
which, pressure, parcel_temperature_profile, temperature,
dewpoint, intersect_type):
"""Choose which ELs and LFCs to return from a sounding."""
p_list, t_list = intersect_pressures[valid_x], intersect_temperatures[valid_x]
if which == 'all':
x, y = p_list, t_list
elif which == 'bottom':
x, y = p_list[0], t_list[0]
elif which == 'top':
x, y = p_list[-1], t_list[-1]
elif which == 'wide':
x, y = _wide_option(intersect_type, p_list, t_list, pressure,
parcel_temperature_profile, temperature)
elif which == 'most_cape':
x, y = _most_cape_option(intersect_type, p_list, t_list, pressure, temperature,
dewpoint, parcel_temperature_profile)
else:
raise ValueError('Invalid option for "which". Valid options are "top", "bottom", '
'"wide", "most_cape", and "all".')
return x, y
def _wide_option(intersect_type, p_list, t_list, pressure, parcel_temperature_profile,
temperature):
"""Calculate the LFC or EL that produces the greatest distance between these points."""
# zip the LFC and EL lists together and find greatest difference
if intersect_type == 'LFC':
# Find EL intersection pressure values
lfc_p_list = p_list
el_p_list, _ = find_intersections(pressure[1:], parcel_temperature_profile[1:],
temperature[1:], direction='decreasing',
log_x=True)
else: # intersect_type == 'EL'
el_p_list = p_list
# Find LFC intersection pressure values
lfc_p_list, _ = find_intersections(pressure, parcel_temperature_profile,
temperature, direction='increasing',
log_x=True)
diff = [lfc_p.m - el_p.m for lfc_p, el_p in zip(lfc_p_list, el_p_list)]
return (p_list[np.where(diff == np.max(diff))][0],
t_list[np.where(diff == np.max(diff))][0])
def _most_cape_option(intersect_type, p_list, t_list, pressure, temperature, dewpoint,
parcel_temperature_profile):
"""Calculate the LFC or EL that produces the most CAPE in the profile."""
# Need to loop through all possible combinations of cape, find greatest cape profile
cape_list, pair_list = [], []
for which_lfc in ['top', 'bottom']:
for which_el in ['top', 'bottom']:
cape, _ = cape_cin(pressure, temperature, dewpoint, parcel_temperature_profile,
which_lfc=which_lfc, which_el=which_el)
cape_list.append(cape.m)
pair_list.append([which_lfc, which_el])
(lfc_chosen, el_chosen) = pair_list[np.where(cape_list == np.max(cape_list))[0][0]]
if intersect_type == 'LFC':
if lfc_chosen == 'top':
x, y = p_list[-1], t_list[-1]
else: # 'bottom' is returned
x, y = p_list[0], t_list[0]
else: # EL is returned
if el_chosen == 'top':
x, y = p_list[-1], t_list[-1]
else:
x, y = p_list[0], t_list[0]
return x, y
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def el(pressure, temperature, dewpoint, parcel_temperature_profile=None, which='top'):
r"""Calculate the equilibrium level.
This works by finding the last intersection of the ideal parcel path and
the measured environmental temperature. If there is one or fewer intersections, there is
no equilibrium level.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Temperature at the levels given by `pressure`
dewpoint : `pint.Quantity`
Dewpoint at the levels given by `pressure`
parcel_temperature_profile: `pint.Quantity`, optional
The parcel's temperature profile from which to calculate the EL. Defaults to the
surface parcel profile.
which: str, optional
        Pick which EL to return. Options are 'top', 'bottom', 'wide', 'most_cape', and 'all'.
'top' returns the lowest-pressure EL, default.
'bottom' returns the highest-pressure EL.
'wide' returns the EL whose corresponding LFC is farthest away.
'most_cape' returns the EL that results in the most CAPE in the profile.
Returns
-------
`pint.Quantity`
EL pressure, or array of same if which='all'
`pint.Quantity`
EL temperature, or array of same if which='all'
See Also
--------
parcel_profile
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``dewpt`` parameter to ``dewpoint``
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
# Default to surface parcel if no profile or starting pressure level is given
if parcel_temperature_profile is None:
new_stuff = parcel_profile_with_lcl(pressure, temperature, dewpoint)
pressure, temperature, dewpoint, parcel_temperature_profile = new_stuff
parcel_temperature_profile = parcel_temperature_profile.to(temperature.units)
# If the top of the sounding parcel is warmer than the environment, there is no EL
if parcel_temperature_profile[-1] > temperature[-1]:
return (units.Quantity(np.nan, pressure.units),
units.Quantity(np.nan, temperature.units))
# Interpolate in log space to find the appropriate pressure - units have to be stripped
# and reassigned to allow np.log() to function properly.
x, y = find_intersections(pressure[1:], parcel_temperature_profile[1:], temperature[1:],
direction='decreasing', log_x=True)
lcl_p, _ = lcl(pressure[0], temperature[0], dewpoint[0])
idx = x < lcl_p
if len(x) > 0 and x[-1] < lcl_p:
return _multiple_el_lfc_options(x, y, idx, which, pressure,
parcel_temperature_profile, temperature, dewpoint,
intersect_type='EL')
else:
return (units.Quantity(np.nan, pressure.units),
units.Quantity(np.nan, temperature.units))
@exporter.export
@preprocess_and_wrap(wrap_like='pressure')
@check_units('[pressure]', '[temperature]', '[temperature]')
def parcel_profile(pressure, temperature, dewpoint):
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpoint`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure level(s) of interest. This array must be from
high to low pressure.
temperature : `pint.Quantity`
Starting temperature
dewpoint : `pint.Quantity`
Starting dewpoint
Returns
-------
`pint.Quantity`
The parcel's temperatures at the specified pressure levels
See Also
--------
lcl, moist_lapse, dry_lapse
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
.. versionchanged:: 1.0
Renamed ``dewpt`` parameter to ``dewpoint``
"""
_, _, _, t_l, _, t_u = _parcel_profile_helper(pressure, temperature, dewpoint)
return concatenate((t_l, t_u))
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]')
def parcel_profile_with_lcl(pressure, temperature, dewpoint):
r"""Calculate the profile a parcel takes through the atmosphere.
The parcel starts at `temperature`, and `dewpoint`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile. This function returns
a profile that includes the LCL.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure level(s) of interest. This array must be from
high to low pressure.
temperature : `pint.Quantity`
Atmospheric temperature at the levels in `pressure`. The first entry should be at
the same level as the first `pressure` data point.
dewpoint : `pint.Quantity`
Atmospheric dewpoint at the levels in `pressure`. The first entry should be at
the same level as the first `pressure` data point.
Returns
-------
pressure : `pint.Quantity`
The parcel profile pressures, which includes the specified levels and the LCL
ambient_temperature : `pint.Quantity`
Atmospheric temperature values, including the value interpolated to the LCL level
ambient_dew_point : `pint.Quantity`
Atmospheric dewpoint values, including the value interpolated to the LCL level
profile_temperature : `pint.Quantity`
The parcel profile temperatures at all of the levels in the returned pressures array,
including the LCL
See Also
--------
lcl, moist_lapse, dry_lapse, parcel_profile, parcel_profile_with_lcl_as_dataset
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Also, will only return Pint Quantities, even when given xarray DataArray profiles. To
obtain a xarray Dataset instead, use `parcel_profile_with_lcl_as_dataset` instead.
.. versionchanged:: 1.0
Renamed ``dewpt`` parameter to ``dewpoint``
"""
p_l, p_lcl, p_u, t_l, t_lcl, t_u = _parcel_profile_helper(pressure, temperature[0],
dewpoint[0])
new_press = concatenate((p_l, p_lcl, p_u))
prof_temp = concatenate((t_l, t_lcl, t_u))
new_temp = _insert_lcl_level(pressure, temperature, p_lcl)
new_dewp = _insert_lcl_level(pressure, dewpoint, p_lcl)
return new_press, new_temp, new_dewp, prof_temp
@exporter.export
def parcel_profile_with_lcl_as_dataset(pressure, temperature, dewpoint):
r"""Calculate the profile a parcel takes through the atmosphere, returning a Dataset.
The parcel starts at `temperature`, and `dewpoint`, lifted up
dry adiabatically to the LCL, and then moist adiabatically from there.
`pressure` specifies the pressure levels for the profile. This function returns
a profile that includes the LCL.
Parameters
----------
pressure : `pint.Quantity`
The atmospheric pressure level(s) of interest. This array must be from
high to low pressure.
temperature : `pint.Quantity`
The atmospheric temperature at the levels in `pressure`. The first entry should be at
the same level as the first `pressure` data point.
dewpoint : `pint.Quantity`
The atmospheric dewpoint at the levels in `pressure`. The first entry should be at
the same level as the first `pressure` data point.
Returns
-------
profile : `xarray.Dataset`
The interpolated profile with three data variables: ambient_temperature,
ambient_dew_point, and profile_temperature, all of which are on an isobaric
coordinate.
See Also
--------
lcl, moist_lapse, dry_lapse, parcel_profile, parcel_profile_with_lcl
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
"""
p, ambient_temperature, ambient_dew_point, profile_temperature = parcel_profile_with_lcl(
pressure,
temperature,
dewpoint
)
return xr.Dataset(
{
'ambient_temperature': (
('isobaric',),
ambient_temperature,
{'standard_name': 'air_temperature'}
),
'ambient_dew_point': (
('isobaric',),
ambient_dew_point,
{'standard_name': 'dew_point_temperature'}
),
'parcel_temperature': (
('isobaric',),
profile_temperature,
{'long_name': 'air_temperature_of_lifted_parcel'}
)
},
coords={
'isobaric': (
'isobaric',
p.m,
{'units': str(p.units), 'standard_name': 'air_pressure'}
)
}
)
def _parcel_profile_helper(pressure, temperature, dewpoint):
"""Help calculate parcel profiles.
Returns the temperature and pressure, above, below, and including the LCL. The
other calculation functions decide what to do with the pieces.
"""
# Find the LCL
press_lcl, temp_lcl = lcl(pressure[0], temperature, dewpoint)
press_lcl = press_lcl.to(pressure.units)
# Find the dry adiabatic profile, *including* the LCL. We need >= the LCL in case the
# LCL is included in the levels. It's slightly redundant in that case, but simplifies
# the logic for removing it later.
press_lower = concatenate((pressure[pressure >= press_lcl], press_lcl))
temp_lower = dry_lapse(press_lower, temperature)
# If the pressure profile doesn't make it to the lcl, we can stop here
if _greater_or_close(np.nanmin(pressure), press_lcl):
return (press_lower[:-1], press_lcl, units.Quantity(np.array([]), press_lower.units),
temp_lower[:-1], temp_lcl, units.Quantity(np.array([]), temp_lower.units))
# Find moist pseudo-adiabatic profile starting at the LCL
press_upper = concatenate((press_lcl, pressure[pressure < press_lcl]))
temp_upper = moist_lapse(press_upper, temp_lower[-1]).to(temp_lower.units)
# Return profile pieces
return (press_lower[:-1], press_lcl, press_upper[1:],
temp_lower[:-1], temp_lcl, temp_upper[1:])
def _insert_lcl_level(pressure, temperature, lcl_pressure):
"""Insert the LCL pressure into the profile."""
interp_temp = interpolate_1d(lcl_pressure, pressure, temperature)
# Pressure needs to be increasing for searchsorted, so flip it and then convert
# the index back to the original array
loc = pressure.size - pressure[::-1].searchsorted(lcl_pressure)
return units.Quantity(np.insert(temperature.m, loc, interp_temp.m), temperature.units)
@exporter.export
@preprocess_and_wrap(wrap_like='mixing_ratio', broadcast=('pressure', 'mixing_ratio'))
@check_units('[pressure]', '[dimensionless]')
def vapor_pressure(pressure, mixing_ratio):
r"""Calculate water vapor (partial) pressure.
Given total ``pressure`` and water vapor ``mixing_ratio``, calculates the
partial pressure of water vapor.
Parameters
----------
pressure : `pint.Quantity`
Total atmospheric pressure
mixing_ratio : `pint.Quantity`
Dimensionless mass mixing ratio
Returns
-------
`pint.Quantity`
Ambient water vapor (partial) pressure in the same units as ``pressure``
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.71:
.. math:: e = p \frac{r}{r + \epsilon}
.. versionchanged:: 1.0
Renamed ``mixing`` parameter to ``mixing_ratio``
See Also
--------
saturation_vapor_pressure, dewpoint
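    Examples
    --------
    A minimal, illustrative sketch (the value is approximate):
    >>> from metpy.calc import vapor_pressure
    >>> from metpy.units import units
    >>> e = vapor_pressure(1000. * units.hPa, 0.01 * units.dimensionless)  # roughly 15.8 hPa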
"""
return pressure * mixing_ratio / (mpconsts.epsilon + mixing_ratio)
@exporter.export
@preprocess_and_wrap(wrap_like='temperature')
@check_units('[temperature]')
def saturation_vapor_pressure(temperature):
r"""Calculate the saturation water vapor (partial) pressure.
Parameters
----------
temperature : `pint.Quantity`
Air temperature
Returns
-------
`pint.Quantity`
Saturation water vapor (partial) pressure
See Also
--------
vapor_pressure, dewpoint
Notes
-----
Instead of temperature, dewpoint may be used in order to calculate
the actual (ambient) water vapor (partial) pressure.
The formula used is that from [Bolton1980]_ for T in degrees Celsius:
.. math:: 6.112 e^\frac{17.67T}{T + 243.5}
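    Examples
    --------
    A minimal, illustrative sketch:
    >>> from metpy.calc import saturation_vapor_pressure
    >>> from metpy.units import units
    >>> es = saturation_vapor_pressure(273.15 * units.kelvin)  # 6.112 millibar by construction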
"""
# Converted from original in terms of C to use kelvin. Using raw absolute values of C in
# a formula plays havoc with units support.
return sat_pressure_0c * np.exp(17.67 * (temperature - units.Quantity(273.15, 'kelvin'))
/ (temperature - units.Quantity(29.65, 'kelvin')))
@exporter.export
@preprocess_and_wrap(wrap_like='temperature', broadcast=('temperature', 'relative_humidity'))
@check_units('[temperature]', '[dimensionless]')
def dewpoint_from_relative_humidity(temperature, relative_humidity):
r"""Calculate the ambient dewpoint given air temperature and relative humidity.
Parameters
----------
temperature : `pint.Quantity`
Air temperature
relative_humidity : `pint.Quantity`
Relative humidity expressed as a ratio in the range 0 < relative_humidity <= 1
Returns
-------
`pint.Quantity`
Dewpoint temperature
.. versionchanged:: 1.0
Renamed ``rh`` parameter to ``relative_humidity``
See Also
--------
dewpoint, saturation_vapor_pressure
"""
if np.any(relative_humidity > 1.2):
warnings.warn('Relative humidity >120%, ensure proper units.')
return dewpoint(relative_humidity * saturation_vapor_pressure(temperature))
@exporter.export
@preprocess_and_wrap(wrap_like='vapor_pressure')
@check_units('[pressure]')
def dewpoint(vapor_pressure):
r"""Calculate the ambient dewpoint given the vapor pressure.
Parameters
----------
    vapor_pressure : `pint.Quantity`
Water vapor partial pressure
Returns
-------
`pint.Quantity`
Dewpoint temperature
See Also
--------
dewpoint_from_relative_humidity, saturation_vapor_pressure, vapor_pressure
Notes
-----
This function inverts the [Bolton1980]_ formula for saturation vapor
    pressure to instead calculate the temperature. This yields the following
formula for dewpoint in degrees Celsius:
.. math:: T = \frac{243.5 log(e / 6.112)}{17.67 - log(e / 6.112)}
.. versionchanged:: 1.0
Renamed ``e`` parameter to ``vapor_pressure``
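    Examples
    --------
    A minimal, illustrative sketch:
    >>> from metpy.calc import dewpoint
    >>> from metpy.units import units
    >>> td = dewpoint(6.112 * units.millibar)  # 0 degC by construction of the formula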
"""
val = np.log(vapor_pressure / sat_pressure_0c)
return (units.Quantity(0., 'degC')
+ units.Quantity(243.5, 'delta_degC') * val / (17.67 - val))
@exporter.export
@preprocess_and_wrap(wrap_like='partial_press', broadcast=('partial_press', 'total_press'))
@check_units('[pressure]', '[pressure]', '[dimensionless]')
def mixing_ratio(partial_press, total_press, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate the mixing ratio of a gas.
This calculates mixing ratio given its partial pressure and the total pressure of
the air. There are no required units for the input arrays, other than that
they have the same units.
Parameters
----------
partial_press : `pint.Quantity`
Partial pressure of the constituent gas
total_press : `pint.Quantity`
Total air pressure
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air
(:math:`\epsilon\approx0.622`).
Returns
-------
`pint.Quantity`
The (mass) mixing ratio, dimensionless (e.g. Kg/Kg or g/g)
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.73:
.. math:: r = \epsilon \frac{e}{p - e}
.. versionchanged:: 1.0
Renamed ``part_press``, ``tot_press`` parameters to ``partial_press``, ``total_press``
See Also
--------
saturation_mixing_ratio, vapor_pressure
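    Examples
    --------
    A minimal, illustrative sketch (the value is approximate):
    >>> from metpy.calc import mixing_ratio
    >>> from metpy.units import units
    >>> w = mixing_ratio(10. * units.hPa, 1000. * units.hPa)  # roughly 0.0063 (about 6.3 g/kg)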
"""
return (molecular_weight_ratio * partial_press
/ (total_press - partial_press)).to('dimensionless')
@exporter.export
@preprocess_and_wrap(wrap_like='temperature', broadcast=('total_press', 'temperature'))
@check_units('[pressure]', '[temperature]')
def saturation_mixing_ratio(total_press, temperature):
r"""Calculate the saturation mixing ratio of water vapor.
This calculation is given total atmospheric pressure and air temperature.
Parameters
----------
total_press: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
Returns
-------
`pint.Quantity`
Saturation mixing ratio, dimensionless
Notes
-----
This function is a straightforward implementation of the equation given in many places,
such as [Hobbs1977]_ pg.73:
.. math:: r_s = \epsilon \frac{e_s}{p - e_s}
.. versionchanged:: 1.0
Renamed ``tot_press`` parameter to ``total_press``
"""
return mixing_ratio(saturation_vapor_pressure(temperature), total_press)
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
broadcast=('pressure', 'temperature', 'dewpoint')
)
@check_units('[pressure]', '[temperature]', '[temperature]')
def equivalent_potential_temperature(pressure, temperature, dewpoint):
r"""Calculate equivalent potential temperature.
This calculation must be given an air parcel's pressure, temperature, and dewpoint.
The implementation uses the formula outlined in [Bolton1980]_:
First, the LCL temperature is calculated:
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
Which is then used to calculate the potential temperature at the LCL:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{L}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
dewpoint: `pint.Quantity`
Dewpoint of parcel
Returns
-------
`pint.Quantity`
Equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used, since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
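    Examples
    --------
    A minimal, illustrative sketch (no output value asserted here):
    >>> from metpy.calc import equivalent_potential_temperature
    >>> from metpy.units import units
    >>> theta_e = equivalent_potential_temperature(850. * units.hPa, 20. * units.degC,
    ...                                            15. * units.degC)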
"""
t = temperature.to('kelvin').magnitude
td = dewpoint.to('kelvin').magnitude
r = saturation_mixing_ratio(pressure, dewpoint).magnitude
e = saturation_vapor_pressure(dewpoint)
t_l = 56 + 1. / (1. / (td - 56) + np.log(t / td) / 800.)
th_l = potential_temperature(pressure - e, temperature) * (t / t_l) ** (0.28 * r)
return th_l * np.exp(r * (1 + 0.448 * r) * (3036. / t_l - 1.78))
@exporter.export
@preprocess_and_wrap(wrap_like='temperature', broadcast=('pressure', 'temperature'))
@check_units('[pressure]', '[temperature]')
def saturation_equivalent_potential_temperature(pressure, temperature):
r"""Calculate saturation equivalent potential temperature.
This calculation must be given an air parcel's pressure and temperature.
The implementation uses the formula outlined in [Bolton1980]_ for the
equivalent potential temperature, and assumes a saturated process.
First, because we assume a saturated process, the temperature at the LCL is
equivalent to the current temperature. Therefore the following equation.
.. math:: T_{L}=\frac{1}{\frac{1}{T_{D}-56}+\frac{ln(T_{K}/T_{D})}{800}}+56
reduces to:
.. math:: T_{L} = T_{K}
Then the potential temperature at the temperature/LCL is calculated:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
\left(\frac{T_{K}}{T_{L}}\right)^{.28r}
However, because:
.. math:: T_{L} = T_{K}
it follows that:
.. math:: \theta_{DL}=T_{K}\left(\frac{1000}{p-e}\right)^k
Both of these are used to calculate the final equivalent potential temperature:
.. math:: \theta_{E}=\theta_{DL}\exp\left[\left(\frac{3036.}{T_{K}}
-1.78\right)*r(1+.448r)\right]
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Temperature of parcel
Returns
-------
`pint.Quantity`
Saturation equivalent potential temperature of the parcel
Notes
-----
[Bolton1980]_ formula for Theta-e is used (for saturated case), since according to
[DaviesJones2009]_ it is the most accurate non-iterative formulation
available.
"""
t = temperature.to('kelvin').magnitude
p = pressure.to('hPa').magnitude
e = saturation_vapor_pressure(temperature).to('hPa').magnitude
r = saturation_mixing_ratio(pressure, temperature).magnitude
th_l = t * (1000 / (p - e)) ** mpconsts.kappa
th_es = th_l * np.exp((3036. / t - 1.78) * r * (1 + 0.448 * r))
return units.Quantity(th_es, units.kelvin)
@exporter.export
@preprocess_and_wrap(wrap_like='temperature', broadcast=('temperature', 'mixing_ratio'))
@check_units('[temperature]', '[dimensionless]', '[dimensionless]')
def virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate virtual temperature.
This calculation must be given an air parcel's temperature and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.80.
Parameters
----------
temperature: `pint.Quantity`
Air temperature
mixing_ratio : `pint.Quantity`
Mass mixing ratio (dimensionless)
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`)
Returns
-------
`pint.Quantity`
Corresponding virtual temperature of the parcel
Notes
-----
.. math:: T_v = T \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
.. versionchanged:: 1.0
Renamed ``mixing`` parameter to ``mixing_ratio``
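    Examples
    --------
    A minimal, illustrative sketch (the value is approximate):
    >>> from metpy.calc import virtual_temperature
    >>> from metpy.units import units
    >>> tv = virtual_temperature(300. * units.kelvin, 0.01 * units.dimensionless)  # roughly 301.8 K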
"""
return temperature * ((mixing_ratio + molecular_weight_ratio)
/ (molecular_weight_ratio * (1 + mixing_ratio)))
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
broadcast=('pressure', 'temperature', 'mixing_ratio')
)
@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')
def virtual_potential_temperature(pressure, temperature, mixing_ratio,
molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate virtual potential temperature.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Markowski2010]_ pg.13.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio : `pint.Quantity`
Dimensionless mass mixing ratio
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`)
Returns
-------
`pint.Quantity`
Corresponding virtual potential temperature of the parcel
Notes
-----
.. math:: \Theta_v = \Theta \frac{\text{w} + \epsilon}{\epsilon\,(1 + \text{w})}
.. versionchanged:: 1.0
Renamed ``mixing`` parameter to ``mixing_ratio``
"""
pottemp = potential_temperature(pressure, temperature)
return virtual_temperature(pottemp, mixing_ratio, molecular_weight_ratio)
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
broadcast=('pressure', 'temperature', 'mixing_ratio')
)
@check_units('[pressure]', '[temperature]', '[dimensionless]', '[dimensionless]')
def density(pressure, temperature, mixing_ratio, molecular_weight_ratio=mpconsts.epsilon):
r"""Calculate density.
This calculation must be given an air parcel's pressure, temperature, and mixing ratio.
The implementation uses the formula outlined in [Hobbs2006]_ pg.67.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio : `pint.Quantity`
Mass mixing ratio (dimensionless)
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`)
Returns
-------
`pint.Quantity`
Corresponding density of the parcel
Notes
-----
.. math:: \rho = \frac{p}{R_dT_v}
.. versionchanged:: 1.0
Renamed ``mixing`` parameter to ``mixing_ratio``
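    Examples
    --------
    A minimal, illustrative sketch (approximate value; zero mixing ratio means dry air):
    >>> from metpy.calc import density
    >>> from metpy.units import units
    >>> rho = density(1000. * units.hPa, 300. * units.kelvin, 0. * units.dimensionless)
    >>> # rho is roughly 1.16 kg / m**3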
"""
virttemp = virtual_temperature(temperature, mixing_ratio, molecular_weight_ratio)
return (pressure / (mpconsts.Rd * virttemp)).to('kg/m**3')
@exporter.export
@preprocess_and_wrap(
wrap_like='dry_bulb_temperature',
broadcast=('pressure', 'dry_bulb_temperature', 'wet_bulb_temperature')
)
@check_units('[pressure]', '[temperature]', '[temperature]')
def relative_humidity_wet_psychrometric(pressure, dry_bulb_temperature, wet_bulb_temperature,
**kwargs):
r"""Calculate the relative humidity with wet bulb and dry bulb temperatures.
This uses a psychrometric relationship as outlined in [WMO8]_, with
coefficients from [Fan1987]_.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dry_bulb_temperature: `pint.Quantity`
Dry bulb temperature
wet_bulb_temperature: `pint.Quantity`
Wet bulb temperature
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
.. math:: RH = \frac{e}{e_s}
* :math:`RH` is relative humidity as a unitless ratio
* :math:`e` is vapor pressure from the wet psychrometric calculation
* :math:`e_s` is the saturation vapor pressure
.. versionchanged:: 1.0
Changed signature from
``(dry_bulb_temperature, web_bulb_temperature, pressure, **kwargs)``
See Also
--------
psychrometric_vapor_pressure_wet, saturation_vapor_pressure
"""
return (psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature,
wet_bulb_temperature, **kwargs)
/ saturation_vapor_pressure(dry_bulb_temperature))
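# Illustrative usage sketch (not part of the original module): relative humidity from a
# dry-bulb/wet-bulb pair, as read off a sling psychrometer. Sample values are arbitrary.
def _example_relative_humidity_wet_psychrometric():
    """Return RH for a sample dry-bulb/wet-bulb pair (hedged usage sketch)."""
    sample_pressure = units.Quantity(1000., 'hPa')
    sample_dry_bulb = units.Quantity(295., 'kelvin')
    sample_wet_bulb = units.Quantity(291., 'kelvin')
    return relative_humidity_wet_psychrometric(sample_pressure, sample_dry_bulb,
                                               sample_wet_bulb)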
@exporter.export
@preprocess_and_wrap(
wrap_like='dry_bulb_temperature',
broadcast=('pressure', 'dry_bulb_temperature', 'wet_bulb_temperature')
)
@check_units('[pressure]', '[temperature]', '[temperature]')
def psychrometric_vapor_pressure_wet(pressure, dry_bulb_temperature, wet_bulb_temperature,
psychrometer_coefficient=None):
r"""Calculate the vapor pressure with wet bulb and dry bulb temperatures.
This uses a psychrometric relationship as outlined in [WMO8]_, with
coefficients from [Fan1987]_.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
dry_bulb_temperature: `pint.Quantity`
Dry bulb temperature
wet_bulb_temperature: `pint.Quantity`
Wet bulb temperature
psychrometer_coefficient: `pint.Quantity`, optional
Psychrometer coefficient. Defaults to 6.21e-4 K^-1.
Returns
-------
`pint.Quantity`
Vapor pressure
Notes
-----
.. math:: e' = e'_w(T_w) - A p (T - T_w)
* :math:`e'` is vapor pressure
* :math:`e'_w(T_w)` is the saturation vapor pressure with respect to water at temperature
:math:`T_w`
* :math:`p` is the pressure of the wet bulb
* :math:`T` is the temperature of the dry bulb
* :math:`T_w` is the temperature of the wet bulb
* :math:`A` is the psychrometer coefficient
Psychrometer coefficient depends on the specific instrument being used and the ventilation
of the instrument.
.. versionchanged:: 1.0
Changed signature from
``(dry_bulb_temperature, wet_bulb_temperature, pressure, psychrometer_coefficient)``
See Also
--------
saturation_vapor_pressure
"""
if psychrometer_coefficient is None:
psychrometer_coefficient = units.Quantity(6.21e-4, '1/K')
return (saturation_vapor_pressure(wet_bulb_temperature) - psychrometer_coefficient
* pressure * (dry_bulb_temperature - wet_bulb_temperature).to('kelvin'))
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
broadcast=('pressure', 'temperature', 'relative_humidity')
)
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity):
r"""Calculate the mixing ratio from relative humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
relative_humidity: array_like
The relative humidity expressed as a unitless ratio in the range [0, 1]. Can also pass
a percentage if proper units are attached.
Returns
-------
`pint.Quantity`
Mixing ratio (dimensionless)
Notes
-----
Formula adapted from [Hobbs1977]_ pg. 74.
.. math:: w = (rh)(w_s)
* :math:`w` is mixing ratio
* :math:`rh` is relative humidity as a unitless ratio
* :math:`w_s` is the saturation mixing ratio
.. versionchanged:: 1.0
Changed signature from ``(relative_humidity, temperature, pressure)``
See Also
--------
relative_humidity_from_mixing_ratio, saturation_mixing_ratio
"""
return (relative_humidity
* saturation_mixing_ratio(pressure, temperature)).to('dimensionless')
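# Illustrative usage sketch (not part of the original module): mixing ratio implied by a
# relative humidity of 0.65 at 850 hPa and 10 degC. Sample values are arbitrary.
def _example_mixing_ratio_from_relative_humidity():
    """Return the mixing ratio for a sample RH observation (hedged usage sketch)."""
    sample_pressure = units.Quantity(850., 'hPa')
    sample_temperature = units.Quantity(283.15, 'kelvin')
    sample_rh = units.Quantity(0.65, 'dimensionless')
    return mixing_ratio_from_relative_humidity(sample_pressure, sample_temperature,
                                               sample_rh)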
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
broadcast=('pressure', 'temperature', 'mixing_ratio')
)
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def relative_humidity_from_mixing_ratio(pressure, temperature, mixing_ratio):
r"""Calculate the relative humidity from mixing ratio, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`
Dimensionless mass mixing ratio
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
Formula based on that from [Hobbs1977]_ pg. 74.
.. math:: rh = \frac{w}{w_s}
* :math:`rh` is relative humidity as a unitless ratio
* :math:`w` is mixing ratio
* :math:`w_s` is the saturation mixing ratio
.. versionchanged:: 1.0
Changed signature from ``(mixing_ratio, temperature, pressure)``
See Also
--------
mixing_ratio_from_relative_humidity, saturation_mixing_ratio
"""
return mixing_ratio / saturation_mixing_ratio(pressure, temperature)
@exporter.export
@preprocess_and_wrap(wrap_like='specific_humidity')
@check_units('[dimensionless]')
def mixing_ratio_from_specific_humidity(specific_humidity):
r"""Calculate the mixing ratio from specific humidity.
Parameters
----------
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Mixing ratio
Notes
-----
Formula from [Salby1996]_ pg. 118.
.. math:: w = \frac{q}{1-q}
* :math:`w` is mixing ratio
* :math:`q` is the specific humidity
See Also
--------
mixing_ratio, specific_humidity_from_mixing_ratio
"""
with contextlib.suppress(AttributeError):
specific_humidity = specific_humidity.to('dimensionless')
return specific_humidity / (1 - specific_humidity)
@exporter.export
@preprocess_and_wrap(wrap_like='mixing_ratio')
@check_units('[dimensionless]')
def specific_humidity_from_mixing_ratio(mixing_ratio):
r"""Calculate the specific humidity from the mixing ratio.
Parameters
----------
mixing_ratio: `pint.Quantity`
Mixing ratio
Returns
-------
`pint.Quantity`
Specific humidity
Notes
-----
Formula from [Salby1996]_ pg. 118.
.. math:: q = \frac{w}{1+w}
* :math:`w` is mixing ratio
* :math:`q` is the specific humidity
See Also
--------
mixing_ratio, mixing_ratio_from_specific_humidity
"""
with contextlib.suppress(AttributeError):
mixing_ratio = mixing_ratio.to('dimensionless')
return mixing_ratio / (1 + mixing_ratio)
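# Illustrative usage sketch (not part of the original module): the two conversions above
# are inverses of one another, so a round trip recovers the starting specific humidity.
def _example_specific_humidity_round_trip():
    """Convert q -> w -> q for a sample value (hedged usage sketch)."""
    sample_specific_humidity = units.Quantity(0.012, 'dimensionless')
    sample_mixing_ratio = mixing_ratio_from_specific_humidity(sample_specific_humidity)
    return specific_humidity_from_mixing_ratio(sample_mixing_ratio)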
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
broadcast=('pressure', 'temperature', 'specific_humidity')
)
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def relative_humidity_from_specific_humidity(pressure, temperature, specific_humidity):
r"""Calculate the relative humidity from specific humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Relative humidity
Notes
-----
Formula based on that from [Hobbs1977]_ pg. 74. and [Salby1996]_ pg. 118.
.. math:: relative_humidity = \frac{q}{(1-q)w_s}
* :math:`relative_humidity` is relative humidity as a unitless ratio
* :math:`q` is specific humidity
* :math:`w_s` is the saturation mixing ratio
.. versionchanged:: 1.0
Changed signature from ``(specific_humidity, temperature, pressure)``
See Also
--------
relative_humidity_from_mixing_ratio
"""
return (mixing_ratio_from_specific_humidity(specific_humidity)
/ saturation_mixing_ratio(pressure, temperature))
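# Illustrative usage sketch (not part of the original module): relative humidity implied
# by a specific humidity of 9 g/kg at 850 hPa and 15 degC. Sample values are arbitrary.
def _example_relative_humidity_from_specific_humidity():
    """Return RH for a sample specific-humidity observation (hedged usage sketch)."""
    sample_pressure = units.Quantity(850., 'hPa')
    sample_temperature = units.Quantity(288.15, 'kelvin')
    sample_specific_humidity = units.Quantity(0.009, 'dimensionless')
    return relative_humidity_from_specific_humidity(sample_pressure, sample_temperature,
                                                    sample_specific_humidity)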
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]', '[temperature]')
def cape_cin(pressure, temperature, dewpoint, parcel_profile, which_lfc='bottom',
which_el='top'):
r"""Calculate CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and parcel path. CIN is integrated between the surface and
LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points
of the measured temperature profile and parcel profile are logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure level(s) of interest, in order from highest to
lowest pressure
temperature : `pint.Quantity`
Atmospheric temperature corresponding to pressure
dewpoint : `pint.Quantity`
Atmospheric dewpoint corresponding to pressure
parcel_profile : `pint.Quantity`
Temperature profile of the parcel
which_lfc : str
Choose which LFC to integrate from. Valid options are 'top', 'bottom', 'wide',
and 'most_cape'. Default is 'bottom'.
which_el : str
Choose which EL to integrate to. Valid options are 'top', 'bottom', 'wide',
and 'most_cape'. Default is 'top'.
Returns
-------
`pint.Quantity`
Convective Available Potential Energy (CAPE)
`pint.Quantity`
Convective Inhibition (CIN)
Notes
-----
Formula adopted from [Hobbs1977]_.
.. math:: \text{CAPE} = -R_d \int_{LFC}^{EL} (T_{parcel} - T_{env}) d\text{ln}(p)
.. math:: \text{CIN} = -R_d \int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\text{ln}(p)
* :math:`CAPE` is convective available potential energy
* :math:`CIN` is convective inhibition
* :math:`LFC` is pressure of the level of free convection
* :math:`EL` is pressure of the equilibrium level
* :math:`SFC` is the level of the surface or beginning of parcel path
* :math:`R_d` is the gas constant
* :math:`g` is gravitational acceleration
* :math:`T_{parcel}` is the parcel temperature
* :math:`T_{env}` is environment temperature
* :math:`p` is atmospheric pressure
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``dewpt`` parameter to ``dewpoint``
See Also
--------
lfc, el
"""
pressure, temperature, dewpoint, parcel_profile = _remove_nans(pressure, temperature,
dewpoint, parcel_profile)
# Calculate LFC limit of integration
lfc_pressure, _ = lfc(pressure, temperature, dewpoint,
parcel_temperature_profile=parcel_profile, which=which_lfc)
# If there is no LFC, no need to proceed.
if np.isnan(lfc_pressure):
return units.Quantity(0, 'J/kg'), units.Quantity(0, 'J/kg')
else:
lfc_pressure = lfc_pressure.magnitude
# Calculate the EL limit of integration
el_pressure, _ = el(pressure, temperature, dewpoint,
parcel_temperature_profile=parcel_profile, which=which_el)
    # If there is no EL, use the top reading of the sounding.
if np.isnan(el_pressure):
el_pressure = pressure[-1].magnitude
else:
el_pressure = el_pressure.magnitude
# Difference between the parcel path and measured temperature profiles
y = (parcel_profile - temperature).to(units.degK)
# Estimate zero crossings
x, y = _find_append_zero_crossings(np.copy(pressure), y)
# CAPE
# Only use data between the LFC and EL for calculation
p_mask = _less_or_close(x.m, lfc_pressure) & _greater_or_close(x.m, el_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cape = (mpconsts.Rd
* units.Quantity(np.trapz(y_clipped, np.log(x_clipped)), 'K')).to(units('J/kg'))
# CIN
# Only use data between the surface and LFC for calculation
p_mask = _greater_or_close(x.m, lfc_pressure)
x_clipped = x[p_mask].magnitude
y_clipped = y[p_mask].magnitude
cin = (mpconsts.Rd
* units.Quantity(np.trapz(y_clipped, np.log(x_clipped)), 'K')).to(units('J/kg'))
# Set CIN to 0 if it's returned as a positive value (#1190)
if cin > units.Quantity(0, 'J/kg'):
cin = units.Quantity(0, 'J/kg')
return cape, cin
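# Illustrative usage sketch (not part of the original module): CAPE/CIN for a tiny
# idealized sounding. It assumes `parcel_profile` is defined earlier in this module; the
# profile values below are arbitrary and only show how the pieces fit together.
def _example_cape_cin():
    """Return CAPE and CIN for a small sample sounding (hedged usage sketch)."""
    sample_pressure = units.Quantity([1000., 900., 800., 700., 600., 500.], 'hPa')
    sample_temperature = units.Quantity([25., 20., 14., 7., -1., -10.], 'degC')
    sample_dewpoint = units.Quantity([20., 16., 10., 2., -8., -18.], 'degC')
    prof = parcel_profile(sample_pressure, sample_temperature[0], sample_dewpoint[0])
    return cape_cin(sample_pressure, sample_temperature, sample_dewpoint, prof)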
def _find_append_zero_crossings(x, y):
r"""
Find and interpolate zero crossings.
Estimate the zero crossings of an x,y series and add estimated crossings to series,
returning a sorted array with no duplicate values.
Parameters
----------
x : `pint.Quantity`
X values of data
y : `pint.Quantity`
Y values of data
Returns
-------
x : `pint.Quantity`
X values of data
y : `pint.Quantity`
Y values of data
"""
crossings = find_intersections(x[1:], y[1:],
units.Quantity(np.zeros_like(y[1:]), y.units), log_x=True)
x = concatenate((x, crossings[0]))
y = concatenate((y, crossings[1]))
# Resort so that data are in order
sort_idx = np.argsort(x)
x = x[sort_idx]
y = y[sort_idx]
# Remove duplicate data points if there are any
keep_idx = np.ediff1d(x.magnitude, to_end=[1]) > 1e-6
x = x[keep_idx]
y = y[keep_idx]
return x, y
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]')
def most_unstable_parcel(pressure, temperature, dewpoint, height=None, bottom=None,
depth=None):
"""
Determine the most unstable parcel in a layer.
Determines the most unstable parcel of air by calculating the equivalent
potential temperature and finding its maximum in the specified layer.
Parameters
----------
pressure: `pint.Quantity`
Atmospheric pressure profile
temperature: `pint.Quantity`
Atmospheric temperature profile
dewpoint: `pint.Quantity`
Atmospheric dewpoint profile
height: `pint.Quantity`, optional
Atmospheric height profile. Standard atmosphere assumed when None (the default).
bottom: `pint.Quantity`, optional
Bottom of the layer to consider for the calculation in pressure or height.
Defaults to using the bottom pressure or height.
depth: `pint.Quantity`, optional
Depth of the layer to consider for the calculation in pressure or height. Defaults
to 300 hPa.
Returns
-------
`pint.Quantity`
Pressure, temperature, and dewpoint of most unstable parcel in the profile
integer
Index of the most unstable parcel in the given profile
See Also
--------
get_layer
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``heights`` parameter to ``height``
"""
if depth is None:
depth = units.Quantity(300, 'hPa')
p_layer, t_layer, td_layer = get_layer(pressure, temperature, dewpoint, bottom=bottom,
depth=depth, height=height, interpolate=False)
theta_e = equivalent_potential_temperature(p_layer, t_layer, td_layer)
max_idx = np.argmax(theta_e)
return p_layer[max_idx], t_layer[max_idx], td_layer[max_idx], max_idx
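# Illustrative usage sketch (not part of the original module): locating the most unstable
# parcel in the lowest 300 hPa of a small sample sounding. Values are arbitrary.
def _example_most_unstable_parcel():
    """Return the most unstable parcel of a sample sounding (hedged usage sketch)."""
    sample_pressure = units.Quantity([1000., 925., 850., 700., 500.], 'hPa')
    sample_temperature = units.Quantity([24., 22., 17., 8., -10.], 'degC')
    sample_dewpoint = units.Quantity([21., 19., 14., 0., -20.], 'degC')
    return most_unstable_parcel(sample_pressure, sample_temperature, sample_dewpoint)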
@exporter.export
@add_vertical_dim_from_xarray
@preprocess_and_wrap()
@check_units('[temperature]', '[pressure]', '[temperature]')
def isentropic_interpolation(levels, pressure, temperature, *args, vertical_dim=0,
temperature_out=False, max_iters=50, eps=1e-6,
bottom_up_search=True, **kwargs):
r"""Interpolate data in isobaric coordinates to isentropic coordinates.
Parameters
----------
levels : array
One-dimensional array of desired potential temperature surfaces
pressure : array
One-dimensional array of pressure levels
temperature : array
Array of temperature
vertical_dim : int, optional
The axis corresponding to the vertical in the temperature array, defaults to 0.
temperature_out : bool, optional
If true, will calculate temperature and output as the last item in the output list.
Defaults to False.
max_iters : int, optional
Maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
bottom_up_search : bool, optional
Controls whether to search for levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
args : array, optional
Any additional variables will be interpolated to each isentropic level
Returns
-------
list
List with pressure at each isentropic level, followed by each additional
argument interpolated to isentropic coordinates.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
Will only return Pint Quantities, even when given xarray DataArray profiles. To
    obtain an xarray Dataset, use `isentropic_interpolation_as_dataset` instead.
.. versionchanged:: 1.0
Renamed ``theta_levels``, ``axis`` parameters to ``levels``, ``vertical_dim``
See Also
--------
potential_temperature, isentropic_interpolation_as_dataset
"""
# iteration function to be used later
# Calculates theta from linearly interpolated temperature and solves for pressure
def _isen_iter(iter_log_p, isentlevs_nd, ka, a, b, pok):
exner = pok * np.exp(-ka * iter_log_p)
t = a * iter_log_p + b
# Newton-Raphson iteration
f = isentlevs_nd - t * exner
fp = exner * (ka * t - a)
return iter_log_p - (f / fp)
# Get dimensions in temperature
ndim = temperature.ndim
# Convert units
pressure = pressure.to('hPa')
temperature = temperature.to('kelvin')
slices = [np.newaxis] * ndim
slices[vertical_dim] = slice(None)
slices = tuple(slices)
pressure = units.Quantity(np.broadcast_to(pressure[slices].magnitude, temperature.shape),
pressure.units)
# Sort input data
sort_pressure = np.argsort(pressure.m, axis=vertical_dim)
sort_pressure = np.swapaxes(np.swapaxes(sort_pressure, 0, vertical_dim)[::-1], 0,
vertical_dim)
sorter = broadcast_indices(pressure, sort_pressure, ndim, vertical_dim)
levs = pressure[sorter]
tmpk = temperature[sorter]
levels = np.asarray(levels.m_as('kelvin')).reshape(-1)
isentlevels = levels[np.argsort(levels)]
# Make the desired isentropic levels the same shape as temperature
shape = list(temperature.shape)
shape[vertical_dim] = isentlevels.size
isentlevs_nd = np.broadcast_to(isentlevels[slices], shape)
# exponent to Poisson's Equation, which is imported above
ka = mpconsts.kappa.m_as('dimensionless')
# calculate theta for each point
pres_theta = potential_temperature(levs, tmpk)
# Raise error if input theta level is larger than pres_theta max
if np.max(pres_theta.m) < np.max(levels):
raise ValueError('Input theta level out of data bounds')
# Find log of pressure to implement assumption of linear temperature dependence on
# ln(p)
log_p = np.log(levs.m)
# Calculations for interpolation routine
pok = mpconsts.P0 ** ka
# index values for each point for the pressure level nearest to the desired theta level
above, below, good = find_bounding_indices(pres_theta.m, levels, vertical_dim,
from_below=bottom_up_search)
# calculate constants for the interpolation
a = (tmpk.m[above] - tmpk.m[below]) / (log_p[above] - log_p[below])
b = tmpk.m[above] - a * log_p[above]
# calculate first guess for interpolation
isentprs = 0.5 * (log_p[above] + log_p[below])
# Make sure we ignore any nans in the data for solving; checking a is enough since it
# combines log_p and tmpk.
good &= ~np.isnan(a)
# iterative interpolation using scipy.optimize.fixed_point and _isen_iter defined above
log_p_solved = so.fixed_point(_isen_iter, isentprs[good],
args=(isentlevs_nd[good], ka, a[good], b[good], pok.m),
xtol=eps, maxiter=max_iters)
# get back pressure from log p
isentprs[good] = np.exp(log_p_solved)
# Mask out points we know are bad as well as points that are beyond the max pressure
isentprs[~(good & _less_or_close(isentprs, np.max(pressure.m)))] = np.nan
# create list for storing output data
ret = [units.Quantity(isentprs, 'hPa')]
# if temperature_out = true, calculate temperature and output as last item in list
if temperature_out:
ret.append(units.Quantity((isentlevs_nd / ((mpconsts.P0.m / isentprs) ** ka)), 'K'))
# do an interpolation for each additional argument
if args:
others = interpolate_1d(isentlevels, pres_theta.m, *(arr[sorter] for arr in args),
axis=vertical_dim, return_list_always=True)
ret.extend(others)
return ret
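# Illustrative usage sketch (not part of the original module): interpolating a 1-D
# temperature profile, plus one extra variable, onto two isentropic surfaces. The values
# are arbitrary; real use is typically on gridded 3-D fields.
def _example_isentropic_interpolation():
    """Interpolate sample profiles to isentropic levels (hedged usage sketch)."""
    sample_levels = units.Quantity([296., 300.], 'kelvin')
    sample_pressure = units.Quantity([1000., 850., 700., 500.], 'hPa')
    sample_temperature = units.Quantity([296., 288., 279., 264.], 'kelvin')
    sample_rh = units.Quantity([0.9, 0.75, 0.6, 0.4], 'dimensionless')
    return isentropic_interpolation(sample_levels, sample_pressure, sample_temperature,
                                    sample_rh)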
@exporter.export
def isentropic_interpolation_as_dataset(
levels,
temperature,
*args,
max_iters=50,
eps=1e-6,
bottom_up_search=True
):
r"""Interpolate xarray data in isobaric coords to isentropic coords, returning a Dataset.
Parameters
----------
levels : `pint.Quantity`
One-dimensional array of desired potential temperature surfaces
temperature : `xarray.DataArray`
Array of temperature
args : `xarray.DataArray`, optional
Any other given variables will be interpolated to each isentropic level. Must have
names in order to have a well-formed output Dataset.
max_iters : int, optional
The maximum number of iterations to use in calculation, defaults to 50.
eps : float, optional
The desired absolute error in the calculated value, defaults to 1e-6.
bottom_up_search : bool, optional
Controls whether to search for levels bottom-up, or top-down. Defaults to
True, which is bottom-up search.
Returns
-------
xarray.Dataset
Dataset with pressure, temperature, and each additional argument, all on the specified
isentropic coordinates.
Notes
-----
Input variable arrays must have the same number of vertical levels as the pressure levels
array. Pressure is calculated on isentropic surfaces by assuming that temperature varies
linearly with the natural log of pressure. Linear interpolation is then used in the
vertical to find the pressure at each isentropic level. Interpolation method from
[Ziv1994]_. Any additional arguments are assumed to vary linearly with temperature and will
be linearly interpolated to the new isentropic levels.
This formulation relies upon xarray functionality. If using Pint Quantities, use
`isentropic_interpolation` instead.
See Also
--------
potential_temperature, isentropic_interpolation
"""
# Ensure matching coordinates by broadcasting
all_args = xr.broadcast(temperature, *args)
# Obtain result as list of Quantities
ret = isentropic_interpolation(
levels,
all_args[0].metpy.vertical,
all_args[0].metpy.unit_array,
*(arg.metpy.unit_array for arg in all_args[1:]),
vertical_dim=all_args[0].metpy.find_axis_number('vertical'),
temperature_out=True,
max_iters=max_iters,
eps=eps,
bottom_up_search=bottom_up_search
)
# Reconstruct coordinates and dims (add isentropic levels, remove isobaric levels)
vertical_dim = all_args[0].metpy.find_axis_name('vertical')
new_coords = {
'isentropic_level': xr.DataArray(
levels.m,
dims=('isentropic_level',),
coords={'isentropic_level': levels.m},
name='isentropic_level',
attrs={
'units': str(levels.units),
'positive': 'up'
}
),
**{
key: value
for key, value in all_args[0].coords.items()
if key != vertical_dim
}
}
new_dims = [
dim if dim != vertical_dim else 'isentropic_level' for dim in all_args[0].dims
]
# Build final dataset from interpolated Quantities and original DataArrays
return xr.Dataset(
{
'pressure': (
new_dims,
ret[0],
{'standard_name': 'air_pressure'}
),
'temperature': (
new_dims,
ret[1],
{'standard_name': 'air_temperature'}
),
**{
all_args[i].name: (new_dims, ret[i + 1], all_args[i].attrs)
for i in range(1, len(all_args))
}
},
coords=new_coords
)
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]')
def surface_based_cape_cin(pressure, temperature, dewpoint):
r"""Calculate surface-based CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile for a surface-based parcel. CIN is integrated
between the surface and LFC, CAPE is integrated between the LFC and EL (or top of
sounding). Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile. The first entry should be the starting
(surface) observation, with the array going from high to low pressure.
temperature : `pint.Quantity`
Temperature profile corresponding to the `pressure` profile
dewpoint : `pint.Quantity`
Dewpoint profile corresponding to the `pressure` profile
Returns
-------
`pint.Quantity`
Surface based Convective Available Potential Energy (CAPE)
`pint.Quantity`
Surface based Convective Inhibition (CIN)
See Also
--------
cape_cin, parcel_profile
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
p, t, td, profile = parcel_profile_with_lcl(pressure, temperature, dewpoint)
return cape_cin(p, t, td, profile)
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]')
def most_unstable_cape_cin(pressure, temperature, dewpoint, **kwargs):
r"""Calculate most unstable CAPE/CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and most unstable parcel path. CIN is integrated between the
surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).
Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated.
Parameters
----------
pressure : `pint.Quantity`
Pressure profile
temperature : `pint.Quantity`
Temperature profile
dewpoint : `pint.Quantity`
Dew point profile
kwargs
Additional keyword arguments to pass to `most_unstable_parcel`
Returns
-------
`pint.Quantity`
Most unstable Convective Available Potential Energy (CAPE)
`pint.Quantity`
Most unstable Convective Inhibition (CIN)
See Also
--------
cape_cin, most_unstable_parcel, parcel_profile
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
"""
pressure, temperature, dewpoint = _remove_nans(pressure, temperature, dewpoint)
_, _, _, parcel_idx = most_unstable_parcel(pressure, temperature, dewpoint, **kwargs)
p, t, td, mu_profile = parcel_profile_with_lcl(pressure[parcel_idx:],
temperature[parcel_idx:],
dewpoint[parcel_idx:])
return cape_cin(p, t, td, mu_profile)
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]')
def mixed_layer_cape_cin(pressure, temperature, dewpoint, **kwargs):
r"""Calculate mixed-layer CAPE and CIN.
Calculate the convective available potential energy (CAPE) and convective inhibition (CIN)
of a given upper air profile and mixed-layer parcel path. CIN is integrated between the
surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding).
Intersection points of the measured temperature profile and parcel profile are
logarithmically interpolated. Kwargs for `mixed_parcel` can be provided, such as `depth`.
Default mixed-layer depth is 100 hPa.
Parameters
----------
pressure : `pint.Quantity`
Pressure profile
temperature : `pint.Quantity`
Temperature profile
dewpoint : `pint.Quantity`
Dewpoint profile
kwargs
Additional keyword arguments to pass to `mixed_parcel`
Returns
-------
`pint.Quantity`
Mixed-layer Convective Available Potential Energy (CAPE)
`pint.Quantity`
        Mixed-layer Convective Inhibition (CIN)
See Also
--------
cape_cin, mixed_parcel, parcel_profile
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
"""
depth = kwargs.get('depth', units.Quantity(100, 'hPa'))
parcel_pressure, parcel_temp, parcel_dewpoint = mixed_parcel(pressure, temperature,
dewpoint, **kwargs)
# Remove values below top of mixed layer and add in the mixed layer values
pressure_prof = pressure[pressure < (pressure[0] - depth)]
temp_prof = temperature[pressure < (pressure[0] - depth)]
dew_prof = dewpoint[pressure < (pressure[0] - depth)]
pressure_prof = concatenate([parcel_pressure, pressure_prof])
temp_prof = concatenate([parcel_temp, temp_prof])
dew_prof = concatenate([parcel_dewpoint, dew_prof])
p, t, td, ml_profile = parcel_profile_with_lcl(pressure_prof, temp_prof, dew_prof)
return cape_cin(p, t, td, ml_profile)
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]')
def mixed_parcel(pressure, temperature, dewpoint, parcel_start_pressure=None,
height=None, bottom=None, depth=None, interpolate=True):
r"""Calculate the properties of a parcel mixed from a layer.
Determines the properties of an air parcel that is the result of complete mixing of a
given atmospheric layer.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
dewpoint : `pint.Quantity`
Atmospheric dewpoint profile
parcel_start_pressure : `pint.Quantity`, optional
Pressure at which the mixed parcel should begin (default None)
height: `pint.Quantity`, optional
Atmospheric heights corresponding to the given pressures (default None)
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure
(default None)
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer
(default 100 hPa)
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data
Returns
-------
`pint.Quantity`
Pressure of the mixed parcel
`pint.Quantity`
Temperature of the mixed parcel
`pint.Quantity`
Dewpoint of the mixed parcel
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``p``, ``dewpt``, ``heights`` parameters to
``pressure``, ``dewpoint``, ``height``
"""
# If a parcel starting pressure is not provided, use the surface
if not parcel_start_pressure:
parcel_start_pressure = pressure[0]
if depth is None:
depth = units.Quantity(100, 'hPa')
# Calculate the potential temperature and mixing ratio over the layer
theta = potential_temperature(pressure, temperature)
mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)
# Mix the variables over the layer
mean_theta, mean_mixing_ratio = mixed_layer(pressure, theta, mixing_ratio, bottom=bottom,
height=height, depth=depth,
interpolate=interpolate)
# Convert back to temperature
mean_temperature = mean_theta * exner_function(parcel_start_pressure)
# Convert back to dewpoint
mean_vapor_pressure = vapor_pressure(parcel_start_pressure, mean_mixing_ratio)
# Using globals() here allows us to keep the dewpoint parameter but still call the
# function of the same name.
mean_dewpoint = globals()['dewpoint'](mean_vapor_pressure)
return (parcel_start_pressure, mean_temperature.to(temperature.units),
mean_dewpoint.to(dewpoint.units))
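# Illustrative usage sketch (not part of the original module): properties of a parcel
# fully mixed over the lowest 100 hPa of a small sample sounding. Values are arbitrary.
def _example_mixed_parcel():
    """Return the 100-hPa mixed parcel of a sample sounding (hedged usage sketch)."""
    sample_pressure = units.Quantity([1000., 950., 900., 850., 800.], 'hPa')
    sample_temperature = units.Quantity([25., 22., 19., 16., 13.], 'degC')
    sample_dewpoint = units.Quantity([19., 17., 14., 11., 8.], 'degC')
    return mixed_parcel(sample_pressure, sample_temperature, sample_dewpoint)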
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]')
def mixed_layer(pressure, *args, height=None, bottom=None, depth=None, interpolate=True):
r"""Mix variable(s) over a layer, yielding a mass-weighted average.
This function will integrate a data variable with respect to pressure and determine the
average value using the mean value theorem.
Parameters
----------
pressure : array-like
Atmospheric pressure profile
datavar : array-like
Atmospheric variable measured at the given pressures
height: array-like, optional
Atmospheric heights corresponding to the given pressures (default None)
bottom : `pint.Quantity`, optional
The bottom of the layer as a pressure or height above the surface pressure
(default None)
depth : `pint.Quantity`, optional
The thickness of the layer as a pressure or height above the bottom of the layer
(default 100 hPa)
interpolate : bool, optional
Interpolate the top and bottom points if they are not in the given data (default True)
Returns
-------
`pint.Quantity`
The mixed value of the data variable
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``p``, ``heights`` parameters to ``pressure``, ``height``
"""
if depth is None:
depth = units.Quantity(100, 'hPa')
layer = get_layer(pressure, *args, height=height, bottom=bottom,
depth=depth, interpolate=interpolate)
p_layer = layer[0]
datavars_layer = layer[1:]
ret = []
for datavar_layer in datavars_layer:
actual_depth = abs(p_layer[0] - p_layer[-1])
ret.append(units.Quantity(np.trapz(datavar_layer.m, p_layer.m) / -actual_depth.m,
datavar_layer.units))
return ret
@exporter.export
@preprocess_and_wrap(wrap_like='temperature', broadcast=('height', 'temperature'))
@check_units('[length]', '[temperature]')
def dry_static_energy(height, temperature):
r"""Calculate the dry static energy of parcels.
This function will calculate the dry static energy following the first two terms of
equation 3.72 in [Hobbs2006]_.
Notes
-----
    .. math:: \text{dry static energy} = c_{pd} * T + gz
* :math:`T` is temperature
* :math:`z` is height
Parameters
----------
height : `pint.Quantity`
Atmospheric height
temperature : `pint.Quantity`
Air temperature
Returns
-------
`pint.Quantity`
Dry static energy
.. versionchanged:: 1.0
Renamed ``heights`` parameter to ``height``
See Also
--------
montgomery_streamfunction
"""
return (mpconsts.g * height + mpconsts.Cp_d * temperature).to('kJ/kg')
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
broadcast=('height', 'temperature', 'specific_humidity')
)
@check_units('[length]', '[temperature]', '[dimensionless]')
def moist_static_energy(height, temperature, specific_humidity):
r"""Calculate the moist static energy of parcels.
This function will calculate the moist static energy following
equation 3.72 in [Hobbs2006]_.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
temperature : `pint.Quantity`
Air temperature
specific_humidity : `pint.Quantity`
Atmospheric specific humidity
Returns
-------
`pint.Quantity`
Moist static energy
Notes
-----
    .. math:: \text{moist static energy} = c_{pd} * T + gz + L_v q
* :math:`T` is temperature
* :math:`z` is height
* :math:`q` is specific humidity
.. versionchanged:: 1.0
Renamed ``heights`` parameter to ``height``
"""
return (dry_static_energy(height, temperature)
+ mpconsts.Lv * specific_humidity.to('dimensionless')).to('kJ/kg')
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]')
def thickness_hydrostatic(pressure, temperature, mixing_ratio=None,
molecular_weight_ratio=mpconsts.epsilon, bottom=None, depth=None):
r"""Calculate the thickness of a layer via the hypsometric equation.
This thickness calculation uses the pressure and temperature profiles (and optionally
mixing ratio) via the hypsometric equation with virtual temperature adjustment.
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
Which is based off of Equation 3.24 in [Hobbs2006]_.
This assumes a hydrostatic atmosphere. Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
mixing_ratio : `pint.Quantity`, optional
Profile of dimensionless mass mixing ratio. If none is given, virtual temperature
is simply set to be the given temperature.
molecular_weight_ratio : `pint.Quantity` or float, optional
The ratio of the molecular weight of the constituent gas to that assumed
for air. Defaults to the ratio for water vapor to dry air.
(:math:`\epsilon\approx0.622`)
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters
See Also
--------
thickness_hydrostatic_from_relative_humidity, pressure_to_height_std, virtual_temperature
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
.. versionchanged:: 1.0
Renamed ``mixing`` parameter to ``mixing_ratio``
"""
# Get the data for the layer, conditional upon bottom/depth being specified and mixing
# ratio being given
if bottom is None and depth is None:
if mixing_ratio is None:
layer_p, layer_virttemp = pressure, temperature
else:
layer_p = pressure
layer_virttemp = virtual_temperature(temperature, mixing_ratio,
molecular_weight_ratio)
else:
if mixing_ratio is None:
layer_p, layer_virttemp = get_layer(pressure, temperature, bottom=bottom,
depth=depth)
else:
layer_p, layer_temp, layer_w = get_layer(pressure, temperature, mixing_ratio,
bottom=bottom, depth=depth)
layer_virttemp = virtual_temperature(layer_temp, layer_w, molecular_weight_ratio)
# Take the integral (with unit handling) and return the result in meters
integral = units.Quantity(np.trapz(layer_virttemp.m_as('K'), np.log(layer_p.m_as('hPa'))),
units.K)
return (-mpconsts.Rd / mpconsts.g * integral).to('m')
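# Illustrative usage sketch (not part of the original module): 1000-500 hPa thickness of
# a simple dry profile. Values are arbitrary; a realistic answer is roughly 5.5 km.
def _example_thickness_hydrostatic():
    """Return the thickness of a sample 1000-500 hPa layer (hedged usage sketch)."""
    sample_pressure = units.Quantity([1000., 850., 700., 500.], 'hPa')
    sample_temperature = units.Quantity([288., 280., 270., 255.], 'kelvin')
    return thickness_hydrostatic(sample_pressure, sample_temperature)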
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]')
def thickness_hydrostatic_from_relative_humidity(pressure, temperature, relative_humidity,
bottom=None, depth=None):
r"""Calculate the thickness of a layer given pressure, temperature and relative humidity.
Similar to ``thickness_hydrostatic``, this thickness calculation uses the pressure,
temperature, and relative humidity profiles via the hypsometric equation with virtual
temperature adjustment
.. math:: Z_2 - Z_1 = -\frac{R_d}{g} \int_{p_1}^{p_2} T_v d\ln p,
which is based off of Equation 3.24 in [Hobbs2006]_. Virtual temperature is calculated
from the profiles of temperature and relative humidity.
This assumes a hydrostatic atmosphere.
Layer bottom and depth specified in pressure.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure profile
temperature : `pint.Quantity`
Atmospheric temperature profile
relative_humidity : `pint.Quantity`
Atmospheric relative humidity profile. The relative humidity is expressed as a
unitless ratio in the range [0, 1]. Can also pass a percentage if proper units are
attached.
bottom : `pint.Quantity`, optional
The bottom of the layer in pressure. Defaults to the first observation.
depth : `pint.Quantity`, optional
The depth of the layer in hPa. Defaults to the full profile if bottom is not given,
and 100 hPa if bottom is given.
Returns
-------
`pint.Quantity`
The thickness of the layer in meters
See Also
--------
thickness_hydrostatic, pressure_to_height_std, virtual_temperature,
mixing_ratio_from_relative_humidity
Notes
-----
Only functions on 1D profiles (not higher-dimension vertical cross sections or grids).
Since this function returns scalar values when given a profile, this will return Pint
Quantities even when given xarray DataArray profiles.
"""
mixing = mixing_ratio_from_relative_humidity(pressure, temperature, relative_humidity)
return thickness_hydrostatic(pressure, temperature, mixing_ratio=mixing, bottom=bottom,
depth=depth)
@exporter.export
@add_vertical_dim_from_xarray
@preprocess_and_wrap(wrap_like='height', broadcast=('height', 'potential_temperature'))
@check_units('[length]', '[temperature]')
def brunt_vaisala_frequency_squared(height, potential_temperature, vertical_dim=0):
r"""Calculate the square of the Brunt-Vaisala frequency.
Brunt-Vaisala frequency squared (a measure of atmospheric stability) is given by the
formula:
.. math:: N^2 = \frac{g}{\theta} \frac{d\theta}{dz}
This formula is based off of Equations 3.75 and 3.77 in [Hobbs2006]_.
Parameters
----------
height : `xarray.DataArray` or `pint.Quantity`
Atmospheric (geopotential) height
potential_temperature : `xarray.DataArray` or `pint.Quantity`
Atmospheric potential temperature
vertical_dim : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0,
unless `height` and `potential_temperature` given as `xarray.DataArray`, in which case
it is automatically determined from the coordinate metadata.
Returns
-------
`pint.Quantity` or `xarray.DataArray`
The square of the Brunt-Vaisala frequency. Given as `pint.Quantity`, unless both
`height` and `potential_temperature` arguments are given as `xarray.DataArray`, in
which case will be `xarray.DataArray`.
.. versionchanged:: 1.0
Renamed ``heights``, ``axis`` parameters to ``height``, ``vertical_dim``
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_period, potential_temperature
"""
# Ensure validity of temperature units
potential_temperature = potential_temperature.to('K')
# Calculate and return the square of Brunt-Vaisala frequency
return mpconsts.g / potential_temperature * first_derivative(
potential_temperature,
x=height,
axis=vertical_dim
)
@exporter.export
@add_vertical_dim_from_xarray
@preprocess_and_wrap(wrap_like='height', broadcast=('height', 'potential_temperature'))
@check_units('[length]', '[temperature]')
def brunt_vaisala_frequency(height, potential_temperature, vertical_dim=0):
r"""Calculate the Brunt-Vaisala frequency.
This function will calculate the Brunt-Vaisala frequency as follows:
.. math:: N = \left( \frac{g}{\theta} \frac{d\theta}{dz} \right)^\frac{1}{2}
This formula based off of Equations 3.75 and 3.77 in [Hobbs2006]_.
This function is a wrapper for `brunt_vaisala_frequency_squared` that filters out negative
(unstable) quantities and takes the square root.
Parameters
----------
height : `xarray.DataArray` or `pint.Quantity`
Atmospheric (geopotential) height
potential_temperature : `xarray.DataArray` or `pint.Quantity`
Atmospheric potential temperature
vertical_dim : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0,
unless `height` and `potential_temperature` given as `xarray.DataArray`, in which case
it is automatically determined from the coordinate metadata.
Returns
-------
`pint.Quantity` or `xarray.DataArray`
Brunt-Vaisala frequency. Given as `pint.Quantity`, unless both
`height` and `potential_temperature` arguments are given as `xarray.DataArray`, in
which case will be `xarray.DataArray`.
.. versionchanged:: 1.0
Renamed ``heights``, ``axis`` parameters to ``height``, ``vertical_dim``
See Also
--------
brunt_vaisala_frequency_squared, brunt_vaisala_period, potential_temperature
"""
bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,
vertical_dim=vertical_dim)
bv_freq_squared[bv_freq_squared.magnitude < 0] = np.nan
return np.sqrt(bv_freq_squared)
@exporter.export
@add_vertical_dim_from_xarray
@preprocess_and_wrap(wrap_like='height', broadcast=('height', 'potential_temperature'))
@check_units('[length]', '[temperature]')
def brunt_vaisala_period(height, potential_temperature, vertical_dim=0):
r"""Calculate the Brunt-Vaisala period.
This function is a helper function for `brunt_vaisala_frequency` that calculates the
period of oscillation as in Exercise 3.13 of [Hobbs2006]_:
.. math:: \tau = \frac{2\pi}{N}
    Returns `NaN` when :math:`N^2 \le 0`.
Parameters
----------
height : `xarray.DataArray` or `pint.Quantity`
Atmospheric (geopotential) height
potential_temperature : `xarray.DataArray` or `pint.Quantity`
Atmospheric potential temperature
vertical_dim : int, optional
The axis corresponding to vertical in the potential temperature array, defaults to 0,
unless `height` and `potential_temperature` given as `xarray.DataArray`, in which case
it is automatically determined from the coordinate metadata.
Returns
-------
`pint.Quantity` or `xarray.DataArray`
Brunt-Vaisala period. Given as `pint.Quantity`, unless both
`height` and `potential_temperature` arguments are given as `xarray.DataArray`, in
which case will be `xarray.DataArray`.
.. versionchanged:: 1.0
Renamed ``heights``, ``axis`` parameters to ``height``, ``vertical_dim``
See Also
--------
brunt_vaisala_frequency, brunt_vaisala_frequency_squared, potential_temperature
"""
bv_freq_squared = brunt_vaisala_frequency_squared(height, potential_temperature,
vertical_dim=vertical_dim)
bv_freq_squared[bv_freq_squared.magnitude <= 0] = np.nan
return 2 * np.pi / np.sqrt(bv_freq_squared)
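# Illustrative usage sketch (not part of the original module): Brunt-Vaisala period for a
# layer in which potential temperature increases with height. Values are arbitrary; a
# stable tropospheric layer typically has a period on the order of ten minutes.
def _example_brunt_vaisala_period():
    """Return the Brunt-Vaisala period of a sample stable layer (hedged usage sketch)."""
    sample_height = units.Quantity([0., 1000., 2000., 3000.], 'meter')
    sample_theta = units.Quantity([300., 303., 306., 309.], 'kelvin')
    return brunt_vaisala_period(sample_height, sample_theta)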
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
broadcast=('pressure', 'temperature', 'dewpoint')
)
@check_units('[pressure]', '[temperature]', '[temperature]')
def wet_bulb_temperature(pressure, temperature, dewpoint):
"""Calculate the wet-bulb temperature using Normand's rule.
This function calculates the wet-bulb temperature using the Normand method. The LCL is
computed, and that parcel brought down to the starting pressure along a moist adiabat.
The Normand method (and others) are described and compared by [Knox2017]_.
Parameters
----------
pressure : `pint.Quantity`
Initial atmospheric pressure
temperature : `pint.Quantity`
Initial atmospheric temperature
dewpoint : `pint.Quantity`
Initial atmospheric dewpoint
Returns
-------
`pint.Quantity`
Wet-bulb temperature
See Also
--------
lcl, moist_lapse
Notes
-----
Since this function iteratively applies a parcel calculation, it should be used with
caution on large arrays.
"""
if not hasattr(pressure, 'shape'):
pressure = np.atleast_1d(pressure)
temperature = np.atleast_1d(temperature)
dewpoint = np.atleast_1d(dewpoint)
lcl_press, lcl_temp = lcl(pressure, temperature, dewpoint)
it = np.nditer([pressure.magnitude, lcl_press.magnitude, lcl_temp.magnitude, None],
op_dtypes=['float', 'float', 'float', 'float'],
flags=['buffered'])
for press, lpress, ltemp, ret in it:
moist_adiabat_temperatures = moist_lapse(units.Quantity(press, pressure.units),
units.Quantity(ltemp, lcl_temp.units),
units.Quantity(lpress, lcl_press.units))
ret[...] = moist_adiabat_temperatures.magnitude
# If we started with a scalar, return a scalar
ret = it.operands[3]
if ret.size == 1:
ret = ret[0]
return units.Quantity(ret, moist_adiabat_temperatures.units)
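# Illustrative usage sketch (not part of the original module): wet-bulb temperature of a
# single warm, fairly moist surface parcel. Values are arbitrary.
def _example_wet_bulb_temperature():
    """Return the wet-bulb temperature of a sample parcel (hedged usage sketch)."""
    sample_pressure = units.Quantity(1000., 'hPa')
    sample_temperature = units.Quantity(30., 'degC')
    sample_dewpoint = units.Quantity(20., 'degC')
    return wet_bulb_temperature(sample_pressure, sample_temperature, sample_dewpoint)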
@exporter.export
@add_vertical_dim_from_xarray
@preprocess_and_wrap(wrap_like='temperature', broadcast=('pressure', 'temperature'))
@check_units('[pressure]', '[temperature]')
def static_stability(pressure, temperature, vertical_dim=0):
r"""Calculate the static stability within a vertical profile.
.. math:: \sigma = -\frac{RT}{p} \frac{\partial \ln \theta}{\partial p}
This formula is based on equation 4.3.6 in [Bluestein1992]_.
Parameters
----------
pressure : `pint.Quantity`
Profile of atmospheric pressure
temperature : `pint.Quantity`
Profile of temperature
vertical_dim : int, optional
The axis corresponding to vertical in the pressure and temperature arrays, defaults
to 0.
Returns
-------
`pint.Quantity`
The profile of static stability
.. versionchanged:: 1.0
Renamed ``axis`` parameter ``vertical_dim``
"""
theta = potential_temperature(pressure, temperature)
return - mpconsts.Rd * temperature / pressure * first_derivative(
np.log(theta.m_as('K')),
x=pressure,
axis=vertical_dim
)
@exporter.export
@preprocess_and_wrap(
wrap_like='temperature',
    broadcast=('pressure', 'temperature', 'specific_humidity')
)
@check_units('[pressure]', '[temperature]', '[dimensionless]')
def dewpoint_from_specific_humidity(pressure, temperature, specific_humidity):
r"""Calculate the dewpoint from specific humidity, temperature, and pressure.
Parameters
----------
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
specific_humidity: `pint.Quantity`
Specific humidity of air
Returns
-------
`pint.Quantity`
Dew point temperature
.. versionchanged:: 1.0
Changed signature from ``(specific_humidity, temperature, pressure)``
See Also
--------
relative_humidity_from_mixing_ratio, dewpoint_from_relative_humidity
"""
return dewpoint_from_relative_humidity(temperature,
relative_humidity_from_specific_humidity(
pressure, temperature, specific_humidity))
@exporter.export
@preprocess_and_wrap(wrap_like='w', broadcast=('w', 'pressure', 'temperature'))
@check_units('[length]/[time]', '[pressure]', '[temperature]')
def vertical_velocity_pressure(w, pressure, temperature, mixing_ratio=0):
r"""Calculate omega from w assuming hydrostatic conditions.
This function converts vertical velocity with respect to height
:math:`\left(w = \frac{Dz}{Dt}\right)` to that
with respect to pressure :math:`\left(\omega = \frac{Dp}{Dt}\right)`
assuming hydrostatic conditions on the synoptic scale.
By Equation 7.33 in [Hobbs2006]_,
.. math:: \omega \simeq -\rho g w
Density (:math:`\rho`) is calculated using the :func:`density` function,
from the given pressure and temperature. If `mixing_ratio` is given, the virtual
temperature correction is used, otherwise, dry air is assumed.
Parameters
----------
w: `pint.Quantity`
Vertical velocity in terms of height
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`, optional
Mixing ratio of air
Returns
-------
`pint.Quantity`
Vertical velocity in terms of pressure (in Pascals / second)
See Also
--------
density, vertical_velocity
"""
rho = density(pressure, temperature, mixing_ratio)
return (-mpconsts.g * rho * w).to('Pa/s')
@exporter.export
@preprocess_and_wrap(
wrap_like='omega',
broadcast=('omega', 'pressure', 'temperature', 'mixing_ratio')
)
@check_units('[pressure]/[time]', '[pressure]', '[temperature]')
def vertical_velocity(omega, pressure, temperature, mixing_ratio=0):
r"""Calculate w from omega assuming hydrostatic conditions.
This function converts vertical velocity with respect to pressure
:math:`\left(\omega = \frac{Dp}{Dt}\right)` to that with respect to height
:math:`\left(w = \frac{Dz}{Dt}\right)` assuming hydrostatic conditions on
the synoptic scale. By Equation 7.33 in [Hobbs2006]_,
.. math:: \omega \simeq -\rho g w
so that
.. math:: w \simeq \frac{- \omega}{\rho g}
Density (:math:`\rho`) is calculated using the :func:`density` function,
from the given pressure and temperature. If `mixing_ratio` is given, the virtual
temperature correction is used, otherwise, dry air is assumed.
Parameters
----------
omega: `pint.Quantity`
Vertical velocity in terms of pressure
pressure: `pint.Quantity`
Total atmospheric pressure
temperature: `pint.Quantity`
Air temperature
mixing_ratio: `pint.Quantity`, optional
Mixing ratio of air
Returns
-------
`pint.Quantity`
Vertical velocity in terms of height (in meters / second)
See Also
--------
density, vertical_velocity_pressure
"""
rho = density(pressure, temperature, mixing_ratio)
return (omega / (- mpconsts.g * rho)).to('m/s')
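# Illustrative usage sketch (not part of the original module): converting a synoptic-scale
# omega of -0.2 Pa/s (rising motion) to w in m/s, assuming dry air. Values are arbitrary.
def _example_vertical_velocity():
    """Return w for a sample omega observation (hedged usage sketch)."""
    sample_omega = units.Quantity(-0.2, 'Pa/s')
    sample_pressure = units.Quantity(700., 'hPa')
    sample_temperature = units.Quantity(278.15, 'kelvin')
    return vertical_velocity(sample_omega, sample_pressure, sample_temperature)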
@exporter.export
@preprocess_and_wrap(wrap_like='dewpoint', broadcast=('dewpoint', 'pressure'))
@check_units('[pressure]', '[temperature]')
def specific_humidity_from_dewpoint(pressure, dewpoint):
r"""Calculate the specific humidity from the dewpoint temperature and pressure.
Parameters
----------
dewpoint: `pint.Quantity`
Dewpoint temperature
pressure: `pint.Quantity`
Pressure
Returns
-------
`pint.Quantity`
Specific humidity
.. versionchanged:: 1.0
Changed signature from ``(dewpoint, pressure)``
See Also
--------
mixing_ratio, saturation_mixing_ratio
"""
mixing_ratio = saturation_mixing_ratio(pressure, dewpoint)
return specific_humidity_from_mixing_ratio(mixing_ratio)
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]')
def lifted_index(pressure, temperature, parcel_profile):
"""Calculate Lifted Index from the pressure temperature and parcel profile.
Lifted index formula derived from [Galway1956]_ and referenced by [DoswellSchultz2006]_:
LI = T500 - Tp500
where:
T500 is the measured temperature at 500 hPa
Tp500 is the temperature of the lifted parcel at 500 hPa
Calculation of the lifted index is defined as the temperature difference between the
observed 500 hPa temperature and the temperature of a parcel lifted from the
surface to 500 hPa.
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure level(s) of interest, in order from highest to
lowest pressure
temperature : `pint.Quantity`
Atmospheric temperature corresponding to pressure
parcel_profile : `pint.Quantity`
Temperature profile of the parcel
Returns
-------
`pint.Quantity`
Lifted Index
"""
# find the index for the 500 hPa pressure level.
idx = np.where(pressure == units.Quantity(500, 'hPa'))
# find the measured temperature at 500 hPa.
t500 = temperature[idx]
# find the parcel profile temperature at 500 hPa.
tp500 = parcel_profile[idx]
# calculate the lifted index.
lifted_index = t500 - tp500.to(units.degC)
return lifted_index
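# Illustrative usage sketch (not part of the original module): the exact-match lookup
# above requires 500 hPa to be present in the pressure array, so the sample sounding
# includes it. It assumes `parcel_profile` is defined earlier in this module; values are
# arbitrary.
def _example_lifted_index():
    """Return the lifted index of a small sample sounding (hedged usage sketch)."""
    sample_pressure = units.Quantity([1000., 850., 700., 500.], 'hPa')
    sample_temperature = units.Quantity([25., 15., 5., -12.], 'degC')
    sample_dewpoint = units.Quantity([20., 12., -2., -25.], 'degC')
    prof = parcel_profile(sample_pressure, sample_temperature[0], sample_dewpoint[0])
    return lifted_index(sample_pressure, sample_temperature, prof)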
@exporter.export
@add_vertical_dim_from_xarray
@preprocess_and_wrap(
wrap_like='potential_temperature',
broadcast=('height', 'potential_temperature', 'u', 'v')
)
@check_units('[length]', '[temperature]', '[speed]', '[speed]')
def gradient_richardson_number(height, potential_temperature, u, v, vertical_dim=0):
r"""Calculate the gradient (or flux) Richardson number.
    .. math:: Ri = (g/\theta) * \frac{\left(\partial \theta/\partial z\right)}
              {\left(\partial u / \partial z\right)^2 + \left(\partial v / \partial z\right)^2}
See [Holton2004]_ pg. 121-122. As noted by [Holton2004]_, flux Richardson
number values below 0.25 indicate turbulence.
Parameters
----------
height : `pint.Quantity`
Atmospheric height
potential_temperature : `pint.Quantity`
Atmospheric potential temperature
u : `pint.Quantity`
X component of the wind
v : `pint.Quantity`
y component of the wind
vertical_dim : int, optional
The axis corresponding to vertical, defaults to 0. Automatically determined from
xarray DataArray arguments.
Returns
-------
`pint.Quantity`
Gradient Richardson number
"""
dthetadz = first_derivative(potential_temperature, x=height, axis=vertical_dim)
dudz = first_derivative(u, x=height, axis=vertical_dim)
dvdz = first_derivative(v, x=height, axis=vertical_dim)
return (mpconsts.g / potential_temperature) * (dthetadz / (dudz ** 2 + dvdz ** 2))
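# Illustrative usage sketch (not part of the original module): Richardson number for a
# weakly sheared, stably stratified layer. Values are arbitrary; the result is well above
# the 0.25 turbulence threshold cited above.
def _example_gradient_richardson_number():
    """Return the gradient Richardson number of a sample layer (hedged usage sketch)."""
    sample_height = units.Quantity([0., 500., 1000., 1500.], 'meter')
    sample_theta = units.Quantity([300., 301.5, 303., 304.5], 'kelvin')
    sample_u = units.Quantity([2., 4., 6., 8.], 'm/s')
    sample_v = units.Quantity([0., 1., 2., 3.], 'm/s')
    return gradient_richardson_number(sample_height, sample_theta, sample_u, sample_v)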
@exporter.export
@preprocess_and_wrap()
@check_units('[pressure]', '[temperature]', '[temperature]')
def showalter_index(pressure, temperature, dewpt):
"""Calculate Showalter Index from pressure temperature and 850 hPa lcl.
Showalter Index derived from [Galway1956]_:
SI = T500 - Tp500
where:
T500 is the measured temperature at 500 hPa
Tp500 is the temperature of the lifted parcel at 500 hPa
Parameters
----------
pressure : `pint.Quantity`
Atmospheric pressure level(s) of interest, in order from highest to
lowest pressure
temperature : `pint.Quantity`
Parcel temperature for corresponding pressure
dewpt : `pint.Quantity`
Parcel dew point temperatures for corresponding pressure
Returns
-------
`pint.Quantity`
Showalter index
"""
# find the measured temperature and dew point temperature at 850 hPa.
t850, td850 = interpolate_1d(units.Quantity(850, 'hPa'), pressure, temperature, dewpt)
# find the parcel profile temperature at 500 hPa.
tp500 = interpolate_1d(units.Quantity(500, 'hPa'), pressure, temperature)
# Calculate lcl at the 850 hPa level
lcl_calc, _ = lcl(units.Quantity(850, 'hPa'), t850[0], td850[0])
# Define end height for moist lapse rate calculation
p_end = units.Quantity(500, 'hPa')
# Calculate parcel temp when raised dry adiabatically from surface to lcl
dl = dry_lapse(lcl_calc, temperature[0], pressure[0])
# Calculate parcel temp when raised moist adiabatically from lcl to 500mb
ml = moist_lapse(p_end, dl, lcl_calc)
# Calculate the Showalter index
return tp500 - ml
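# Illustrative usage sketch (not part of the original module): the interpolation above
# needs the profile to span both 850 hPa and 500 hPa, so the sample sounding includes
# both. Values are arbitrary; positive values indicate a stable 850-500 hPa layer.
def _example_showalter_index():
    """Return the Showalter index of a small sample sounding (hedged usage sketch)."""
    sample_pressure = units.Quantity([1000., 900., 850., 700., 600., 500.], 'hPa')
    sample_temperature = units.Quantity([25., 18., 15., 7., -2., -12.], 'degC')
    sample_dewpoint = units.Quantity([18., 14., 12., 0., -10., -22.], 'degC')
    return showalter_index(sample_pressure, sample_temperature, sample_dewpoint)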
| 33.538691 | 95 | 0.66607 |
7941d8fb2d5f70a785138454856ad5b071f37c07 | 4,077 | py | Python | datasets/lj_speech/lj_speech.py | MitchellTesla/datasets | bf08ea3f95e8209a7afd2b50410ad5db51409d11 | [
"Apache-2.0"
] | 3,395 | 2020-05-13T21:16:50.000Z | 2020-09-10T14:36:50.000Z | datasets/lj_speech/lj_speech.py | MitchellTesla/datasets | bf08ea3f95e8209a7afd2b50410ad5db51409d11 | [
"Apache-2.0"
] | 370 | 2020-05-13T21:28:57.000Z | 2020-09-10T11:03:38.000Z | datasets/lj_speech/lj_speech.py | MitchellTesla/datasets | bf08ea3f95e8209a7afd2b50410ad5db51409d11 | [
"Apache-2.0"
] | 258 | 2020-05-15T01:17:09.000Z | 2020-09-10T12:41:43.000Z | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""LJ automatic speech recognition dataset."""
import csv
import os
import datasets
from datasets.tasks import AutomaticSpeechRecognition
_CITATION = """\
@misc{ljspeech17,
author = {Keith Ito and Linda Johnson},
title = {The LJ Speech Dataset},
howpublished = {\\url{https://keithito.com/LJ-Speech-Dataset/}},
year = 2017
}
"""
_DESCRIPTION = """\
This is a public domain speech dataset consisting of 13,100 short audio clips of a single speaker reading
passages from 7 non-fiction books in English. A transcription is provided for each clip. Clips vary in length
from 1 to 10 seconds and have a total length of approximately 24 hours.
Note that in order to limit the required storage for preparing this dataset, the audio
is stored in the .wav format and is not converted to a float32 array. To convert the audio
file to a float32 array, please make use of the `.map()` function as follows:
```python
import soundfile as sf
def map_to_array(batch):
speech_array, _ = sf.read(batch["file"])
batch["speech"] = speech_array
return batch
dataset = dataset.map(map_to_array, remove_columns=["file"])
```
"""
_URL = "https://keithito.com/LJ-Speech-Dataset/"
_DL_URL = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"
class LJSpeech(datasets.GeneratorBasedBuilder):
"""LJ Speech dataset."""
VERSION = datasets.Version("1.1.0")
BUILDER_CONFIGS = [
datasets.BuilderConfig(name="main", version=VERSION, description="The full LJ Speech dataset"),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"audio": datasets.Audio(sampling_rate=22050),
"file": datasets.Value("string"),
"text": datasets.Value("string"),
"normalized_text": datasets.Value("string"),
}
),
supervised_keys=("file", "text"),
homepage=_URL,
citation=_CITATION,
task_templates=[AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")],
)
def _split_generators(self, dl_manager):
root_path = dl_manager.download_and_extract(_DL_URL)
root_path = os.path.join(root_path, "LJSpeech-1.1")
wav_path = os.path.join(root_path, "wavs")
csv_path = os.path.join(root_path, "metadata.csv")
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN, gen_kwargs={"wav_path": wav_path, "csv_path": csv_path}
),
]
def _generate_examples(self, wav_path, csv_path):
"""Generate examples from an LJ Speech archive_path."""
with open(csv_path, encoding="utf-8") as csv_file:
csv_reader = csv.reader(csv_file, delimiter="|", quotechar=None, skipinitialspace=True)
for row in csv_reader:
uid, text, norm_text = row
filename = f"{uid}.wav"
example = {
"id": uid,
"file": os.path.join(wav_path, filename),
"audio": os.path.join(wav_path, filename),
"text": text,
"normalized_text": norm_text,
}
yield uid, example
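# --- Illustrative usage sketch (not part of the original script) ---
# Assumes the Hugging Face `datasets` library is installed; the first call
# downloads and prepares the full corpus.
#
#     from datasets import load_dataset
#     ds = load_dataset("lj_speech", "main", split="train")
#     print(ds[0]["id"], ds[0]["normalized_text"])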
| 34.846154 | 109 | 0.640177 |
7941d90d5d685319d17da28a739766de1175b9c9 | 1,209 | py | Python | models/command_wrapper.py | Minigrim0/DiscordReminderBot | 9b94c989fd3573fc2b2f8e2f389505bfa6e5eb85 | [
"MIT"
] | 2 | 2021-04-02T12:44:30.000Z | 2021-04-02T12:50:50.000Z | models/command_wrapper.py | Minigrim0/DiscordReminderBot | 9b94c989fd3573fc2b2f8e2f389505bfa6e5eb85 | [
"MIT"
] | 4 | 2021-04-02T22:54:20.000Z | 2021-04-26T09:02:36.000Z | models/command_wrapper.py | Minigrim0/DiscordReminderBot | 9b94c989fd3573fc2b2f8e2f389505bfa6e5eb85 | [
"MIT"
] | 2 | 2021-03-02T17:07:14.000Z | 2021-03-27T15:29:30.000Z | from discord import Embed
class CommandWrapper:
"""Represents a command, with its help text, its description and the command in itself"""
def __init__(self, fun: callable):
self.description = None
self.command = None
self.fun = fun
self.help = ""
async def __call__(self, *args, **kwargs):
await self.fun(*args, **kwargs)
def __str__(self):
return f"\nCommand: {self.command}\n\tname: {self.fun.__name__}\n\tdescription: {self.description}"
def __repr__(self):
return str(self)
def __eq__(self, command):
return command == self.command
def asEmbed(self) -> Embed:
"""Generates an Embed representing the command
Returns:
Embed: The embed of the command
"""
em = Embed(title=self.fun.__name__, description=self.description)
em.add_field(name="parameters", value=self.help)
return em
@property
def asEmbedPart(self) -> list:
description = f"**help:**\n {self.help}\n**description:**\n {self.description}"
return {
"name": self.fun.__name__,
"value": description,
"inline": False
}
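# --- Illustrative usage sketch (not part of the original module) ---
# The command name, description and coroutine below are hypothetical examples.
#
#     async def ping(message):
#         await message.channel.send("pong")
#
#     cmd = CommandWrapper(ping)
#     cmd.command = "!ping"
#     cmd.description = "Replies with pong."
#     cmd.help = "!ping"
#     embed = cmd.asEmbed()        # discord.Embed describing the command
#     field = cmd.asEmbedPart      # dict usable with Embed.add_field(**field)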
| 27.477273 | 107 | 0.598015 |
7941d915b6363ecbdf98bde2599ec77ea5305f91 | 343 | py | Python | 0x04-python-more_data_structures/102-complex_delete.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | 0x04-python-more_data_structures/102-complex_delete.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | 0x04-python-more_data_structures/102-complex_delete.py | oluwaseun-ebenezer/holbertonschool-higher_level_programming | e830f969d3ca71abf0a2f6d4f7c64a82337eccd7 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# 102-complex_delete.py
def complex_delete(a_dictionary, value):
"""Delete keys with a specific value in a dictionary."""
while value in a_dictionary.values():
for k, v in a_dictionary.items():
if v == value:
del a_dictionary[k]
break
return (a_dictionary)
| 24.5 | 60 | 0.606414 |
7941dae7b60ce8980c84d9a03156e4431838e8f7 | 652 | py | Python | analyzer/models/v3.py | AmirSalari/DoHLyzer | 07e0a1e64310dad779934a97ebe7db2a03eff3d9 | [
"Unlicense"
] | 23 | 2021-04-14T23:46:19.000Z | 2022-03-31T04:46:47.000Z | analyzer/models/v3.py | AmirSalari/DoHLyzer | 07e0a1e64310dad779934a97ebe7db2a03eff3d9 | [
"Unlicense"
] | 5 | 2021-04-19T20:12:20.000Z | 2022-02-10T02:12:12.000Z | analyzer/models/v3.py | AmirSalari/DoHLyzer | 07e0a1e64310dad779934a97ebe7db2a03eff3d9 | [
"Unlicense"
] | 8 | 2021-04-15T06:50:35.000Z | 2022-01-15T00:44:45.000Z | from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, Conv1D, MaxPool1D, Flatten
def create_model(segment_size):
model = Sequential()
model.add(Conv1D(segment_size * 2, kernel_size=3, input_shape=(segment_size, 5), activation='relu'))
model.add(MaxPool1D())
model.add(Flatten())
model.add(Dense(segment_size * 6, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(segment_size * 2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
| 38.352941 | 104 | 0.70092 |
7941db5edd18eca063a39158e20d1754a4bd61cd | 4,509 | py | Python | tests/app_test.py | jimtheplant/bioplatform | 5097fae3a03c48338b552ad15d02b29e408ddbb9 | [
"Apache-2.0"
] | null | null | null | tests/app_test.py | jimtheplant/bioplatform | 5097fae3a03c48338b552ad15d02b29e408ddbb9 | [
"Apache-2.0"
] | null | null | null | tests/app_test.py | jimtheplant/bioplatform | 5097fae3a03c48338b552ad15d02b29e408ddbb9 | [
"Apache-2.0"
] | null | null | null | import datetime
import uuid
from unittest.mock import Mock
import graphene
import pytest
from bioplatform.app import app_factory
from bioplatform.util import Status
BASE_SCALAR_TEST_DATA = [
{
"queries": [
{
"class_name": "SimpleStringSingleField",
"query_fields": [
("test1", graphene.String(), "Testing")
]
}
]
},
{
"queries": [
{
"class_name": "SimpleIntSingleField",
"query_fields": [
("test1", graphene.Int(), 1)
]
}
]
},
{
"queries": [
{
"class_name": "SimpleFloatSingleField",
"query_fields": [
("test1", graphene.Float(), 1.0)
]
}
]
},
{
"queries": [
{
"class_name": "SimpleBooleanSingleField",
"query_fields": [
("test1", graphene.Boolean(), True)
]
}
]
},
{
"queries": [
{
"class_name": "SimpleIDSingleField",
"query_fields": [
("test1", graphene.ID(), str(uuid.uuid4()))
]
}
]
},
{
"queries": [
{
"class_name": "SimpleDateSingleField",
"query_fields": [
("test1", graphene.Date(), datetime.date(2019, 9, 15))
]
}
]
},
{
"queries": [
{
"class_name": "SimpleDateTimeSingleField",
"query_fields": [
("test1", graphene.Date(), datetime.date(2019, 9, 15))
]
}
]
},
]
@pytest.fixture()
def simple_query_factory():
def _simple_query_factory(query_name, query_fields):
query_dict = {}
for field in query_fields:
query_dict.update(field)
return type(query_name, (graphene.ObjectType,), query_dict)
return _simple_query_factory
@pytest.fixture()
def query_field_factory():
def _query_field_factory(field_name, field_type, field_value):
return {
field_name: field_type, "resolve_" + field_name: lambda self, info: field_value
}
return _query_field_factory
@pytest.fixture()
def initializer_mock_factory():
def _mock_initializer(return_value):
mock_initializer = Mock()
mock_initializer.init.return_value = return_value
return mock_initializer
return _mock_initializer
@pytest.fixture()
def initializers_factory(initializer_mock_factory, query_field_factory, simple_query_factory):
def _initializers_factory(param):
initializers = []
for query_info in param["queries"]:
query_fields = []
for field in query_info["query_fields"]:
query_fields.append(query_field_factory(*field))
query = simple_query_factory(query_info["class_name"], query_fields)
initializers.append(initializer_mock_factory(query))
return initializers
return _initializers_factory
def test_app_factory_no_initializers():
app = app_factory([])
schema = app.routes[0].app.schema
assert app.routes[0].path == "/"
response = schema.execute("{appStatus}")
assert response.data == {'appStatus': Status.OK.value}
def format_expected_response(field_type, data):
if type(field_type) == graphene.Date:
return data.strftime("%Y-%m-%d")
return data
@pytest.mark.parametrize("param", BASE_SCALAR_TEST_DATA)
def test_app_factory_initializers_base_scalars_no_dates(initializers_factory, param):
initializers = initializers_factory(param)
app = app_factory(initializers)
schema = app.routes[0].app.schema
assert app.routes[0].path == "/"
response = schema.execute("{appStatus}")
assert response.data == {'appStatus': Status.OK.value}
queries_to_test = []
for query_info in param["queries"]:
for query_field_info in query_info["query_fields"]:
queries_to_test.append(query_field_info)
for query in queries_to_test:
response = schema.execute("{" + query[0] + "}")
assert response.errors is None
assert response.data == {
query[0]: format_expected_response(query[1], query[2])
}
| 26.839286 | 94 | 0.556886 |
7941dba5828a88f19e8edd7999d205c7dc73817e | 1,019 | py | Python | manga_py/providers/readcomiconline_to.py | sonvt1710/manga-py | 848a78e93b890af0c92056a1a9fc7f6ce5707cf6 | [
"MIT"
] | 337 | 2019-08-27T16:14:50.000Z | 2022-03-29T09:58:22.000Z | manga_py/providers/readcomiconline_to.py | sonvt1710/manga-py | 848a78e93b890af0c92056a1a9fc7f6ce5707cf6 | [
"MIT"
] | 225 | 2019-08-25T15:02:01.000Z | 2022-03-31T06:36:09.000Z | manga_py/providers/readcomiconline_to.py | sonvt1710/manga-py | 848a78e93b890af0c92056a1a9fc7f6ce5707cf6 | [
"MIT"
] | 41 | 2019-10-04T13:28:02.000Z | 2022-03-19T08:18:34.000Z | from manga_py.provider import Provider
from .helpers.std import Std
class ReadComicOnlineTo(Provider, Std):
def get_archive_name(self) -> str:
chapter = self.re.search(r'id=(\d+)', self.chapter).group(1)
return self.normal_arc_name([self.chapter_id, chapter])
def get_chapter_index(self, no_increment=False) -> str:
return str(self.chapter_id)
def get_content(self):
return self._get_content(r'{}/Comic/{}')
def get_manga_name(self) -> str:
return self._get_name(r'/Comic/([^/]+)')
def get_chapters(self):
return self._elements('table.listing td > a')
def prepare_cookies(self):
self.http().cookies['rco_quality'] = 'hq'
def get_files(self):
content = self.http_get(self.chapter + '&readType=1')
items = self.re.findall(r'lstImages.push\("([^"]+)"\)', content)
return items
def get_cover(self):
return self._cover_from_content('.rightBox .barContent img[width]')
main = ReadComicOnlineTo
| 29.114286 | 75 | 0.651619 |
7941dc35ed19bdd429a600103d6142c5d347f9ff | 11,855 | py | Python | rdflib/plugins/parsers/pyRdfa/property.py | gromgull/rdflib | 7c90f646e3734ee6d3081b5d3f699f0f501f6a39 | [
"BSD-3-Clause"
] | 4 | 2019-01-07T06:55:58.000Z | 2021-07-16T13:34:58.000Z | rdflib/plugins/parsers/pyRdfa/property.py | gromgull/rdflib | 7c90f646e3734ee6d3081b5d3f699f0f501f6a39 | [
"BSD-3-Clause"
] | null | null | null | rdflib/plugins/parsers/pyRdfa/property.py | gromgull/rdflib | 7c90f646e3734ee6d3081b5d3f699f0f501f6a39 | [
"BSD-3-Clause"
] | 2 | 2018-05-01T13:18:13.000Z | 2018-11-15T04:58:05.000Z | # -*- coding: utf-8 -*-
"""
Implementation of the C{@property} value handling.
RDFa 1.0 and RDFa 1.1 are fairly different. RDFa 1.0 generates only literals, see
U{RDFa Task Force's wiki page<http://www.w3.org/2006/07/SWD/wiki/RDFa/LiteralObject>} for the details.
On the other hand, RDFa 1.1, beyond literals, can also generate URI references. Hence the duplicate method in the L{ProcessProperty} class, one for RDFa 1.0 and the other for RDFa 1.1.
@summary: RDFa Literal generation
@requires: U{RDFLib package<http://rdflib.net>}
@organization: U{World Wide Web Consortium<http://www.w3.org>}
@author: U{Ivan Herman<a href="http://www.w3.org/People/Ivan/">}
@license: This software is available for use under the
U{W3C® SOFTWARE NOTICE AND LICENSE<href="http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231">}
"""
"""
$Id: property.py,v 1.11 2012/06/12 11:47:11 ivan Exp $
$Date: 2012/06/12 11:47:11 $
"""
import re, sys
from rdflib import BNode
from rdflib import Literal, URIRef
from rdflib import RDF as ns_rdf
from rdflib.term import XSDToPython
from . import IncorrectBlankNodeUsage, IncorrectLiteral, err_no_blank_node
from .utils import has_one_of_attributes, return_XML
XMLLiteral = ns_rdf["XMLLiteral"]
HTMLLiteral = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#HTML")
class ProcessProperty :
"""Generate the value for C{@property} taking into account datatype, etc.
Note: this class is created only if the C{@property} is indeed present, no need to check.
@ivar node: DOM element node
	@ivar graph: the (RDF) graph to add the properties to
@ivar subject: the RDFLib URIRef serving as a subject for the generated triples
@ivar state: the current state to be used for the CURIE-s
@type state: L{state.ExecutionContext}
@ivar typed_resource: Typically the bnode generated by a @typeof
"""
def __init__(self, node, graph, subject, state, typed_resource = None) :
"""
@param node: DOM element node
	@param graph: the (RDF) graph to add the properties to
@param subject: the RDFLib URIRef serving as a subject for the generated triples
@param state: the current state to be used for the CURIE-s
@param state: L{state.ExecutionContext}
@param typed_resource: Typically the bnode generated by a @typeof; in RDFa 1.1, that becomes the object for C{@property}
"""
self.node = node
self.graph = graph
self.subject = subject
self.state = state
self.typed_resource = typed_resource
def generate(self) :
"""
Common entry point for the RDFa 1.0 and RDFa 1.1 versions; bifurcates based on the RDFa version, as retrieved from the state object.
"""
if self.state.rdfa_version >= "1.1" :
self.generate_1_1()
else :
self.generate_1_0()
def generate_1_1(self) :
"""Generate the property object, 1.1 version"""
#########################################################################
# See if the target is _not_ a literal
irirefs = ("resource", "href", "src")
noiri = ("content", "datatype", "rel", "rev")
notypediri = ("content", "datatype", "rel", "rev", "about", "about_pruned")
if has_one_of_attributes(self.node, irirefs) and not has_one_of_attributes(self.node, noiri) :
# @href/@resource/@src takes the lead here...
object = self.state.getResource(irirefs)
elif self.node.hasAttribute("typeof") and not has_one_of_attributes(self.node, notypediri) and self.typed_resource != None :
# a @typeof creates a special branch in case the typed resource was set during parsing
object = self.typed_resource
else :
# We have to generate a literal
# Get, if exists, the value of @datatype
datatype = ''
dtset = False
if self.node.hasAttribute("datatype") :
dtset = True
dt = self.node.getAttribute("datatype")
if dt != "" :
datatype = self.state.getURI("datatype")
			# supress_lang is set in case some elements explicitly want to suppress the effect of language
# There were discussions, for example, that the <time> element should do so. Although,
# after all, this was reversed, the functionality is kept in the code in case another
# element might need it...
if self.state.lang != None and self.state.supress_lang == False :
lang = self.state.lang
else :
lang = ''
# The simple case: separate @content attribute
if self.node.hasAttribute("content") :
val = self.node.getAttribute("content")
# Handling the automatic uri conversion case
if dtset == False :
object = Literal(val, lang=lang)
else :
object = self._create_Literal(val, datatype=datatype, lang=lang)
# The value of datatype has been set, and the keyword parameters take care of the rest
else :
# see if there *is* a datatype (even if it is empty!)
if dtset :
if datatype == XMLLiteral :
litval = self._get_XML_literal(self.node)
object = Literal(litval,datatype=XMLLiteral)
elif datatype == HTMLLiteral :
# I am not sure why this hack is necessary, but otherwise an encoding error occurs
# In Python3 all this should become moot, due to the unicode everywhere approach...
if sys.version_info[0] >= 3 :
object = Literal(self._get_HTML_literal(self.node), datatype=HTMLLiteral)
else :
litval = self._get_HTML_literal(self.node)
o = Literal(litval, datatype=XMLLiteral)
object = Literal(o, datatype=HTMLLiteral)
else :
object = self._create_Literal(self._get_literal(self.node), datatype=datatype, lang=lang)
else :
object = self._create_Literal(self._get_literal(self.node), lang=lang)
if object != None :
for prop in self.state.getURI("property") :
if not isinstance(prop, BNode) :
if self.node.hasAttribute("inlist") :
self.state.add_to_list_mapping(prop, object)
else :
self.graph.add( (self.subject, prop, object) )
else :
self.state.options.add_warning(err_no_blank_node % "property", warning_type=IncorrectBlankNodeUsage, node=self.node.nodeName)
# return
def generate_1_0(self) :
"""Generate the property object, 1.0 version"""
#########################################################################
# We have to generate a literal indeed.
# Get, if exists, the value of @datatype
datatype = ''
dtset = False
if self.node.hasAttribute("datatype") :
dtset = True
dt = self.node.getAttribute("datatype")
if dt != "" :
datatype = self.state.getURI("datatype")
if self.state.lang != None :
lang = self.state.lang
else :
lang = ''
# The simple case: separate @content attribute
if self.node.hasAttribute("content") :
val = self.node.getAttribute("content")
# Handling the automatic uri conversion case
if dtset == False :
object = Literal(val, lang=lang)
else :
object = self._create_Literal(val, datatype=datatype, lang=lang)
# The value of datatype has been set, and the keyword parameters take care of the rest
else :
# see if there *is* a datatype (even if it is empty!)
if dtset :
# yep. The Literal content is the pure text part of the current element:
# We have to check whether the specified datatype is, in fact, an
# explicit XML Literal
if datatype == XMLLiteral :
litval = self._get_XML_literal(self.node)
object = Literal(litval,datatype=XMLLiteral)
elif datatype == HTMLLiteral :
# I am not sure why this hack is necessary, but otherwise an encoding error occurs
# In Python3 all this should become moot, due to the unicode everywhere approach...
if sys.version_info[0] >= 3 :
object = Literal(self._get_HTML_literal(self.node), datatype=HTMLLiteral)
else :
litval = self._get_HTML_literal(self.node)
o = Literal(litval, datatype=XMLLiteral)
object = Literal(o, datatype=HTMLLiteral)
else :
object = self._create_Literal(self._get_literal(self.node), datatype=datatype, lang=lang)
else :
# no controlling @datatype. We have to see if there is markup in the contained
# element
if True in [ n.nodeType == self.node.ELEMENT_NODE for n in self.node.childNodes ] :
# yep, and XML Literal should be generated
object = self._create_Literal(self._get_XML_literal(self.node), datatype=XMLLiteral)
else :
# At this point, there might be entities in the string that are returned as real characters by the dom
# implementation. That should be turned back
object = self._create_Literal(self._get_literal(self.node), lang=lang)
for prop in self.state.getURI("property") :
if not isinstance(prop,BNode) :
self.graph.add( (self.subject,prop,object) )
else :
self.state.options.add_warning(err_no_blank_node % "property", warning_type=IncorrectBlankNodeUsage, node=self.node.nodeName)
# return
######################################################################################################################################
def _putBackEntities(self, str) :
"""Put 'back' entities for the '&','<', and '>' characters, to produce a proper XML string.
Used by the XML Literal extraction.
@param str: string to be converted
@return: string with entities
@rtype: string
"""
return str.replace('&','&').replace('<','<').replace('>','>')
def _get_literal(self, Pnode):
"""
Get (recursively) the full text from a DOM Node.
@param Pnode: DOM Node
@return: string
"""
rc = ""
for node in Pnode.childNodes:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
elif node.nodeType == node.ELEMENT_NODE :
rc = rc + self._get_literal(node)
# The decision of the group in February 2008 is not to normalize the result by default.
# This is reflected in the default value of the option
if self.state.options.space_preserve :
return rc
else :
return re.sub(r'(\r| |\n|\t)+'," ",rc).strip()
# end getLiteral
def _get_XML_literal(self, Pnode) :
"""
Get (recursively) the XML Literal content of a DOM Node.
@param Pnode: DOM Node
@return: string
"""
rc = ""
for node in Pnode.childNodes:
if node.nodeType == node.TEXT_NODE:
rc = rc + self._putBackEntities(node.data)
elif node.nodeType == node.ELEMENT_NODE :
rc = rc + return_XML(self.state, node, base = False)
return rc
# end getXMLLiteral
def _get_HTML_literal(self, Pnode) :
"""
Get (recursively) the XML Literal content of a DOM Node.
@param Pnode: DOM Node
@return: string
"""
rc = ""
for node in Pnode.childNodes:
if node.nodeType == node.TEXT_NODE:
rc = rc + self._putBackEntities(node.data)
elif node.nodeType == node.ELEMENT_NODE :
rc = rc + return_XML(self.state, node, base = False, xmlns = False )
return rc
# end getXMLLiteral
def _create_Literal(self, val, datatype = '', lang = '') :
"""
Create a literal, taking into account the datatype and language.
@return: Literal
"""
if datatype == None or datatype == '' :
return Literal(val, lang=lang)
#elif datatype == ns_xsd["string"] :
# return Literal(val)
else :
# This is a bit convoluted... the default setup of rdflib does not gracefully react if the
# datatype cannot properly be converted to Python. I have to copy and reuse some of the
# rdflib code to get this working...
			# To make things worse: rdflib 3.1.0 does not handle the various xsd date types properly, ie,
# the conversion function below will generate errors. Ie, the check should be skipped for those
convFunc = XSDToPython.get(datatype, None)
if convFunc :
try :
pv = convFunc(val)
# If we got there the literal value and its datatype match
except :
self.state.options.add_warning("Incompatible value (%s) and datatype (%s) in Literal definition." % (val, datatype), warning_type=IncorrectLiteral, node=self.node.nodeName)
return Literal(val, datatype=datatype)
| 38.868852 | 184 | 0.677436 |
7941dce19c4a933853cfcaeead8c12a4d93dd05d | 1,555 | py | Python | tests/test_base.py | jonancm/django-quantity-field | a1966b2db4e25358ef6520b80cc6b57f90cc847a | [
"MIT"
] | 4 | 2018-02-24T02:54:44.000Z | 2021-10-09T12:40:58.000Z | tests/test_base.py | jonancm/django-quantity-field | a1966b2db4e25358ef6520b80cc6b57f90cc847a | [
"MIT"
] | 1 | 2021-10-20T02:40:24.000Z | 2021-10-20T02:40:24.000Z | tests/test_base.py | jonancm/django-quantity-field | a1966b2db4e25358ef6520b80cc6b57f90cc847a | [
"MIT"
] | 2 | 2019-02-05T02:19:17.000Z | 2019-11-06T21:47:32.000Z | # coding: utf-8
import unittest
from quantity_field import ureg
from quantity_field.base import MultiQuantity
class MultiQuantityTest(unittest.TestCase):
def test_single_value(self):
self.assertIsInstance(MultiQuantity(0), ureg.Quantity)
self.assertEqual(MultiQuantity(3.5, ureg.kg), 3.5 * ureg.kg)
self.assertEqual(MultiQuantity.from_string('42 kilogram'), 42 * ureg.kg)
mq = MultiQuantity.from_list(100, ureg.m)
self.assertEqual(mq, 100 * ureg.m)
self.assertEqual(mq.units, ureg.m)
self.assertEqual(mq[0], 100 * ureg.m)
self.assertEqual(mq.dim, 1)
self.assertEqual(mq * 2 * ureg.m, 200 * ureg.m * ureg.m)
self.assertGreater(mq, 50 * ureg.m)
self.assertEqual(unicode(mq), '100.0 meter')
self.assertEqual(mq.to_string(), '100.0 meter')
self.assertEqual(mq.to_list(), [100, 'meter'])
def test_multi_value(self):
self.assertEqual(MultiQuantity.from_string('2.5*4*3 meter'), 30 * ureg.m ** 3)
mq = MultiQuantity.from_list(2.5, 4, ureg.m)
self.assertEqual(mq, 10 * ureg.m ** 2)
self.assertEqual(mq.units, ureg.m ** 2)
self.assertEqual([q for q in mq], [2.5 * ureg.m, 4 * ureg.m])
self.assertEqual(mq.dim, 2)
self.assertEqual(mq * 2 * ureg.m, 20 * ureg.m ** 3)
self.assertGreater(mq, 5 * ureg.m ** 2)
self.assertEqual(unicode(mq), '2.5*4.0 meter')
self.assertEqual(mq.to_string(), '2.5*4.0 meter')
self.assertEqual(mq.to_list(), [2.5, 4, 'meter'])
| 35.340909 | 86 | 0.626367 |
7941dd126eaaf30d0da82004367c9b0aa09ffb30 | 28,247 | py | Python | dataset_reader.py | DREAM-ODA-OS/CloudfreeCoverage | ab1fcac9c94c9a876c8294f968c8fe2e4ba17266 | [
"Unlicense",
"MIT"
] | null | null | null | dataset_reader.py | DREAM-ODA-OS/CloudfreeCoverage | ab1fcac9c94c9a876c8294f968c8fe2e4ba17266 | [
"Unlicense",
"MIT"
] | null | null | null | dataset_reader.py | DREAM-ODA-OS/CloudfreeCoverage | ab1fcac9c94c9a876c8294f968c8fe2e4ba17266 | [
"Unlicense",
"MIT"
] | null | null | null | #!/usr/bin/env python
#
#------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
#
#
# Dataset reader module of the create_cloudless.py
# --- currently containing Readers for:
# -- Reader Class (General)
# - CF_landsat5_2a_Reader
# - CF_spot4take5_n2a_pente_Reader
# - CF_cryoland_Reader
#
# - CF_landsat5_f_Reader
# - CF_spot4take5_f_Reader
# - CF_landsat5_m_Reader
# - CF_cryoland_local_Reader
#
#
# for internal testing:
# '_f' = means located at hard disk
# '_w' = means accessible via WCS service
# '_m' = means use of the "local-mixed" dataset
#
#
# Project: DeltaDREAM
# Name: dataset_reader.py
# Authors: Christian Schiller <christian dot schiller at eox dot at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2014 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
#
#
import sys
import os
import os.path
import time
import fnmatch
import datetime
from util import parse_xml, print_log
import wcs_client
wcs = wcs_client.wcsClient()
#/************************************************************************/
#/* findfile() */
#/************************************************************************/
def findfile(indir, inmask):
"""
literal_directory, basename_pattern (simple shell-style wildcards), includes dot-files
no regex, but constructs like e.g. L5_[!a-f]*.tif , are possible
"""
filelist = []
for root, dd, files in os.walk(indir):
for ff in files:
if fnmatch.fnmatchcase(ff, inmask):
filelist.append( os.path.join(root, ff) )
return filelist
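# --- Illustrative example (not part of the original module) ---
# The directory and file names below are hypothetical.
#
#     findfile('/data/landsat5/2011/05/', 'L*_20110503_L5_*_surf_pente_30m.tif')
#     # -> ['/data/landsat5/2011/05/L198-030_20110503_L5_198_030_surf_pente_30m.tif', ...]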
#/************************************************************************/
#/* get_taget_list() */
#/************************************************************************/
def get_daterange(from_date, composite_range):
"""
calculate the new date for the time interval depending on the composite_range
"""
from_date = str(from_date)
composite_range = int(composite_range) # consider the starting day
from_year = int(from_date[0:4])
from_month = int(from_date[4:6])
from_day = int(from_date[6:8])
time_stamp = datetime.datetime(day=from_day, month=from_month, year=from_year )
difference = time_stamp + datetime.timedelta(days=int(composite_range))
to_date ='%.4d-%.2d-%.2d' % (difference.year, difference.month, difference.day)
return to_date
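# --- Illustrative examples (not part of the original module) ---
#
#     get_daterange('20110503', 10)    # -> '2011-05-13'
#     get_daterange('20110503', -10)   # -> '2011-04-23'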
#/************************************************************************/
#/* Reader() */
#/************************************************************************/
class Reader(object):
"""
Reader class
- gathering information about filenames, dates, etc.
- provide the listing of Base-files, Base-masks, GFP-files and GFP-masks to be used
"""
def __init__(self):
pass
#---------
def get_filelist(self, input_params, settings):
"""
uses WCS requests to generate filelist of files available at service/server
"""
cov_list = self.base_desceocover(input_params, settings, mask=False)
        # check if there is really a list of datasets returned or an error msg
if type(cov_list) is str: # and cov_list.find('numberMatched="0"') is not -1:
            err_msg = '[Error] -- No Datasets found. Service returned the following information.'
print_log(settings, err_msg)
print_log(settings, cov_list)
sys.exit()
mask_list = self.base_desceocover(input_params, settings, mask=True)
if type(mask_list) is str: # and cov_list.find('numberMatched="0"') is not -1:
            err_msg = '[Error] -- No Datasets found. Service returned the following information.'
            print_log(settings, err_msg)
            print_log(settings, mask_list)
sys.exit()
# split up the received listing - Base, Base_mask, GFPs, GFPMask
# (--> cryoland products do not have masks)
cnt = 0
base_flist = []
gfp_flist = []
for elem in cov_list:
idx = elem.find(input_params['toi'])
if idx > -1:
b_cov = cov_list.pop(cnt)
base_flist.append(b_cov)
cnt += 1
gfp_flist = list(cov_list)
base_mask_flist = []
gfpmask_flist = []
cnt = 0
for elem in mask_list:
idx = elem.find(input_params['toi'])
if idx > -1:
b_mask = mask_list.pop(cnt)
base_mask_flist.append(b_mask)
cnt += 1
gfpmask_flist = list(mask_list)
gfp_flist, gfpmask_flist = self.apply_scenario(gfp_flist, gfpmask_flist, input_params['scenario'], base_flist, base_mask_flist )
if len(base_flist) != len(base_mask_flist):
err_msg = 'Number of datafiles and number of cloud-masks do not correspond'
print_log(settings, err_msg)
sys.exit(4)
if len(gfp_flist) != len(gfpmask_flist):
err_msg = 'Number of datafiles and number of cloud-masks do not correspond'
print_log(settings, err_msg)
sys.exit(4)
return base_flist, base_mask_flist, gfp_flist, gfpmask_flist
#---------
def get_maskname(self, filename):
"""
set the mask filename filter and get the mask filename(-list)
return mask-filename or list of mask-filenames (if list is provided)
*) eg. CryoLand doesn't have mask files
"""
pass
#---------
def set_request_values(self, settings, input_params, mask):
"""
set the request parameters
"""
if mask is True:
searchkey = input_params['dataset'][:-3]+'_mask_clouds'
if settings.has_key('dataset.'+searchkey) is True:
service = settings['dataset.'+searchkey]
else:
searchkey = input_params['dataset']+'_clouds'
if settings.has_key('dataset.'+searchkey) is True:
service = settings['dataset.'+searchkey]
else:
searchkey = input_params['dataset'][:-6]+'_nua'
if settings.has_key('dataset.'+searchkey) is True:
service = settings['dataset.'+searchkey]
else:
service = settings['dataset.'+input_params['dataset']]
aoi_values = input_params['aoi']
toi_values = []
if input_params['scenario'] == 'T':
target_date = get_daterange(input_params['toi'], -input_params['period'])
in_date = get_daterange(input_params['toi'], 0)
toi_values.append(target_date)
toi_values.append(in_date)
if input_params['scenario'] == 'B':
target_date = get_daterange(input_params['toi'], input_params['period'])
in_date = get_daterange(input_params['toi'], 0)
toi_values.append(in_date)
toi_values.append(target_date)
if input_params['scenario'] == 'M':
tt = int((input_params['period']/2))
tt1 = int((input_params['period']/2.)+0.5) # correct for the rounding error
target_date = get_daterange(input_params['toi'], -tt)
toi_values.append(target_date)
target_date = get_daterange(input_params['toi'], tt1)
toi_values.append(target_date)
service1 = service.rsplit('EOID')[0]
dss = service.rsplit('EOID')[1][1:]
return service1, toi_values, aoi_values, dss
#---------
def base_desceocover(self, input_params, settings, mask):
"""
Send a DescribeEOCoverageSet request to the WCS Service, asking for the available Coverages, according
to the user defined AOI, TOI, and DatasetSeries. The function returns the available CoveragesIDs.
"""
target_server, toi_values, aoi_values, dss = self.set_request_values(settings, input_params, mask)
request = {'request': 'DescribeEOCoverageSet' ,
'server_url': target_server ,
'eoID': dss ,
'subset_lon': aoi_values[0]+','+aoi_values[1] ,
'subset_lat': aoi_values[2]+','+aoi_values[3] ,
'subset_time': toi_values[0]+'T00:00'+','+ toi_values[1]+'T23:59' ,
'IDs_only': True }
cids = wcs.DescribeEOCoverageSet(request, settings)
return cids
#---------
def apply_scenario(self, gfp_flist, gfpmask_flist, scenario, base_flist, base_mask_flist):
"""
apply the selected scenario i.e. sort the gfp lists accordingly
"""
if scenario == 'T':
gfp_flist.reverse()
gfpmask_flist.reverse()
return gfp_flist, gfpmask_flist
elif scenario == 'B':
gfp_flist.sort()
gfpmask_flist.sort()
return gfp_flist, gfpmask_flist
elif scenario == 'M':
gfp_tmp = list(gfp_flist)
gfp_masktmp = list(gfpmask_flist)
gfp_tmp.extend(base_flist)
gfp_masktmp.extend(base_mask_flist)
gfp_tmp.sort()
gfp_masktmp.sort()
toi_pos1 = gfp_tmp.index(base_flist[0])
toi_pos2 = gfp_masktmp.index(base_mask_flist[0])
newer_flist1 = gfp_tmp[toi_pos1+1:]
older_flist1 = gfp_tmp[:toi_pos1]
older_flist1.reverse()
newer_flist2 = gfp_masktmp[toi_pos2+1:]
older_flist2 = gfp_masktmp[:toi_pos2]
older_flist2.reverse()
# we always use the newer files first
gfp = map(None, newer_flist1, older_flist1)
gfpm = map(None, newer_flist2, older_flist2)
out_gfp = []
for k, v in gfp:
if k is not None:
out_gfp.append(k)
if v is not None:
out_gfp.append(v)
out_gfpm = []
for k, v in gfpm:
if k is not None:
out_gfpm.append(k)
if v is not None:
out_gfpm.append(v)
return out_gfp, out_gfpm
else:
            # 'settings' is not in scope in this method, so fall back to a plain print
            print('[Error] -- Chosen Scenario is not supported. Please use either T, B or M -- ')
sys.exit(3)
#---------
def base_getcover(self, file_list, input_params, settings, temp_storage, mask):
"""
Function to actually requesting and saving the available coverages on the local file system.
"""
# get the time of downloading - to be used in the filename (to differentiate if multiple AOIs of
# the same coverages are downloaded to the same output directory)
target_server, toi_values, aoi_values, dss = self.set_request_values(settings, input_params, mask=False)
request = {'request': 'GetCoverage' ,
'server_url': target_server ,
# this is set in the file_list loop
# 'coverageID': COVERAGEID ,
'format': 'tiff' ,
'subset_x': 'epsg:4326 Long '+ aoi_values[0]+','+aoi_values[1] ,
'subset_y': 'epsg:4326 Lat '+aoi_values[2]+','+aoi_values[3],
# 'output': input_params['output_dir'] }
# we need to use the tmporary directory here!
'output': temp_storage }
# create output-crs syntax to be added to GetCoverage request
if input_params['output_crs'] != None:
request['outputcrs'] = input_params['output_crs'].split(':')[1]
# handle band-subsetting
if input_params['bands'] != '999':
bands = ''
for bb in input_params['bands']:
bands = bands+bb+','
request['rangesubset'] = bands[:-1]
# don't use bandsubsetting for requests regarding mask-files
if mask is True:
request['rangesubset'] = None
for COVERAGEID in file_list:
request['coverageID'] = COVERAGEID
res_getcov = wcs.GetCoverage(request, settings, input_params)
if res_getcov is not 200:
print_log(settings, res_getcov)
#/************************************************************************/
#/* CF_landsat5_2a_Reader */
#/************************************************************************/
class CF_landsat5_2a_Reader(Reader):
"""
    reader module for the landsat5_2a dataset
- mainly for testing and development
- and for demonstration of WCS usage
"""
def __init__(self):
Reader.__init__(self)
#/************************************************************************/
#/* CF_spot4take5_Reader() */
#/************************************************************************/
class CF_spot4take5_n2a_pente_Reader(Reader):
"""
    reader module for the spot4take5_n2a_pente dataset
- mainly for testing and development
- and for demonstration of WCS usage
"""
def __init__(self):
Reader.__init__(self)
#/************************************************************************/
#/* CF_cryoland_Reader */
#/************************************************************************/
class CF_cryoland_Reader(Reader):
"""
reader module for the cryoland dataset
- mainly for testing and development
- and for demonstration of WCS usage
"""
def __init__(self):
Reader.__init__(self)
#---------
def get_filelist(self, input_params, settings):
"""
uses WCS requests to generate filelist of files available at service/server
"""
cov_list = self.base_desceocover(input_params, settings, mask=False)
base_flist = []
base_mask_flist = []
gfp_flist = []
gfpmask_flist = []
# split up the received listing - Base, Base_mask, GFPs, GFPMask (--> cryoland products do not have masks)
cnt = 0
for elem in cov_list:
try:
idx = elem.find(input_params['toi'])
if idx > -1:
b_cov = cov_list.pop(cnt)
base_flist.append(b_cov)
else:
b_cov = ''
cnt += 1
except ValueError:
print_log(settings, str(ValueError))
gfp_flist = list(cov_list)
return base_flist, base_mask_flist, gfp_flist, gfpmask_flist
#/************************************************************************/
#/* CF_landsat5_f_Reader */
#/************************************************************************/
class CF_landsat5_f_Reader(Reader):
"""
reader module for the landsat5_f dataset
'_f' = means located at hard disk
'_w' = means accessible via WCS service
"""
def __init__(self):
Reader.__init__(self)
#----
def get_maskname(self, filename):
"""
set the mask filename filter and get the mask filename(-list)
return mask-filename or list of mask-filenames (if list is provided)
"""
# check if list or single name has been provided
if type(filename) == list:
mask_filename = []
for elem in filename:
# for landsat5 - mask would be
base, extension = os.path.splitext(elem)
m_filename = "%s.nuages%s" % (base, extension)
mask_filename.append(m_filename)
elif type(filename) == str:
base, extension = os.path.splitext(filename)
mask_filename = "%s.nuages%s" % (base, extension)
return mask_filename
#----
def get_filelist(self, input_params, settings):
"""
gets the listing of filenames of available: Base files, GFP files and Mask files
"""
target_date = input_params['toi']
access_path1 = settings['dataset.'+input_params['dataset']]
pos1 = str.index(access_path1, '://')
access_path = access_path1[pos1+3:]
## TODO: this should be more general - eg. using: get_daterange() ??
start_year = int(input_params['toi'][0:4])
start_month = int(input_params['toi'][4:6])
if input_params['scenario'] == 'T':
if start_month == 1:
loop = [ start_year - 1, '11', start_year -1 , '12', start_year, start_month ]
elif start_month == 2:
loop = [ start_year - 1, '12', start_year, start_month -1 , start_year, start_month ]
else:
loop = [start_year, start_month - 2, start_year, start_month -1 , start_year, start_month ]
elif input_params['scenario'] == 'B':
if start_month == 12:
loop = [start_year, start_month, start_year + 1, '01', start_year + 1, '02' ]
elif start_month == 11:
loop = [start_year, start_month, start_year, start_month + 1 , start_year +1 , '01' ]
else:
loop = [start_year, start_month, start_year, start_month + 1, start_year, start_month + 2 ]
elif input_params['scenario'] == 'M':
if start_month == 12:
loop = [start_year, start_month - 1, start_year, start_month, start_year +1 , '01' ]
elif start_month == 1:
loop = [start_year -1, '12', start_year, start_month, start_year, start_month + 1 ]
else:
loop = [start_year, start_month - 1, start_year, start_month, start_year, start_month + 1 ]
if len(str(loop[1])) == 1:
loop[1] = '0'+str(loop[1])
if len(str(loop[3])) == 1:
loop[3] = '0'+str(loop[3])
if len(str(loop[5])) == 1:
loop[5] = '0'+str(loop[5])
base_flist = []
base_mask_flist = []
gfp_flist = []
gfpmask_flist = []
base_fname_syntax = 'L*_' + target_date + '*_L5_*_surf_pente_30m.tif'
gfp_fname_syntax = 'L*_*_L5_*_surf_pente_30m.tif'
for jj in range(0, len(loop)-1, 2):
target = str(loop[jj])+'/'+str(loop[jj+1])+'/'
base_flist = base_flist+sorted(findfile(access_path+target, base_fname_syntax))
gfp_flist = gfp_flist+sorted(findfile(access_path+target, gfp_fname_syntax))
# now remove any base_filenames from the gfp_flist, to avoid duplicates
gfp_flist = [item for item in gfp_flist if not item in base_flist]
# create the file-list for the mask-files
gfpmask_flist = self.get_maskname(gfp_flist)
base_mask_flist = self.get_maskname(base_flist)
# return all created file-lists
return base_flist, base_mask_flist, gfp_flist, gfpmask_flist
#----
def base_getcover(self, file_list, input_params, settings, temp_storage, mask):
"""
Processing takes place on the original data (skipping copying, but
        maybe risking data corruption?); no data transformation (subsetting, CRS,
band subsetting) is currently implemented
"""
pass
#----
#/************************************************************************/
#/* CF_spot4take5_f_Reader */
#/************************************************************************/
class CF_spot4take5_f_Reader(Reader):
"""
reader module for the spot4take5_f dataset
'_f' = means located at hard disk
'_w' = means accessible via WCS service
"""
def __init__(self):
Reader.__init__(self)
def get_maskname(self, filename):
"""
set the mask filename filter and get the mask filename(-list)
return m ask-filename or list of mask-filenames (if list is provided)
"""
# check if list or single name has been provided
if type(filename) == list:
mask_filename = []
for elem in filename:
dirname = os.path.dirname(elem)
basename = os.path.basename(elem)
#basename1 = basename.replace('_ORTHO_','_')
# basename1 = basename1.replace('_PENTE_','_')
m_filename1 = basename[0:25]+'*_NUA.TIF'
m_filename = sorted(findfile(dirname+'/MASK/', m_filename1))
mask_filename.append(str(m_filename[0]))
elif type(filename) == str:
            dirname = os.path.dirname(filename)
            basename = os.path.basename(filename)
#basename1 = basename.replace('_ORTHO_','_')
#basename1 = basename1.replace('_PENTE_','_')
m_filename1 = basename[0:25]+'*_NUA.TIF'
m_filename = sorted(findfile(dirname+'/MASK/', m_filename1))
mask_filename = str(m_filename[0])
return mask_filename
#----
def get_filelist(self, input_params, settings):
"""
gets the listing of filenames of available: Base files, GFP files and Mask files
"""
target_date = input_params['toi']
access_path1 = settings['dataset.'+input_params['dataset']]
pos1 = str.index(access_path1, '://')
access_path = access_path1[pos1+3:]
base_flist = []
base_mask_flist = []
gfp_flist = []
gfpmask_flist = []
base_fname_syntax = 'SPOT4_*' + target_date + '*_PENTE_*.TIF'
gfp_fname_syntax = 'SPOT4_*_PENTE_*.TIF'
base_flist = base_flist+sorted(findfile(access_path, base_fname_syntax))
base_mask_flist = self.get_maskname(base_flist)
gfp_flist = gfp_flist+sorted(findfile(access_path, gfp_fname_syntax))
gfp_flist = [item for item in gfp_flist if not item in base_flist]
gfpmask_flist = self.get_maskname(gfp_flist)
return base_flist, base_mask_flist, gfp_flist, gfpmask_flist
#----
def base_getcover(self, file_list, input_params, settings, temp_storage, mask):
"""
Processing takes place on the original data (skipping copying, but
        maybe risking data corruption?); no data transformation (subsetting, CRS,
band subsetting) is currently implemented
"""
pass
#/************************************************************************/
#/* CF_spot4take5_f_Reader */
#/************************************************************************/
class CF_cryoland_local_Reader(Reader):
"""
reader module for the cryoland dataset
- mainly for testing and development
- and for demonstration of WCS usage
"""
def __init__(self):
Reader.__init__(self)
#----
def get_filelist(self, input_params, settings):
"""
uses WCS requests to generate filelist of files available at service/server
"""
avail_dss = self.get_available_dss(input_params, settings, False)
base_flist = []
base_flist = list(avail_dss)
base_mask_flist = list(base_flist)
gfp_flist = []
gfpmask_flist = list(gfp_flist)
return base_flist, base_mask_flist, gfp_flist, gfpmask_flist
#/************************************************************************/
#/* CF_landsat5_m_Reader */
#/************************************************************************/
class CF_landsat5_m_Reader(Reader):
"""
This represents actually a "_f" but it contians files in an unstructured
way (-> mix) set-up for easier testing purpose only
reader module for the landsat5_f dataset
'_f' = means located at hard disk
'_w' = means accessible via WCS service
'_m' = means use of the "local-mixed" dataset
"""
def __init__(self):
Reader.__init__(self)
#----
def get_maskname(self, filename):
"""
set the mask filename filter and get the mask filename(-list)
return mask-filename or list of mask-filenames (if list is provided)
"""
# check if list or single name has been provided
if type(filename) == list:
mask_filename = []
for elem in filename:
# for landsat5 - mask would be
base, extension = os.path.splitext(elem)
m_filename = "%s.nuages%s" % (base, extension)
mask_filename.append(m_filename)
elif type(filename) == str:
base, extension = os.path.splitext(filename)
mask_filename = "%s.nuages%s" % (base, extension)
return mask_filename
#----
def get_filelist(self, input_params, settings):
"""
gets the listing of filenames of available: Base files, GFP files and Mask files
"""
target_date = input_params['toi']
access_path1 = settings['dataset.'+input_params['dataset']]
pos1 = str.index(access_path1, '://')
acces_path = access_path1[pos1+3:]
base_fname_syntax = 'L*_' + target_date + '_L5_*_surf_pente_30m.tif'
gfp_fname_syntax = 'L*_*_L5_*_surf_pente_30m.tif'
base_flist = sorted(findfile(acces_path, base_fname_syntax))
gfp_flist = sorted(findfile(acces_path, gfp_fname_syntax))[0:input_params['period']]
# now remove any base_filenames from the gfp_flist, to avoid duplicates
gfp_flist = [item for item in gfp_flist if not item in base_flist]
# create the file-list for the mask-files
gfpmask_flist = self.get_maskname(gfp_flist)
base_mask_flist = self.get_maskname(base_flist)
return base_flist, base_mask_flist, gfp_flist, gfpmask_flist
#----
def base_getcover(self, file_list, input_params, settings, temp_storage, mask):
"""
Processing takes place on the original data (skipping copying, but
        maybe risking data corruption?); no data transformation (subsetting, CRS,
band subsetting) is currently implemented
"""
pass
#/************************************************************************/
#/* main() */
#/************************************************************************/
if __name__ == '__main__':
Reader()
| 37.763369 | 136 | 0.540659 |
7941ddf01269aa1b65e2cc1702e4afa3dfdd2a20 | 11,769 | py | Python | envycontrol.py | mphe/envycontrol | ab567d623e58e71a80db10cf01fda4046d14e237 | [
"MIT"
] | null | null | null | envycontrol.py | mphe/envycontrol | ab567d623e58e71a80db10cf01fda4046d14e237 | [
"MIT"
] | null | null | null | envycontrol.py | mphe/envycontrol | ab567d623e58e71a80db10cf01fda4046d14e237 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
import sys
import os
import re
import subprocess
# constants declaration
VERSION = '1.2.1'
# for integrated mode
BLACKLIST_PATH = '/etc/modprobe.d/blacklist-nvidia.conf'
BLACKLIST_CONTENT = '''# Do not modify this file
# Generated by EnvyControl
blacklist nouveau
blacklist nvidia
blacklist nvidia_drm
blacklist nvidia_uvm
blacklist nvidia_modeset
alias nouveau off
alias nvidia off
alias nvidia_drm off
alias nvidia_uvm off
alias nvidia_modeset off
'''
UDEV_PATH = '/lib/udev/rules.d/50-remove-nvidia.rules'
UDEV_CONTENT = '''# Do not modify this file
# Generated by EnvyControl
# Remove NVIDIA USB xHCI Host Controller devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
# Remove NVIDIA USB Type-C UCSI devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"
# Remove NVIDIA Audio devices, if present
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
# Finally, remove the NVIDIA dGPU
ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x03[0-9]*", ATTR{remove}="1"
'''
# for Nvidia mode
XORG_PATH = '/etc/X11/xorg.conf.d/90-nvidia.conf'
XORG_CONTENT_INTEL = '''# Do not modify this file
# Generated by EnvyControl
Section "ServerLayout"
Identifier "layout"
Screen 0 "nvidia"
Inactive "intel"
EndSection
Section "Device"
Identifier "nvidia"
Driver "nvidia"
BusID "PCI:{}"
EndSection
Section "Screen"
Identifier "nvidia"
Device "nvidia"
Option "AllowEmptyInitialConfiguration"
EndSection
Section "Device"
Identifier "intel"
Driver "modesetting"
EndSection
Section "Screen"
Identifier "intel"
Device "intel"
EndSection
'''
XORG_CONTENT_AMD = '''# Do not modify this file
# Generated by EnvyControl
Section "ServerLayout"
Identifier "layout"
Screen 0 "nvidia"
Inactive "amdgpu"
EndSection
Section "Device"
Identifier "nvidia"
Driver "nvidia"
BusID "PCI:{}"
EndSection
Section "Screen"
Identifier "nvidia"
Device "nvidia"
Option "AllowEmptyInitialConfiguration"
EndSection
Section "Device"
Identifier "amdgpu"
Driver "modesetting"
EndSection
Section "Screen"
Identifier "amd"
Device "amdgpu"
EndSection
'''
NVIDIA_MODESET_PATH = '/etc/modprobe.d/nvidia.conf'
NVIDIA_MODESET_CONTENT = '''# Do not modify this file
# Generated by EnvyControl
options nvidia-drm modeset=1
'''
# SDDM and LightDM require additional setup for Nvidia mode
XRANDR_SCRIPT = '''#!/bin/sh
# Do not modify this file
# Generated by EnvyControl
xrandr --setprovideroutputsource modesetting NVIDIA-0
xrandr --auto
'''
SDDM_SCRIPT_PATH = '/usr/share/sddm/scripts/Xsetup'
LIGHTDM_SCRIPT_PATH = '/etc/lightdm/nvidia.sh'
LIGHTDM_CONFIG_PATH = '/etc/lightdm/lightdm.conf.d/20-nvidia.conf'
LIGHTDM_CONFIG_CONTENT = '''# Do not modify this file
# Generated by EnvyControl
[Seat:*]
display-setup-script=/etc/lightdm/nvidia.sh
'''
# function declaration
def _check_root():
if not os.geteuid() == 0:
print('Error: this operation requires root privileges')
sys.exit(1)
def _check_status():
if os.path.exists(BLACKLIST_PATH) and os.path.exists(UDEV_PATH):
mode = 'integrated'
elif os.path.exists(XORG_PATH) and os.path.exists(NVIDIA_MODESET_PATH):
mode = 'nvidia'
else:
mode = 'hybrid'
print(f'Current graphics mode is: {mode}')
def _file_remover():
# utility function to cleanup environment before setting any mode
# don't raise warning if file is not found
try:
os.remove(BLACKLIST_PATH)
except OSError as e:
if e.errno != 2:
print(f'Error: {e}')
sys.exit(1)
try:
os.remove(UDEV_PATH)
except OSError as e:
if e.errno != 2:
print(f'Error: {e}')
sys.exit(1)
try:
os.remove(XORG_PATH)
except OSError as e:
if e.errno != 2:
print(f'Error: {e}')
sys.exit(1)
try:
os.remove(NVIDIA_MODESET_PATH)
except OSError as e:
if e.errno != 2:
print(f'Error: {e}')
sys.exit(1)
try:
os.remove(SDDM_SCRIPT_PATH)
except OSError as e:
if e.errno != 2:
print(f'Error: {e}')
sys.exit(1)
try:
os.remove(LIGHTDM_SCRIPT_PATH)
except OSError as e:
if e.errno != 2:
print(f'Error: {e}')
sys.exit(1)
try:
os.remove(LIGHTDM_CONFIG_PATH)
except OSError as e:
if e.errno != 2:
print(f'Error: {e}')
sys.exit(1)
def _get_igpu_vendor():
# automatically detect whether Intel or AMD iGPU is present
pattern_intel = re.compile(r'(VGA).*(Intel)')
pattern_amd = re.compile(r'(VGA).*(ATI|AMD|AMD\/ATI)')
lspci = subprocess.run(['lspci'], capture_output=True, text=True).stdout
if pattern_intel.findall(lspci):
return 'intel'
elif pattern_amd.findall(lspci):
return 'amd'
else:
print('Error: could not find Intel or AMD iGPU')
sys.exit(1)
def _get_pci_bus():
# dynamically get the PCI bus of the Nvidia dGPU
# exit if not found
pattern = re.compile(r'([0-9]{2}:[0-9a-z]{2}.[0-9]).*(VGA compatible controller: NVIDIA|3D controller: NVIDIA)')
lspci = subprocess.run(['lspci'], capture_output=True, text=True).stdout
try:
# X.org requires PCI:X:X:X format
return ':'.join([str(int(element)) for element in pattern.findall(lspci)[0][0].replace('.', ':').split(':')])
except Exception:
print('Error: could not find Nvidia GPU on PCI bus, please switch to hybrid mode first')
sys.exit(1)
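# --- Illustrative note (not part of the original script) ---
# An `lspci` line such as
#     "01:00.0 3D controller: NVIDIA Corporation GP107M [GeForce GTX 1050 Mobile]"
# is converted to the X.org-style bus id "1:0:0", which fills the "PCI:{}"
# template in the Xorg configuration above. The exact GPU model is hypothetical.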
def _check_display_manager():
# automatically detect the current Display Manager
# this depends on systemd
pattern = re.compile(r'(\/usr\/bin\/|\/usr\/sbin\/)(.*)')
try:
with open('/etc/systemd/system/display-manager.service',mode='r', encoding='utf-8') as f:
display_manager = pattern.findall(f.read())[0][1]
except Exception:
display_manager = ''
print('Warning: automatic Display Manager detection is not available')
finally:
return display_manager
def _setup_display_manager(display_manager):
# setup the Xrandr script if necessary
if display_manager == 'sddm':
try:
with open(SDDM_SCRIPT_PATH, mode='w', encoding='utf-8') as f:
f.write(XRANDR_SCRIPT)
except Exception as e:
print(f'Error: {e}')
sys.exit(1)
subprocess.run(['chmod','+x',SDDM_SCRIPT_PATH], stdout=subprocess.DEVNULL)
elif display_manager == 'lightdm':
try:
with open(LIGHTDM_SCRIPT_PATH, mode='w', encoding='utf-8') as f:
f.write(XRANDR_SCRIPT)
except Exception as e:
print(f'Error: {e}')
sys.exit(1)
subprocess.run(['chmod','+x',LIGHTDM_SCRIPT_PATH], stdout=subprocess.DEVNULL)
# create config
if not os.path.exists(os.path.dirname(LIGHTDM_CONFIG_PATH)):
try:
os.makedirs(os.path.dirname(LIGHTDM_CONFIG_PATH))
except Exception as e:
print(f'Error: {e}')
sys.exit(1)
with open(LIGHTDM_CONFIG_PATH, mode='w', encoding='utf-8') as f:
f.write(LIGHTDM_CONFIG_CONTENT)
elif display_manager not in ['', 'gdm', 'gdm3']:
print('Error: provided Display Manager is not valid')
print('Supported Display Managers: gdm, sddm, lightdm')
sys.exit(1)
def _rebuild_initramfs():
# Debian and its derivatives require rebuilding the initramfs after switching modes
is_debian = os.path.exists('/etc/debian_version')
if is_debian:
print('Rebuilding initramfs...')
p = subprocess.run(['update-initramfs', '-u', '-k', 'all'], stdout=subprocess.DEVNULL)
if p.returncode == 0:
print('Successfully rebuilt initramfs!')
else:
            print('Error: an error occurred rebuilding the initramfs')
def _switcher(mode, display_manager = ''):
# exit if not running as root
_check_root()
if mode == 'integrated':
_file_remover()
try:
# blacklist all nouveau and Nvidia modules
with open(BLACKLIST_PATH, mode='w', encoding='utf-8') as f:
f.write(BLACKLIST_CONTENT)
# power off the Nvidia GPU with Udev rules
with open(UDEV_PATH, mode='w', encoding='utf-8') as f:
f.write(UDEV_CONTENT)
except Exception as e:
print(f'Error: {e}')
sys.exit(1)
_rebuild_initramfs()
elif mode == 'nvidia':
_file_remover()
# detect if Intel or AMD iGPU
igpu_vendor = _get_igpu_vendor()
# get the Nvidia dGPU PCI bus
pci_bus = _get_pci_bus()
# detect Display Manager if not provided
if display_manager == '':
display_manager = _check_display_manager()
_setup_display_manager(display_manager)
try:
# create X.org config
with open(XORG_PATH, mode='w', encoding='utf-8') as f:
if igpu_vendor == 'intel':
f.write(XORG_CONTENT_INTEL.format(pci_bus))
elif igpu_vendor == 'amd':
f.write(XORG_CONTENT_AMD.format(pci_bus))
# modeset for Nvidia driver is required to prevent tearing on internal screen
with open(NVIDIA_MODESET_PATH, mode='w', encoding='utf-8') as f:
f.write(NVIDIA_MODESET_CONTENT)
except Exception as e:
print(f'Error: {e}')
sys.exit(1)
_rebuild_initramfs()
elif mode == 'hybrid':
# remove all files created by other EnvyControl modes
# Nvidia and nouveau drivers fallback to hybrid mode by default
_file_remover()
_rebuild_initramfs()
else:
print('Error: provided graphics mode is not valid')
print('Supported modes: integrated, nvidia, hybrid')
sys.exit(1)
print(f'Graphics mode set to: {mode}')
print('Please reboot your computer for changes to apply')
def _print_version():
print(f'EnvyControl {VERSION}')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--status', action='store_true', help='Query the current graphics mode set by EnvyControl')
parser.add_argument('--switch', type=str, metavar='MODE', action='store',
help='Switch the graphics mode. You need to reboot for changes to apply. Supported modes: integrated, nvidia, hybrid')
parser.add_argument('--dm', type=str, metavar='DISPLAY_MANAGER', action='store',
help='Manually specify your Display Manager. This is required only for systems without systemd. Supported DMs: gdm, sddm, lightdm')
parser.add_argument('--version', '-v', action='store_true', help='Print the current version and exit')
# print help if no arg is provided
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
if args.status:
_check_status()
elif args.version:
_print_version()
elif args.switch:
if args.dm and args.switch == 'nvidia':
_switcher(args.switch, args.dm)
else:
_switcher(args.switch)
elif args.dm and not args.switch:
print('Error: this option is intended to be used with --switch nvidia')
print('Example: sudo envycontrol --switch nvidia --dm sddm')
sys.exit(1)
if __name__ == '__main__':
main()
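# Illustrative command-line usage (a sketch; the "envycontrol" entry-point name is
# taken from the script's own help text, and --switch requires root privileges):
#   sudo envycontrol --status
#   sudo envycontrol --switch integrated
#   sudo envycontrol --switch nvidia --dm sddm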
| 30.808901 | 155 | 0.633019 |
7941df7ee157705072049a880567ebe91c62aea4 | 1,576 | py | Python | Macropad_Hotkeys/macros/win-edge.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 665 | 2017-09-27T21:20:14.000Z | 2022-03-31T09:09:25.000Z | Macropad_Hotkeys/macros/win-edge.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 641 | 2017-10-03T19:46:37.000Z | 2022-03-30T18:28:46.000Z | Macropad_Hotkeys/macros/win-edge.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 734 | 2017-10-02T22:47:38.000Z | 2022-03-30T14:03:51.000Z | # MACROPAD Hotkeys example: Microsoft Edge web browser for Windows
from adafruit_hid.keycode import Keycode # REQUIRED if using Keycode.* values
app = { # REQUIRED dict, must be named 'app'
'name' : 'Windows Edge', # Application name
'macros' : [ # List of button macros...
# COLOR LABEL KEY SEQUENCE
# 1st row ----------
(0x004000, '< Back', [Keycode.ALT, Keycode.LEFT_ARROW]),
(0x004000, 'Fwd >', [Keycode.ALT, Keycode.RIGHT_ARROW]),
(0x400000, 'Up', [Keycode.SHIFT, ' ']), # Scroll up
# 2nd row ----------
(0x202000, '- Size', [Keycode.CONTROL, Keycode.KEYPAD_MINUS]),
(0x202000, 'Size +', [Keycode.CONTROL, Keycode.KEYPAD_PLUS]),
(0x400000, 'Down', ' '), # Scroll down
# 3rd row ----------
(0x000040, 'Reload', [Keycode.CONTROL, 'r']),
(0x000040, 'Home', [Keycode.ALT, Keycode.HOME]),
(0x000040, 'Private', [Keycode.CONTROL, 'N']),
# 4th row ----------
(0x000000, 'Ada', [Keycode.CONTROL, 'n', -Keycode.COMMAND,
'www.adafruit.com\n']), # Adafruit in new window
(0x800000, 'Digi', [Keycode.CONTROL, 'n', -Keycode.COMMAND,
'www.digikey.com\n']), # Digi-Key in new window
(0x101010, 'Hacks', [Keycode.CONTROL, 'n', -Keycode.COMMAND,
'www.hackaday.com\n']), # Hack-a-Day in new win
# Encoder button ---
(0x000000, '', [Keycode.CONTROL, 'w']) # Close tab
]
}
| 49.25 | 77 | 0.521574 |
7941e13004cbd1b3c30644ced9bb63039b4bcfa3 | 2,330 | py | Python | WebKit/Tools/gtk/common.py | JavaScriptTesting/LJS | 9818dbdb421036569fff93124ac2385d45d01c3a | [
"Apache-2.0"
] | 1 | 2019-06-18T06:52:54.000Z | 2019-06-18T06:52:54.000Z | WebKit/Tools/gtk/common.py | JavaScriptTesting/LJS | 9818dbdb421036569fff93124ac2385d45d01c3a | [
"Apache-2.0"
] | null | null | null | WebKit/Tools/gtk/common.py | JavaScriptTesting/LJS | 9818dbdb421036569fff93124ac2385d45d01c3a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright (C) 2011 Igalia S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os
import subprocess
import sys
script_dir = None
build_dir = None
def script_path(*args):
global script_dir
if not script_dir:
script_dir = os.path.join(os.path.dirname(__file__), '..', 'Scripts')
return os.path.join(*(script_dir,) + args)
def top_level_path(*args):
return os.path.join(*((script_path('..', '..'),) + args))
def get_build_path():
global build_dir
if build_dir:
return build_dir
def is_valid_build_directory(path):
return os.path.exists(os.path.join(path, 'GNUmakefile'))
build_types = ['Release', 'Debug']
if '--debug' in sys.argv:
build_types.reverse()
for build_type in build_types:
build_dir = top_level_path('WebKitBuild', build_type)
if is_valid_build_directory(build_dir):
return build_dir
# distcheck builds in a directory named _build in the top-level path.
build_dir = top_level_path("_build")
if is_valid_build_directory(build_dir):
return build_dir
build_dir = top_level_path()
if is_valid_build_directory(build_dir):
return build_dir
build_dir = top_level_path("WebKitBuild")
if is_valid_build_directory(build_dir):
return build_dir
print 'Could not determine build directory.'
sys.exit(1)
def build_path(*args):
return os.path.join(*(get_build_path(),) + args)
def number_of_cpus():
process = subprocess.Popen([script_path('num-cpus')], stdout=subprocess.PIPE)
stdout = process.communicate()[0]
return int(stdout)
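# Illustrative usage (a sketch; the path components below are hypothetical):
#   build_path('Programs', 'GtkLauncher')  # -> <detected build dir>/Programs/GtkLauncher
#   number_of_cpus()                       # -> e.g. 4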
| 29.493671 | 81 | 0.707725 |
7941e1a47f8bfd3693956b90a156b5c63e99d1a3 | 883 | py | Python | Medeina/config.py | Daniel-Davies/Medeina | 94ee3f083032f65570e6073b874aba1820451f26 | [
"MIT"
] | 1 | 2020-06-28T00:18:15.000Z | 2020-06-28T00:18:15.000Z | Medeina/config.py | Daniel-Davies/Medeina | 94ee3f083032f65570e6073b874aba1820451f26 | [
"MIT"
] | null | null | null | Medeina/config.py | Daniel-Davies/Medeina | 94ee3f083032f65570e6073b874aba1820451f26 | [
"MIT"
] | null | null | null | import os.path
DATASETS = "datasets"
WEB = "interactionWeb"
TAXA = "taxonomicIndex"
# CONFIDENCE = 'confidences'
LINKS = "links"
EXCEPTIONS = "reorderedTaxaInteractions"
REALNAMES = "speciesStringNames"
IDTRACKER = (
"numericCounter-b2ca94aee362f455a41493a0d28b98bc5074065b0f96cbb95028ead20b1c72ea"
)
my_path = os.path.abspath(os.path.dirname(__file__))
ROOT = my_path
ZIPDIR = os.path.join(my_path, "CompressedWebStore.zip")
BASEDIR = os.path.join(my_path, "CompressedWebStore")
TAXA_OF_INTEREST = ["kingdom", "phylum", "order", "class", "family", "genus"]
APIMAX = 100
APIURL = "http://resolver.globalnames.org/name_resolvers.json"
DATASET_METAS = [
"interactionType",
"evidencedBy",
"webName",
"citation",
"location",
"date",
]
LINK_METAS = ["interactionType", "evidencedBy", "location"]
PRECOMPUTER_STORE_PATH = "TaxaMapper/newMappingIndex"
| 22.641026 | 85 | 0.732729 |
7941e1eeef41a7113b9de950ba38f7fac8277a12 | 7,556 | py | Python | pgmpy/estimators/CITests.py | predictive-analytics-lab/pgmpy | 6c2a31641adc72793acd130d007190fdb1632271 | [
"MIT"
] | null | null | null | pgmpy/estimators/CITests.py | predictive-analytics-lab/pgmpy | 6c2a31641adc72793acd130d007190fdb1632271 | [
"MIT"
] | null | null | null | pgmpy/estimators/CITests.py | predictive-analytics-lab/pgmpy | 6c2a31641adc72793acd130d007190fdb1632271 | [
"MIT"
] | null | null | null | from warnings import warn
import numpy as np
import pandas as pd
from scipy import stats
def chi_square(X, Y, Z, data, **kwargs):
"""
Chi-square conditional independence test.
Tests the null hypothesis that X is independent from Y given Zs.
This is done by comparing the observed frequencies with the expected
frequencies if X,Y were conditionally independent, using a chisquare
deviance statistic. The expected frequencies given independence are
`P(X,Y,Zs) = P(X|Zs)*P(Y|Zs)*P(Zs)`. The latter term can be computed
    as `P(X,Zs)*P(Y,Zs)/P(Zs)`.
Parameters
----------
X: int, string, hashable object
A variable name contained in the data set
Y: int, string, hashable object
A variable name contained in the data set, different from X
Zs: list of variable names
A list of variable names contained in the data set, different from X and Y.
This is the separating set that (potentially) makes X and Y independent.
Default: []
Returns
-------
chi2: float
The chi2 test statistic.
p_value: float
The p_value, i.e. the probability of observing the computed chi2
statistic (or an even higher value), given the null hypothesis
that X _|_ Y | Zs.
sufficient_data: bool
A flag that indicates if the sample size is considered sufficient.
As in [4], require at least 5 samples per parameter (on average).
That is, the size of the data set must be greater than
`5 * (c(X) - 1) * (c(Y) - 1) * prod([c(Z) for Z in Zs])`
(c() denotes the variable cardinality).
References
----------
[1] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 18.2.2.3 (page 789)
[2] Neapolitan, Learning Bayesian Networks, Section 10.3 (page 600ff)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
[3] Chi-square test https://en.wikipedia.org/wiki/Pearson%27s_chi-squared_test#Test_of_independence
[4] Tsamardinos et al., The max-min hill-climbing BN structure learning algorithm, 2005, Section 4
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import ConstraintBasedEstimator
>>> data = pd.DataFrame(np.random.randint(0, 2, size=(50000, 4)), columns=list('ABCD'))
>>> data['E'] = data['A'] + data['B'] + data['C']
>>> c = ConstraintBasedEstimator(data)
>>> print(c.test_conditional_independence('A', 'C')) # independent
(0.95035644482050263, 0.8132617142699442, True)
>>> print(c.test_conditional_independence('A', 'B', 'D')) # independent
(5.5227461320130899, 0.59644169242588885, True)
>>> print(c.test_conditional_independence('A', 'B', ['D', 'E'])) # dependent
(9192.5172226063387, 0.0, True)
"""
if isinstance(Z, (frozenset, list, set, tuple)):
Z = list(Z)
else:
Z = [Z]
state_names = kwargs["state_names"]
num_params = (
(len(state_names[X]) - 1)
* (len(state_names[Y]) - 1)
* np.prod([len(state_names[z]) for z in Z])
)
sufficient_data = len(data) >= num_params * 5
if not sufficient_data:
warn(
"Insufficient data for testing {0} _|_ {1} | {2}. ".format(X, Y, Z)
+ "At least {0} samples recommended, {1} present.".format(5 * num_params, len(data))
)
# compute actual frequency/state_count table:
# = P(X,Y,Zs)
XYZ_state_counts = pd.crosstab(index=data[X], columns=[data[Y]] + [data[z] for z in Z])
# reindex to add missing rows & columns (if some values don't appear in data)
row_index = state_names[X]
column_index = pd.MultiIndex.from_product(
[state_names[Y]] + [state_names[z] for z in Z], names=[Y] + Z
)
if not isinstance(XYZ_state_counts.columns, pd.MultiIndex):
XYZ_state_counts.columns = pd.MultiIndex.from_arrays([XYZ_state_counts.columns])
XYZ_state_counts = XYZ_state_counts.reindex(index=row_index, columns=column_index).fillna(0)
# compute the expected frequency/state_count table if X _|_ Y | Zs:
# = P(X|Zs)*P(Y|Zs)*P(Zs) = P(X,Zs)*P(Y,Zs)/P(Zs)
if Z:
XZ_state_counts = XYZ_state_counts.sum(axis=1, level=Z) # marginalize out Y
YZ_state_counts = XYZ_state_counts.sum().unstack(Z) # marginalize out X
else:
XZ_state_counts = XYZ_state_counts.sum(axis=1)
YZ_state_counts = XYZ_state_counts.sum()
Z_state_counts = YZ_state_counts.sum() # marginalize out both
XYZ_expected = pd.DataFrame(index=XYZ_state_counts.index, columns=XYZ_state_counts.columns)
for X_val in XYZ_expected.index:
if Z:
for Y_val in XYZ_expected.columns.levels[0]:
XYZ_expected.loc[X_val, Y_val] = (
XZ_state_counts.loc[X_val] * YZ_state_counts.loc[Y_val] / Z_state_counts
).values
else:
for Y_val in XYZ_expected.columns:
XYZ_expected.loc[X_val, Y_val] = (
XZ_state_counts.loc[X_val] * YZ_state_counts.loc[Y_val] / float(Z_state_counts)
)
observed = XYZ_state_counts.values.flatten()
expected = XYZ_expected.fillna(0).values.flatten()
# remove elements where the expected value is 0;
# this also corrects the degrees of freedom for chisquare
observed, expected = zip(*((o, e) for o, e in zip(observed, expected) if not e == 0))
chi2, significance_level = stats.chisquare(observed, expected)
return chi2, significance_level
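# Illustrative direct call of chi_square (a sketch, not part of the original module;
# the column names and the state_names mapping below are made up for the example):
#
#   data = pd.DataFrame(np.random.randint(0, 2, size=(1000, 3)), columns=list('XYZ'))
#   state_names = {col: sorted(data[col].unique()) for col in data.columns}
#   chi2, p_value = chi_square('X', 'Y', ['Z'], data, state_names=state_names)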
def pearsonr(X, Y, Z, data):
"""
Computes Pearson correlation coefficient and p-value for testing non-correlation. Should be used
    only on continuous data. When the conditioning set ``Z`` is non-empty, linear
    regression is used and the Pearson coefficient is computed on the residuals.
Parameters
----------
X: str
The first variable for testing the independence condition X _|_ Y | Z
Y: str
The second variable for testing the independence condition X _|_ Y | Z
Z: list/array-like
A list of conditional variable for testing the condition X _|_ Y | Z
data: pandas.DataFrame
The dataset in which to test the indepenedence condition.
Returns
-------
Pearson's correlation coefficient: float
p-value: float
References
----------
[1] https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
[2] https://en.wikipedia.org/wiki/Partial_correlation#Using_linear_regression
"""
# Step 1: Test if the inputs are correct
if not hasattr(Z, "__iter__"):
raise ValueError("Variable Z. Expected type: iterable. Got type: {t}".format(t=type(Z)))
else:
Z = list(Z)
if not isinstance(data, pd.DataFrame):
raise ValueError(
"Variable data. Expected type: pandas.DataFrame. Got type: {t}".format(t=type(data))
)
# Step 2: If Z is empty compute a non-conditional test.
if len(Z) == 0:
return stats.pearsonr(data.loc[:, X], data.loc[:, Y])
# Step 3: If Z is non-empty, use linear regression to compute residuals and test independence on it.
else:
X_coef = np.linalg.lstsq(data.loc[:, Z], data.loc[:, X], rcond=None)[0]
Y_coef = np.linalg.lstsq(data.loc[:, Z], data.loc[:, Y], rcond=None)[0]
residual_X = data.loc[:, X] - data.loc[:, Z].dot(X_coef)
residual_Y = data.loc[:, Y] - data.loc[:, Z].dot(Y_coef)
return stats.pearsonr(residual_X, residual_Y)
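# Illustrative usage of pearsonr (a sketch; the column names are hypothetical):
#
#   data = pd.DataFrame(np.random.randn(500, 3), columns=['A', 'B', 'C'])
#   corr, p_value = pearsonr('A', 'B', ['C'], data)   # partial correlation given C
#   corr, p_value = pearsonr('A', 'B', [], data)      # plain Pearson correlation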
| 39.560209 | 106 | 0.645844 |
7941e283964f544bb443152b4877c38a9edc6cae | 4,105 | py | Python | template_site_name/settings.py | Hack3rOneness/django-site-template | 90f9649d4130a5c0c5e105071f1338663a2e239b | [
"MIT"
] | 8 | 2019-04-22T04:37:39.000Z | 2021-05-20T02:49:10.000Z | template_site_name/settings.py | Hack3rOneness/django-site-template | 90f9649d4130a5c0c5e105071f1338663a2e239b | [
"MIT"
] | 2 | 2021-03-25T23:53:40.000Z | 2021-06-10T23:04:55.000Z | template_site_name/settings.py | Hack3rOneness/django-site-template | 90f9649d4130a5c0c5e105071f1338663a2e239b | [
"MIT"
] | 15 | 2018-07-09T01:11:39.000Z | 2022-03-23T19:12:29.000Z | """
Django settings for template_site_name project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y5&e!$t0#tk9^2pf-bplp8=7(f7$7q^!#!50(3+ihulo#f+=l_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'infopages',
'homepage',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
# 'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'template_site_name.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'template_site_name.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
| 27.92517 | 91 | 0.709622 |
7941e3446d22ac84d389605fa04fe1097e041c11 | 1,257 | py | Python | django_template_obfuscator/tests.py | rafahuelin/django-template-obfuscator | 5211bda5b9f37e8da6a44425f0362837e9d5a6ad | [
"MIT"
] | 3 | 2019-09-20T14:39:17.000Z | 2022-03-15T06:41:51.000Z | django_template_obfuscator/tests.py | rafahuelin/django-template-obfuscator | 5211bda5b9f37e8da6a44425f0362837e9d5a6ad | [
"MIT"
] | 5 | 2020-03-31T07:01:09.000Z | 2021-06-02T00:25:48.000Z | example/django_template_obfuscator/tests.py | rafahuelin/django-template-obfuscator | 5211bda5b9f37e8da6a44425f0362837e9d5a6ad | [
"MIT"
] | 1 | 2019-09-20T14:39:01.000Z | 2019-09-20T14:39:01.000Z | import unittest
from os import path
from selenium import webdriver
from templatetags.encoder import obfuscation
APP_DIR = path.dirname(path.abspath(__file__))
class ObfuscationTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_text_obfuscated_and_deobfuscated_equals_original_text(self):
target = "Text difficult to scrape."
obfuscated = obfuscation(target)
# import JS in charge of deobfuscation
filepath = APP_DIR + r'\static\js\django_template_obfuscator.js'
with open(filepath) as f:
js = f"<script>{f.read()}</script>"
html_content = f"""
<p class="obfuscated">
{obfuscated}
</p>
"""
self.browser.get(f"data:text/html;charset=utf-8, {html_content} {js}")
deobfuscated_target = self.browser.find_element_by_xpath('//*[@class="obfuscated"]').text
self.assertEqual(target, deobfuscated_target)
def test_obfuscate_function_uses_rot13_encode_algorithm(self):
obfuscated = obfuscation('Abracadabra!')
self.assertEqual(obfuscated, 'Noenpnqnoen!')
if __name__ == '__main__':
unittest.main()
| 27.933333 | 97 | 0.666667 |
7941e36ec83f81d453e71f7189dab4f2a571513f | 1,852 | py | Python | neutron/agent/l3/dvr_snat_ns.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | null | null | null | neutron/agent/l3/dvr_snat_ns.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-27T00:48:55.000Z | 2015-04-21T05:29:37.000Z | neutron/agent/l3/dvr_snat_ns.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-26T00:55:17.000Z | 2020-03-01T17:05:40.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.agent.l3 import namespaces
from neutron.agent.linux import ip_lib
from neutron.common import constants
LOG = logging.getLogger(__name__)
SNAT_NS_PREFIX = 'snat-'
SNAT_INT_DEV_PREFIX = constants.SNAT_INT_DEV_PREFIX
class SnatNamespace(namespaces.Namespace):
def __init__(self, router_id, agent_conf, driver, use_ipv6):
self.router_id = router_id
name = self.get_snat_ns_name(router_id)
super(SnatNamespace, self).__init__(
name, agent_conf, driver, use_ipv6)
@classmethod
def get_snat_ns_name(cls, router_id):
return namespaces.build_ns_name(SNAT_NS_PREFIX, router_id)
def delete(self):
ns_ip = ip_lib.IPWrapper(namespace=self.name)
if ns_ip.netns.exists(self.name):
for d in ns_ip.get_devices(exclude_loopback=True):
if d.name.startswith(SNAT_INT_DEV_PREFIX):
LOG.debug('Unplugging DVR device %s', d.name)
self.driver.unplug(d.name, namespace=self.name,
prefix=SNAT_INT_DEV_PREFIX)
# TODO(mrsmith): delete ext-gw-port
LOG.debug('DVR: destroy snat ns: %s', self.name)
super(SnatNamespace, self).delete()
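# Illustrative example (a sketch, assuming namespaces.build_ns_name simply joins
# the prefix and the router id):
#   SnatNamespace.get_snat_ns_name('d3b0e2f4-...')  ->  'snat-d3b0e2f4-...'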
| 38.583333 | 78 | 0.683585 |
7941e3fb6202d27762536634a13a14dd20ad336d | 3,896 | py | Python | token_management_system/token_manager/api/views.py | pawanvirsingh/token_management | b1ef01e19e37a61c627c6712917807424e77b823 | [
"Apache-2.0"
] | null | null | null | token_management_system/token_manager/api/views.py | pawanvirsingh/token_management | b1ef01e19e37a61c627c6712917807424e77b823 | [
"Apache-2.0"
] | null | null | null | token_management_system/token_manager/api/views.py | pawanvirsingh/token_management | b1ef01e19e37a61c627c6712917807424e77b823 | [
"Apache-2.0"
] | null | null | null | from rest_framework import status
from rest_framework.decorators import action
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.mixins import DestroyModelMixin, RetrieveModelMixin, \
UpdateModelMixin
from rest_framework.viewsets import GenericViewSet
from token_management_system.token_manager.api.serializers import TokenSerializer
from token_management_system.token_manager.models import Token
class TokenViewset(RetrieveModelMixin, UpdateModelMixin,
DestroyModelMixin, GenericViewSet):
serializer_class = TokenSerializer
permission_classes = (AllowAny,)
def get_queryset(self):
return Token.objects.all()
def list(self, request, *args, **kwargs):
result = Token.assign()
if result:
return Response(data={'token': result }, status=status.HTTP_200_OK)
return Response(status=status.HTTP_404_NOT_FOUND)
@action(methods=['get'], detail=True,url_path='unblock')
def unblock(self, request, pk=None):
        object = self.get_object()
if object.status == Token.FREE:
return Response({"success": False, "message":"You can not unblock a free token "}, status=status.HTTP_400_BAD_REQUEST)
object.unblock_token()
return Response({"success":True},status=status.HTTP_200_OK)
@action(methods=['get'], detail=True, url_path='keep-alive')
def alive(self, request, pk=None):
        object = self.get_object()
if object.status == Token.FREE:
return Response({"success": False, "message": "You can not keep alive a already free token"},
status=status.HTTP_400_BAD_REQUEST)
object.mark_token_alive()
return Response({"success": True}, status=status.HTTP_200_OK)
# class GetPlanView(ListModelMixin, GenericViewSet):
# serializer_class = serializers.PlanValiditySerializer
# permission_classes = (ServerPermission,)
# filter_backends = [DjangoFilterBackend]
# filterset_fields = ['customer__user_id', 'customer__product__name']
#
# def get_queryset(self):
# return Subscription.objects.filter(expires_at__gte=datetime.date.today()-datetime.timedelta(days=7))
# class HostedPageView(GenericViewSet, CreateModelMixin):
# serializer_class = serializers.HostedPageSerialzer
# permission_classes = [ServerPermission]
#
# def create(self, request, *args, **kwargs):
# serializer = self.get_serializer(data=request.data)
# serializer.is_valid(raise_exception=True)
# headers = self.get_success_headers(serializer.data)
# return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
#
#
# class PlanPaymentViewSet(GenericViewSet, CreateModelMixin):
# serializer_class = serializers.HostedPagePaymentSerialzer
# permission_classes = [ServerPermission]
#
# def create(self, request, *args, **kwargs):
# serializer = self.get_serializer(data=request.data)
# serializer.is_valid(raise_exception=True)
# headers = self.get_success_headers(serializer.data)
# return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
#
#
# class RenewSubscriptionPaymentViewSet(GenericViewSet, CreateModelMixin):
# serializer_class = serializers.SubscriptionRenewPaymentSerialzer
# permission_classes = [ServerPermission]
#
# def create(self, request, *args, **kwargs):
# serializer = self.get_serializer(data=request.data)
# serializer.is_valid(raise_exception=True)
# headers = self.get_success_headers(serializer.data)
# return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
| 41.892473 | 130 | 0.719969 |
7941e4f457d8f77bb56910fca9c64f649b0a699f | 1,963 | py | Python | src/plot/plot_int.py | Zimiao1025/Sesica | 8b96a9e35a423ef6bd8561ba296ca3f55057af06 | [
"BSD-2-Clause"
] | null | null | null | src/plot/plot_int.py | Zimiao1025/Sesica | 8b96a9e35a423ef6bd8561ba296ca3f55057af06 | [
"BSD-2-Clause"
] | null | null | null | src/plot/plot_int.py | Zimiao1025/Sesica | 8b96a9e35a423ef6bd8561ba296ca3f55057af06 | [
"BSD-2-Clause"
] | null | null | null | from itertools import cycle
import joblib
import matplotlib.pyplot as plt
def bar_fig(model_path, feats_name, fig_path):
# 利用shap打印特征重要度
gbm = joblib.load(model_path)
scores = gbm.feature_importances_
print(scores)
plt.figure(0)
bar_width = 0.4
x1 = []
for i in range(len(scores)):
x1.append(i)
plt.bar(x1, scores, bar_width, color='crimson', align="center", label="scores", alpha=0.8)
plt.xticks(x1, feats_name, size=10)
plt.title('Feature importance for LTR', fontsize=18)
plt.xlabel('method', fontsize=16)
plt.ylabel('Feature importance', fontsize=16)
ax_width = 1
ax = plt.gca() # 获取边框
ax.spines['bottom'].set_linewidth(ax_width)
ax.spines['left'].set_linewidth(ax_width)
ax.spines['top'].set_linewidth(ax_width)
ax.spines['right'].set_linewidth(ax_width)
plt.savefig(fig_path, bbox_inches='tight')
plt.close(0)
def pie_fig(model_path, recipe, fig_path):
gbm = joblib.load(model_path)
scores = gbm.feature_importances_
color_sets = cycle(['crimson', 'navy', 'teal', 'darkorange', 'purple', 'gray', 'green', 'dodgerblue', 'gold',
'lightcoral', 'red'])
def make_auto_pct(values):
def my_auto_pct(pct):
total = sum(values)
val = int(round(pct * total / 100.0))
            # label each pie slice with both the raw value and its percentage
return '{p:.1f}% ({v:d})'.format(p=pct, v=val)
return my_auto_pct
_, ax = plt.subplots(subplot_kw=dict(aspect="equal"))
ax.pie(scores, startangle=90, radius=1.3, pctdistance=0.9, colors=color_sets, autopct=make_auto_pct(scores),
textprops={'fontsize': 13, 'color': 'k'})
ax.legend(recipe, bbox_to_anchor=(1.0, 1.0), loc='center left')
ax.text(0.1, 1.6, 'Feature importance for LTR', fontsize=18, ha='center', va='top', wrap=True)
# plt.title('Feature importance for LTR', fontsize=18)
plt.savefig(fig_path, bbox_inches='tight')
plt.close(0)
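# Illustrative calls (a sketch; the file paths and method names are placeholders):
#   names = ['method_a', 'method_b', 'method_c']
#   bar_fig('ltr_model.pkl', names, 'feature_importance_bar.png')
#   pie_fig('ltr_model.pkl', names, 'feature_importance_pie.png')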
| 35.053571 | 113 | 0.642384 |
7941e76e08aefc2c14e31c6a4a136434704c67f8 | 467 | py | Python | users/migrations/0002_auto_20210313_1801.py | ezekieltech/eduTech-backend | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | [
"MIT"
] | null | null | null | users/migrations/0002_auto_20210313_1801.py | ezekieltech/eduTech-backend | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | [
"MIT"
] | 15 | 2021-01-02T17:43:37.000Z | 2021-02-13T12:02:11.000Z | users/migrations/0002_auto_20210313_1801.py | ezekieltech/eduTech-backend | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-03-13 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='customuser',
name='role',
field=models.CharField(choices=[('Mentor', 'Mentor'), ('Mentee', 'Mentee'), ('Edu-Consultant', 'Edu-Consultant')], max_length=20),
),
]
| 24.578947 | 142 | 0.593148 |
7941e9f7e10a1ea7006e313a82152b07ba584c86 | 6,311 | py | Python | .vscode/arctictern.py | Code-Institute-Submissions/patmcdonald-PP1-HTML-CSS_FebResub | ba2d69f3ca0f670a34cee6471f90d905115e493d | [
"BSD-Source-Code"
] | 1 | 2022-01-13T13:51:02.000Z | 2022-01-13T13:51:02.000Z | .vscode/arctictern.py | Code-Institute-Submissions/patmcdonald-PP1-HTML-CSS_FebResub | ba2d69f3ca0f670a34cee6471f90d905115e493d | [
"BSD-Source-Code"
] | 29 | 2022-01-22T19:05:56.000Z | 2022-03-01T08:57:14.000Z | .vscode/arctictern.py | Code-Institute-Submissions/patmcdonald-PP1-HTML-CSS_FebResub | ba2d69f3ca0f670a34cee6471f90d905115e493d | [
"BSD-Source-Code"
] | 1 | 2022-02-21T12:06:01.000Z | 2022-02-21T12:06:01.000Z | """
arctictern.py
A little script that does a big migration
"""
import json
import os
import requests
import shutil
import subprocess
import sys
from os.path import exists
BASE_URL = "https://raw.githubusercontent.com/Code-Institute-Org/gitpod-full-template/master/"
BACKUP = True
MIGRATE = False
CURRENT_VERSION = 1.0
THIS_VERSION = 1.0
MIGRATE_FILE_LIST = [{"filename": ".theia/settings.json",
"url": ".vscode/settings.json"
},
{"filename": ".gitpod.yml",
"url": ".gitpod.yml"
},
{"filename": ".gitpod.dockerfile",
"url": ".gitpod.dockerfile"
},
{"filename": ".theia/heroku_config.sh",
"url": ".vscode/heroku_config.sh"
},
{"filename": ".theia/uptime.sh",
"url": ".vscode/uptime.sh"
},
{"filename": ".theia/init_tasks.sh",
"url": ".vscode/init_tasks.sh"
}]
UPGRADE_FILE_LIST = [{"filename": ".vscode/client.cnf",
"url": ".vscode/client.cnf"
},
{"filename": ".vscode/mysql.cnf",
"url": ".vscode/mysql.cnf"
},
{"filename": ".vscode/settings.json",
"url": ".vscode/settings.json"
},
{"filename": ".vscode/launch.json",
"url": ".vscode/launch.json"
},
{"filename": ".gitpod.yml",
"url": ".gitpod.yml"
},
{"filename": ".gitpod.dockerfile",
"url": ".gitpod.dockerfile"
},
{"filename": ".vscode/heroku_config.sh",
"url": ".vscode/heroku_config.sh"
},
{"filename": ".vscode/init_tasks.sh",
"url": ".vscode/init_tasks.sh"
},
{"filename": ".vscode/uptime.sh",
"url": ".vscode/uptime.sh"
},
{"filename": ".vscode/make_url.py",
"url": ".vscode/make_url.py"
},
{"filename": ".vscode/arctictern.py",
"url": ".vscode/arctictern.py"
}]
FINAL_LINES = "\nexport POST_UPGRADE_RUN=1\nsource ~/.bashrc\n"
def needs_upgrade():
"""
Checks the version of the current template against
this version.
Returns True if upgrade is needed, False if not.
"""
if exists(".vscode/version.txt"):
with open(".vscode/version.txt", "r") as f:
THIS_VERSION = float(f.read().strip())
else:
THIS_VERSION = 1.0
with open(".vscode/version.txt", "w") as f:
f.write(str(THIS_VERSION))
r = requests.get(BASE_URL + ".vscode/version.txt")
CURRENT_VERSION = float(r.content)
print(f"Upstream version: {CURRENT_VERSION}")
print(f"Local version: {THIS_VERSION}")
return CURRENT_VERSION > THIS_VERSION
def build_post_upgrade():
r = requests.get(BASE_URL + ".vscode/upgrades.json")
upgrades = json.loads(r.content.decode("utf-8"))
content = ""
for k,v in upgrades.items():
if float(k) > THIS_VERSION:
print(f"Adding version changes for {k} to post_upgrade.sh")
content += v
if content:
content += FINAL_LINES
with open(".vscode/post_upgrade.sh", "w") as f:
f.writelines(content)
print("Built post_upgrade.sh. Restart your workspace for it to take effect")
def process(file, suffix):
"""
Replaces and optionally backs up the files that
need to be changed.
Arguments: file - a path and filename
suffix - the suffix to the BASE_URL
"""
if BACKUP:
try:
shutil.copyfile(file, f"{file}.bak")
except FileNotFoundError:
print(f"{file} not found, a new one will be created")
with open(file, "wb") as f:
r = requests.get(BASE_URL + suffix)
f.write(r.content)
def start_migration():
"""
Calls the process function and
renames the directory
"""
if not os.path.isdir(".theia") and MIGRATE:
sys.exit("The .theia directory does not exist")
FILE_LIST = MIGRATE_FILE_LIST if MIGRATE else UPGRADE_FILE_LIST
if not MIGRATE and not os.path.isdir(".vscode"):
print("Creating .vscode directory")
os.mkdir(".vscode")
for file in FILE_LIST:
print(f"Processing: {file['filename']}")
process(file["filename"], file["url"])
if MIGRATE and os.path.isdir(".vscode"):
print(".vscode directory already exists")
if input("Overwrite? Y/N ").lower() == "y":
shutil.rmtree(".vscode")
else:
print("You will need to manually remove the .theia directory after migration.")
if MIGRATE and not os.path.isdir(".vscode"):
print("Renaming directory")
os.rename(".theia", ".vscode")
if not MIGRATE and needs_upgrade():
build_post_upgrade()
print("Changes saved.")
print("Please add, commit and push to GitHub.")
print("You may need to stop and restart your workspace for")
print("the changes to take effect.")
if __name__ == "__main__":
BACKUP = "--nobackup" not in sys.argv
MIGRATE = "--migrate" in sys.argv
print("CI Template Migration Utility 0.2")
print("---------------------------------")
print("The default action is to upgrade the workspace to the latest version.")
print(f"Usage: python3 {sys.argv[0]} [--nobackup --migrate]")
if not BACKUP:
print("If the --nobackup switch is provided, then changed files will not be backed up.")
if not MIGRATE:
print("If the --migrate switch is provided, the repo will be migrated from Theia to VS Code")
print()
if input("Start? Y/N ").lower() == "y":
start_migration()
else:
sys.exit("Migration cancelled by the user")
| 31.873737 | 101 | 0.524164 |
7941ebf7d25e463c031750c846f7658d800f0e9d | 4,608 | py | Python | tests/ut/python/parallel/test_sparse_feature_bprop.py | PowerOlive/mindspore | bda20724a94113cedd12c3ed9083141012da1f15 | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | tests/ut/python/parallel/test_sparse_feature_bprop.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | tests/ut/python/parallel/test_sparse_feature_bprop.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test sparse feature bprop """
import pytest
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import context
from mindspore.common.parameter import Parameter
from mindspore.common.tensor import Tensor
from mindspore.ops import composite as C, operations as P
from mindspore.ops.operations.comm_ops import AllReduce
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import TrainOneStepCell, Adam
grad_all = C.GradOperation(get_all=True)
@pytest.fixture(name="test_context")
def _test_context():
context.set_context(enable_sparse=True)
yield
context.set_context(enable_sparse=False)
context.reset_auto_parallel_context()
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x):
return grad_all(self.network)(x)
def test_bprop_with_sparse_feature_allreduce(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="hybrid_parallel")
class Net(nn.Cell):
def __init__(self, axis=0, shape=None):
super(Net, self).__init__()
if shape is None:
shape = [8, 8]
self.all_reduce = AllReduce()
self.gatherv2 = P.SparseGatherV2()
self.index = Tensor(np.ones(shape), dtype=ms.int32)
self.axis = axis
def construct(self, x):
out = self.all_reduce(x)
out = self.gatherv2(out, self.index, self.axis)
return out
net = GradWrap(Net())
x = Tensor(np.ones([64, 64]), dtype=ms.float32)
net.set_train()
_cell_graph_executor.compile(net, x)
def test_bprop_with_sparse_feature_mirror(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="semi_auto_parallel")
class Net(nn.Cell):
def __init__(self, shape=None):
super(Net, self).__init__()
if shape is None:
shape = [8, 8]
self.index = Tensor(np.ones(shape), dtype=ms.int32)
self.embeddinglookup = nn.EmbeddingLookup(64, 64, param_init='ones')
self.embeddinglookup.embeddinglookup.shard(((1, 1), (8, 1)))
def construct(self, x, b):
out = self.embeddinglookup(self.index)
return out
_x = Tensor(np.ones([126, 64, 32]), dtype=ms.float32)
_b = Tensor(np.ones([126, 64, 32]), dtype=ms.float32)
def compile_net(net):
optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_train()
_cell_graph_executor.compile(train_net, _x, _b)
net = Net()
compile_net(net)
def test_bprop_with_sparse_feature_dataparallel(test_context):
context.set_auto_parallel_context(device_num=8, global_rank=0, parallel_mode="data_parallel")
class Net(nn.Cell):
def __init__(self, axis=0, shape=None):
super(Net, self).__init__()
if shape is None:
shape = [8, 8]
weight = Tensor(np.ones([64, 64]), dtype=ms.float32)
self.weight = Parameter(weight, "w")
self.index = Tensor(np.ones(shape), dtype=ms.int32)
self.axis = axis
self.gatherv2 = P.SparseGatherV2()
def construct(self, x, b):
out = self.gatherv2(self.weight, self.index, self.axis)
return out
_x = Tensor(np.ones([126, 64, 32]), dtype=ms.float32)
_b = Tensor(np.ones([126, 64, 32]), dtype=ms.float32)
def compile_net(net):
optimizer = Adam(net.trainable_params(), learning_rate=0.1, loss_scale=1024.0, weight_decay=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_train()
_cell_graph_executor.compile(train_net, _x, _b)
net = Net()
compile_net(net)
| 34.133333 | 104 | 0.657552 |
7941ec507bc59e002087d8508825b95e65385c1f | 14,927 | py | Python | volttrontesting/platform/dbutils/test_influxdbutils.py | architpansare/volttron | ea8e7b3c8af9ce08dc736bd8a5c7b513d6b94fe9 | [
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null | volttrontesting/platform/dbutils/test_influxdbutils.py | architpansare/volttron | ea8e7b3c8af9ce08dc736bd8a5c7b513d6b94fe9 | [
"Apache-2.0",
"BSD-2-Clause"
] | 4 | 2021-05-28T19:12:54.000Z | 2022-02-18T03:07:40.000Z | volttrontesting/platform/dbutils/test_influxdbutils.py | architpansare/volttron | ea8e7b3c8af9ce08dc736bd8a5c7b513d6b94fe9 | [
"Apache-2.0",
"BSD-2-Clause"
] | 3 | 2021-06-12T19:49:56.000Z | 2022-02-12T20:14:38.000Z | from time import time
import os
from gevent import sleep
import pytest
try:
from influxdb import InfluxDBClient
except ImportError:
pytest.skip(
"Required imports for testing are not installed; thus, not running tests. "
"If on Ubuntu or Debian OS, install imports with: services/core/InfluxdbHistorian/scripts/install-influx.sh "
"Otherwise, see https://docs.influxdata.com/influxdb/v1.4/introduction/installation/.",
allow_module_level=True,
)
import volttron.platform.dbutils.influxdbutils as influxdbutils
from volttrontesting.fixtures.docker_wrapper import create_container
from volttrontesting.utils.utils import get_rand_port
IMAGES = ["influxdb:1.7"]
if "CI" not in os.environ:
IMAGES.extend(["influxdb:1.8.1", "influxdb:1.7.10"])
TEST_DATABASE = "test_historian"
ENV_INFLUXDB = {"INFLUXDB_DB": TEST_DATABASE}
ALLOW_CONNECTION_TIME = 10
@pytest.mark.dbutils
@pytest.mark.influxdbutils
def test_get_all_topics(get_container_func, ports_config):
get_container, image = get_container_func
with get_container(
image, ports=ports_config["ports"], env=ENV_INFLUXDB
) as container:
wait_for_connection(container)
points = [
{
"measurement": "meta",
"tags": {"topic_id": "sometopic_id"},
"time": 1465839830100400200,
"fields": {
"topic": "some_topic_name",
"meta_dict": str({"metadata1": "foobar"}),
},
}
]
add_data_to_measurement(ports_config, points)
expected_topics = ["some_topic_name"]
actual_topics = influxdbutils.get_all_topics(influxdb_client(ports_config))
assert actual_topics == expected_topics
@pytest.mark.dbutils
@pytest.mark.influxdbutils
@pytest.mark.parametrize(
"topic_id", [("a^p"), ("a[p-z]"), ("\\w+\\b"), ("fgfd$"), ("\\/foobar\\/")]
)
def test_get_topic_values_raises_value_error_on_regex(
get_container_func, ports_config, topic_id
):
with pytest.raises(ValueError):
influxdbutils.get_topic_values(
None, topic_id, None, None, None, None, None, None, None, None
)
@pytest.mark.dbutils
@pytest.mark.influxdbutils
@pytest.mark.parametrize(
"points, topic_id, start, end, agg_type, agg_period, skip, count, order,use_calendar_time_periods, expected_topic_values",
[
(
[
{
"measurement": "power_kw",
"tags": {
"device": "device1",
"building": "building1",
"campus": "campusa",
},
"fields": {"value": "somevalue"},
"time": 1465839830100400200,
}
],
"CampusA/Building1/Device1/Power_KW".lower(),
None,
None,
None,
None,
0,
1000,
"FIRST_TO_LAST",
False,
[("2016-06-13T17:43:50.100400+00:00", "somevalue")],
)
],
)
def test_get_topic_values(
get_container_func,
ports_config,
points,
topic_id,
start,
end,
agg_type,
agg_period,
skip,
count,
order,
use_calendar_time_periods,
expected_topic_values,
):
get_container, image = get_container_func
with get_container(
image, ports=ports_config["ports"], env=ENV_INFLUXDB
) as container:
wait_for_connection(container)
add_data_to_measurement(ports_config, points)
actual_topic_values = influxdbutils.get_topic_values(
influxdb_client(ports_config),
topic_id,
start,
end,
agg_type,
agg_period,
skip,
count,
order,
use_calendar_time_periods,
)
assert actual_topic_values == expected_topic_values
@pytest.mark.dbutils
@pytest.mark.influxdbutils
@pytest.mark.parametrize(
"points, topic_id, expected_meta",
[
(
[
{
"measurement": "meta",
"tags": {"topic_id": "sometopic_id"},
"time": 1465839830100400200,
"fields": {
"topic": "some_topic_name",
"meta_dict": str({"metadata1": "foobar", "metadata2": 42}),
"last_updated": "1465839830100400200",
},
}
],
"sometopic_id",
{"metadata1": "foobar", "metadata2": 42},
)
],
)
def test_get_topic_meta(
get_container_func, ports_config, points, topic_id, expected_meta
):
get_container, image = get_container_func
with get_container(
image, ports=ports_config["ports"], env=ENV_INFLUXDB
) as container:
wait_for_connection(container)
add_data_to_measurement(ports_config, points)
actual_meta = influxdbutils.get_topic_meta(
influxdb_client(ports_config), topic_id
)
assert actual_meta == expected_meta
@pytest.mark.dbutils
@pytest.mark.influxdbutils
@pytest.mark.parametrize(
"points, expected_results",
[
(
[
{
"measurement": "meta",
"tags": {"topic_id": "sometopic_id"},
"fields": {
"topic": "actual_topic_name",
"meta_dict": str({"metadata1": "foobar"}),
},
}
],
(
{"sometopic_id": "actual_topic_name"},
{"sometopic_id": {"metadata1": "foobar"}},
),
),
(
[
{
"measurement": "meta",
"tags": {"topic_id": "sometopic_id"},
"fields": {
"topic": "actual_topic_name1",
"meta_dict": str({"metadata1": "foobar"}),
},
},
{
"measurement": "meta",
"tags": {"topic_id": "other_id"},
"fields": {
"topic": "actual_topic_name2",
"meta_dict": str({"metadata2": 42}),
},
},
],
(
{
"sometopic_id": "actual_topic_name1",
"other_id": "actual_topic_name2",
},
{
"sometopic_id": {"metadata1": "foobar"},
"other_id": {"metadata2": 42},
},
),
),
],
)
def test_get_all_topic_id_and_meta(
get_container_func, ports_config, points, expected_results
):
get_container, image = get_container_func
with get_container(
image, ports=ports_config["ports"], env=ENV_INFLUXDB
) as container:
wait_for_connection(container)
add_data_to_measurement(ports_config, points)
actual_results = influxdbutils.get_all_topic_id_and_meta(
influxdb_client(ports_config)
)
assert actual_results == expected_results
@pytest.mark.dbutils
@pytest.mark.influxdbutils
@pytest.mark.parametrize(
"topic_id, topic, meta, updated_time, expected_data",
[
(
"sometopic_id",
"actual_topic_name",
{"metadata1": "foobar"},
"1465839830100400200",
[
{
"time": "1970-01-01T00:00:00Z",
"last_updated": "1465839830100400200",
"meta_dict": "{'metadata1': 'foobar'}",
"topic": "actual_topic_name",
"topic_id": "sometopic_id",
}
],
)
],
)
def test_insert_meta(
get_container_func, ports_config, topic_id, topic, meta, updated_time, expected_data
):
get_container, image = get_container_func
with get_container(
image, ports=ports_config["ports"], env=ENV_INFLUXDB
) as container:
wait_for_connection(container)
assert get_data(ports_config, "meta") == []
influxdbutils.insert_meta(
influxdb_client(ports_config), topic_id, topic, meta, updated_time
)
actual_results = get_data(ports_config, "meta")
assert actual_results == expected_data
@pytest.mark.dbutils
@pytest.mark.influxdbutils
@pytest.mark.parametrize(
"measurement, updatedtime, topic_id, source, value, value_string, expected_data",
[
(
"POWER_KW",
"2017-12-28T20:41:00.004260096Z",
"CampusA/Building1/Device1/POWER_KW",
"scrape",
"123.4",
"foobar",
[
{
"time": "2017-12-28T20:41:00.004260Z",
"building": "Building1",
"campus": "CampusA",
"device": "Device1",
"source": "scrape",
"value": "123.4",
"value_string": "foobar",
}
],
),
(
"OutsideAirTemperature",
"2017-12-28T20:41:00.004260096Z",
"CampusA/Building1/LAB/Device/OutsideAirTemperature",
"scrape",
"123.4",
"foobar",
[
{
"time": "2017-12-28T20:41:00.004260Z",
"building": "LAB",
"campus": "CampusA/Building1",
"device": "Device",
"source": "scrape",
"value": "123.4",
"value_string": "foobar",
}
],
),
(
"temp",
"2017-12-28T20:41:00.004260096Z",
"LAB/Device/temp",
"scrape",
"123.4",
"foobar",
[
{
"time": "2017-12-28T20:41:00.004260Z",
"building": "LAB",
"device": "Device",
"source": "scrape",
"value": "123.4",
"value_string": "foobar",
}
],
),
],
)
def test_insert_data_point(
get_container_func,
ports_config,
measurement,
updatedtime,
topic_id,
source,
value,
value_string,
expected_data,
):
get_container, image = get_container_func
with get_container(
image, ports=ports_config["ports"], env=ENV_INFLUXDB
) as container:
wait_for_connection(container)
assert get_data(ports_config, measurement) == []
influxdbutils.insert_data_point(
influxdb_client(ports_config),
updatedtime,
topic_id,
source,
value,
value_string,
)
actual_data = get_data(ports_config, measurement)
assert actual_data == expected_data
@pytest.mark.dbutils
@pytest.mark.influxdbutils
@pytest.mark.parametrize(
"pattern, expected_topics",
[
("actual", [{"actual_topic_name": "sometopic_id"}]),
(
"topic",
[
{"actual_topic_name": "sometopic_id"},
{"snafu_topic": "ghsfjkhkjf_ID"},
{"topic_snafu_2": "topic_id_42"},
],
),
("foo", []),
(
"^(snafu).*",
[{"snafu_Topic2": "other_topic_id"}, {"snafu_topic": "ghsfjkhkjf_ID"}],
),
("(name)$", [{"actual_topic_name": "sometopic_id"}]),
],
)
def test_get_topics_by_pattern(
get_container_func, ports_config, pattern, expected_topics
):
get_container, image = get_container_func
with get_container(
image, ports=ports_config["ports"], env=ENV_INFLUXDB
) as container:
wait_for_connection(container)
points = [
{
"measurement": "meta",
"tags": {"topic_id": "sometopic_id"},
"fields": {
"topic": "actual_topic_name",
"meta_dict": str({"metadata1": "foobar"}),
},
},
{
"measurement": "meta",
"tags": {"topic_id": "ghsfjkhkjf_ID"},
"fields": {
"topic": "snafu_topic",
"meta_dict": str({"metadata42": "foobar"}),
},
},
{
"measurement": "meta",
"tags": {"topic_id": "topic_id_42"},
"fields": {
"topic": "topic_snafu_2",
"meta_dict": str({"metadata42": "foobar"}),
},
},
{
"measurement": "meta",
"tags": {"topic_id": "other_topic_id"},
"fields": {
"topic": "snafu_Topic2",
"meta_dict": str({"metadata42": "foobar"}),
},
},
]
add_data_to_measurement(ports_config, points)
actual_topics = influxdbutils.get_topics_by_pattern(
influxdb_client(ports_config), pattern
)
assert actual_topics == expected_topics
@pytest.fixture(params=IMAGES)
def get_container_func(request):
return create_container, request.param
@pytest.fixture()
def ports_config():
port_on_host = get_rand_port(ip="8086")
return {"port_on_host": port_on_host, "ports": {"8086/tcp": port_on_host}}
def influxdb_client(ports_config):
connection_params = {
"host": "localhost",
"port": ports_config["port_on_host"],
"database": TEST_DATABASE,
}
return influxdbutils.get_client(connection_params)
def wait_for_connection(container):
sleep(ALLOW_CONNECTION_TIME)
query_database(container, f"use {TEST_DATABASE}")
def query_database(container, query):
cmd = f'influx -execute "{query}" -database test_historian'
start_time = time()
while time() - start_time < ALLOW_CONNECTION_TIME:
r = container.exec_run(cmd=cmd, tty=True)
print(r)
if r[0] != 0:
continue
else:
return
    raise RuntimeError(r)
def add_data_to_measurement(ports_config, points):
client = InfluxDBClient(
host="localhost", port=ports_config["port_on_host"], database=TEST_DATABASE
)
client.write_points(points)
def get_data(ports_config, measurement):
client = InfluxDBClient(
host="localhost", port=ports_config["port_on_host"], database=TEST_DATABASE
)
res = client.query(f"""SELECT * from {measurement}""", database=TEST_DATABASE)
return list(res.get_points())
| 29.21135 | 126 | 0.521337 |
7941ecad8e09ba911447c83dad5cd6e27dc30684 | 1,868 | py | Python | tutorials/tutorial_code/debugging_info/custom_metrics.py | xuweichn/docs | 794227e1b7d2dcbf3fc82cd203d32c59c6b933c6 | [
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-02-08T14:07:25.000Z | 2021-02-08T14:07:25.000Z | tutorials/tutorial_code/debugging_info/custom_metrics.py | xuweichn/docs | 794227e1b7d2dcbf3fc82cd203d32c59c6b933c6 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | tutorials/tutorial_code/debugging_info/custom_metrics.py | xuweichn/docs | 794227e1b7d2dcbf3fc82cd203d32c59c6b933c6 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""use metrics
The sample can be run on CPU/GPU/Ascend.
"""
import mindspore.nn as nn
from mindspore.nn import Momentum, SoftmaxCrossEntropyWithLogits
from mindspore import Model, context
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor
from src.dataset import create_train_dataset, create_eval_dataset
from src.net import Net
if __name__ == "__main__":
context.set_context(mode=context.GRAPH_MODE)
ds_train = create_train_dataset()
ds_eval = create_eval_dataset()
net = Net()
net_opt = Momentum(net.trainable_params(), 0.01, 0.9)
net_loss = SoftmaxCrossEntropyWithLogits(reduction='mean')
metrics = {
'Accuracy': nn.Accuracy(),
'Loss': nn.Loss(),
'Precision': nn.Precision(),
'Recall': nn.Recall(),
'F1_score': nn.F1()
}
config_ck = CheckpointConfig(save_checkpoint_steps=1000, keep_checkpoint_max=10)
ckpoint = ModelCheckpoint(prefix="CKPT", config=config_ck)
model = Model(network=net, loss_fn=net_loss, optimizer=net_opt, metrics=metrics)
model.train(epoch=2, train_dataset=ds_train, callbacks=[ckpoint, LossMonitor()])
result = model.eval(ds_eval)
print(result)
| 39.744681 | 84 | 0.707709 |
7941ee29faaa1d81c4a590a2c30b76d8330dd495 | 3,205 | py | Python | options/train_options.py | joverwey/Diverse-Motion-Stylization | 4791914046a953158f7430541670b8537924e893 | [
"MIT"
] | 7 | 2021-11-02T16:56:10.000Z | 2022-03-23T05:11:42.000Z | options/train_options.py | joverwey/Diverse-Motion-Stylization | 4791914046a953158f7430541670b8537924e893 | [
"MIT"
] | null | null | null | options/train_options.py | joverwey/Diverse-Motion-Stylization | 4791914046a953158f7430541670b8537924e893 | [
"MIT"
] | 2 | 2022-03-04T01:10:40.000Z | 2022-03-24T10:56:15.000Z | import os
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def __init__(self):
BaseOptions.__init__(self)
def initialize(self, parser):
parser = BaseOptions.initialize(self, parser)
# dataset parameter
parser.add_argument('--num_workers', type=int, default=0)
parser.add_argument('--train_dataroot', type=str, default='./datasets/styletransfer_generate.npz', help='path to training set')
parser.add_argument('--preproot', type=str, default='./datasets/preprocess_styletransfer_generate.npz', help='path to preprocess')
parser.add_argument('--clip_size', type=int, nargs='+', default=[64, 21])
# training parameters
parser.add_argument('--batch_size', type=int, default=8)
parser.add_argument('--resume_iter', type=int, default=0)
parser.add_argument('--total_iters', type=int, default=100000)
parser.add_argument('--g_lr', type=float, default=1e-4, help='learning rate for G')
parser.add_argument('--d_lr', type=float, default=1e-6, help='learning rate for D')
parser.add_argument('--e_lr', type=float, default=1e-6, help='learning rate for E')
parser.add_argument('--f_lr', type=float, default=1e-5, help='learning rate for F')
parser.add_argument('--lr_decay_every', type=int, default=100, help='learning rate decay step size')
parser.add_argument('--beta1', type=float, default=0.9)
parser.add_argument('--beta2', type=float, default=0.99)
parser.add_argument('--weight_decay', type=float, default=1e-4)
parser.add_argument('--lambda_adv', type=float, default=1.0)
parser.add_argument('--lambda_reg', type=float, default=1.0, help='weight for R1 regularization')
parser.add_argument('--lambda_con', type=float, default=1.0, help='weight for content reconstruction loss')
parser.add_argument('--lambda_sty', type=float, default=1.0, help='weight for style reconstruction loss')
parser.add_argument('--lambda_ds', type=float, default=1.0, help='weight for style diversification loss')
parser.add_argument('--lambda_cyc', type=float, default=1.0, help='weight for cycle loss')
parser.add_argument('--lambda_feet', type=float, default=1.0)
parser.add_argument('--ds_iter', type=int, default=100000)
# saving & loading
parser.add_argument('--net_print', type=bool, default=True)
parser.add_argument('--print_every', type=int, default=10)
parser.add_argument('--save_every', type=int, default=5000)
parser.add_argument('--save_latest_every', type=int, default=100)
parser.add_argument('--load_latest', action='store_true')
return parser
def check(self, opt):
assert opt.mode == 'train', 'Not a train mode!'
assert opt.num_domains == len(opt.domains), 'Number of domains does not match!'
def print_options(self, opt):
message = BaseOptions.print_options(self, opt)
file_name = os.path.join(opt.save_dir, '%s_opt.txt' % opt.mode)
with open(file_name, 'a') as opt_file:
opt_file.write(message)
opt_file.write('\n')
| 54.322034 | 138 | 0.671139 |
7941ee5e74935bab16b9ee91538200dace1c1509 | 7,479 | py | Python | plugins/aha.plugin.twitteroauth/twitteroauth/twitter.py | Letractively/aha-gae | e1209f7d44d1c59ff9d373b7d89d414f31a9c28b | ["BSD-3-Clause"] | null | null | null | plugins/aha.plugin.twitteroauth/twitteroauth/twitter.py | Letractively/aha-gae | e1209f7d44d1c59ff9d373b7d89d414f31a9c28b | ["BSD-3-Clause"] | null | null | null | plugins/aha.plugin.twitteroauth/twitteroauth/twitter.py | Letractively/aha-gae | e1209f7d44d1c59ff9d373b7d89d414f31a9c28b | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
tipfy.ext.auth.twitter
~~~~~~~~~~~~~~~~~~~~~~
Implementation of Twitter authentication scheme.
Ported from `tornado.auth <http://github.com/facebook/tornado/blob/master/tornado/auth.py>`_.
:copyright: 2009 Facebook.
:copyright: 2010 tipfy.org.
:license: Apache License Version 2.0, see LICENSE.txt for more details.
"""
from __future__ import absolute_import
import functools
import logging
import urllib
from google.appengine.api import urlfetch
from django.utils import simplejson
#from tipfy import REQUIRED_VALUE
from plugin.twitteroauth.oauth import OAuthMixin
#: Default configuration values for this module. Keys are:
#:
#: - ``consumer_key``: Key provided when you register an application with
#: Twitter.
#: - ``consumer_secret``: Secret provided when you register an application
#: with Twitter.
#default_config = {
# 'consumer_key': REQUIRED_VALUE,
# 'consumer_secret': REQUIRED_VALUE,
#}
import aha
config = aha.Config()
class TwitterMixin(OAuthMixin):
"""A :class:`tipfy.RequestHandler` mixin that implements Twitter OAuth
authentication.
To authenticate with Twitter, register your application with
Twitter at http://twitter.com/apps. Then copy your Consumer Key and
Consumer Secret to the config:
<<code python>>
config['tipfy.ext.auth.twitter'] = {
'consumer_key': 'XXXXXXXXXXXXXXX',
'consumer_secret': 'XXXXXXXXXXXXXXX',
}
<</code>>
When your application is set up, you can use the TwitterMixin to
authenticate the user with Twitter and get access to their stream.
You must use the mixin on the handler for the URL you registered as your
application's Callback URL. For example:
<<code python>>
from tipfy import RequestHandler, abort
from tipfy.ext.auth.twitter import TwitterMixin
from tipfy.ext.session import CookieMixin, SessionMiddleware
class TwitterHandler(RequestHandler, CookieMixin, TwitterMixin):
middleware = [SessionMiddleware]
def get(self):
if self.request.args.get('oauth_token', None):
return self.get_authenticated_user(self._on_auth)
return self.authorize_redirect()
def _on_auth(self, user):
if not user:
abort(403)
# Set the user in the session.
# ...
<</code>>
The user object returned by get_authenticated_user() includes the
attributes 'username', 'name', and all of the custom Twitter user
    attributes described at
http://apiwiki.twitter.com/Twitter-REST-API-Method%3A-users%C2%A0show
in addition to 'access_token'. You should save the access token with
the user; it is required to make requests on behalf of the user later
with twitter_request().
"""
_OAUTH_REQUEST_TOKEN_URL = 'http://api.twitter.com/oauth/request_token'
_OAUTH_ACCESS_TOKEN_URL = 'http://api.twitter.com/oauth/access_token'
_OAUTH_AUTHORIZE_URL = 'http://api.twitter.com/oauth/authorize'
_OAUTH_AUTHENTICATE_URL = 'http://api.twitter.com/oauth/authenticate'
_OAUTH_NO_CALLBACKS = True
def _twitter_consumer_key(self):
return config.consumer_key
def _twitter_consumer_secret(self):
return config.consumer_secret
def _oauth_consumer_token(self):
return dict(
key = self._twitter_consumer_key(),
secret = self._twitter_consumer_secret())
def authenticate_redirect(self):
"""Just like authorize_redirect(), but auto-redirects if authorized.
This is generally the right interface to use if you are using
Twitter for single-sign on.
"""
url = self._oauth_request_token_url()
try:
response = urlfetch.fetch(url, deadline = 10)
except urlfetch.DownloadError, e:
logging.exception(e)
response = None
return self._on_request_token(self._OAUTH_AUTHENTICATE_URL, None,
response)
def twitter_request(self, path, callback, access_token = None,
post_args = None, **args):
"""Fetches the given API path, e.g., '/statuses/user_timeline/btaylor'
The path should not include the format (we automatically append
'.json' and parse the JSON output).
If the request is a POST, post_args should be provided. Query
string arguments should be given as keyword arguments.
All the Twitter methods are documented at
http://apiwiki.twitter.com/Twitter-API-Documentation.
Many methods require an OAuth access token which you can obtain
through authorize_redirect() and get_authenticated_user(). The
user returned through that process includes an 'access_token'
attribute that can be used to make authenticated requests via
this method. Example usage:
from tipfy import RequestHandler
from tipfy.ext.auth.twitter import TwitterMixin
class MainHandler(RequestHandler, TwitterMixin):
def get(self):
return self.twitter_request(
'/statuses/update',
post_args = {'status': 'Testing Twitter Mixin'},
access_token = user['access_token'],
callback = self._on_post)
def _on_post(self, new_entry):
if not new_entry:
# Call failed; perhaps missing permission?
return self.authorize_redirect()
return Response('Posted a message!')
"""
# Add the OAuth resource request signature if we have credentials
url = 'http://api.twitter.com/1' + path + '.json'
if access_token:
all_args = {}
all_args.update(args)
all_args.update(post_args or {})
method = 'POST' if post_args is not None else 'GET'
oauth = self._oauth_request_parameters(
url, access_token, all_args, method = method)
args.update(oauth)
if args:
url += '?' + urllib.urlencode(args)
try:
if post_args is not None:
response = urlfetch.fetch(url, method = 'POST',
payload = urllib.urlencode(post_args), deadline = 10)
else:
response = urlfetch.fetch(url, deadline = 10)
except urlfetch.DownloadError, e:
logging.exception(e)
response = None
return self._on_twitter_request(callback, response)
def _on_twitter_request(self, callback, response):
if not response:
logging.warning('Could not get Twitter request token.')
return callback(None)
elif response.status_code < 200 or response.status_code >= 300:
logging.warning('Invalid Twitter response (%d): %s',
response.status_code, response.content)
return callback(None)
return callback(simplejson.loads(response.content))
def _oauth_get_user(self, access_token, callback):
callback = functools.partial(self._parse_user_response, callback)
return self.twitter_request(
'/users/show/' + access_token['screen_name'],
access_token = access_token, callback = callback)
def _parse_user_response(self, callback, user):
if user:
user['username'] = user['screen_name']
return callback(user)
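# --- Illustrative configuration sketch (added annotation, not part of the original plugin) ---
# The mixin reads the consumer credentials from aha's global Config object (see
# _twitter_consumer_key/_twitter_consumer_secret above), so an application would
# typically set something like the following at startup. The literal values are
# placeholders; only the attribute names are taken from this module.
#
#   config = aha.Config()
#   config.consumer_key = 'XXXXXXXXXXXXXXX'       # value obtained from http://twitter.com/apps
#   config.consumer_secret = 'XXXXXXXXXXXXXXX'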
| 35.614286 | 97 | 0.649151 |
7941eec5004d0b5d1a61f495f98eee698dc9ab1d | 44,959 | py | Python | tests/python/unittest/test_module.py | connorgoggins/incubator-mxnet | 37280e4ddf00cacdac50c1e798fd2a14da38ae8d | ["Apache-2.0"] | null | null | null | tests/python/unittest/test_module.py | connorgoggins/incubator-mxnet | 37280e4ddf00cacdac50c1e798fd2a14da38ae8d | ["Apache-2.0"] | null | null | null | tests/python/unittest/test_module.py | connorgoggins/incubator-mxnet | 37280e4ddf00cacdac50c1e798fd2a14da38ae8d | ["Apache-2.0"] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import mxnet as mx
import mxnet.ndarray as nd
from mxnet.test_utils import *
import numpy as np
from functools import reduce
from mxnet.module.executor_group import DataParallelExecutorGroup
from common import setup_module, with_seed, assertRaises, teardown_module
from collections import namedtuple
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, "../train"))
from test_bucketing import train_model, prepare_bucketing_data
@with_seed()
def test_module_dtype():
dtype = np.float16
dshape = (3, 8, 7)
sym = mx.sym.Variable('data')
sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')
mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, dtype, layout='TNC')])
mod.init_params()
mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape, dtype=dtype)],
label=None))
mod.backward([mx.nd.ones(dshape, dtype=dtype)])
for x in mod.get_outputs():
assert x.dtype == dtype
def test_module_bind():
sym = mx.sym.Variable('data')
sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')
mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
assertRaises(TypeError, mod.bind, data_shapes=[('data', mx.nd.array([10,10]))])
assert mod.binded == False
mod.bind(data_shapes=[('data', (10,10))])
assert mod.binded == True
@with_seed()
def test_module_input_grads():
a = mx.sym.Variable('a', __layout__='NC')
b = mx.sym.Variable('b', __layout__='NC')
c = mx.sym.Variable('c', __layout__='NC')
c = a + 2 * b + 3 * c
net = mx.mod.Module(c, data_names=['b', 'c', 'a'], label_names=None,
context=[mx.cpu(0), mx.cpu(1)])
net.bind(data_shapes=[['b', (5, 5)], ['c', (5, 5)], ['a', (5, 5)]],
label_shapes=None, inputs_need_grad=True)
net.init_params()
net.forward(data_batch=mx.io.DataBatch(data=[nd.ones((5, 5)),
nd.ones((5, 5)),
nd.ones((5, 5))]))
net.backward(out_grads=[nd.ones((5, 5))])
input_grads = net.get_input_grads()
b_grad = input_grads[0].asnumpy()
c_grad = input_grads[1].asnumpy()
a_grad = input_grads[2].asnumpy()
assert np.all(a_grad == 1), a_grad
assert np.all(b_grad == 2), b_grad
assert np.all(c_grad == 3), c_grad
@with_seed()
def test_module_ctx_group():
def check_module_ctx_group(ctxs, group2ctxs, grad_ctxs=None):
with mx.AttrScope(ctx_group='dev1'):
a = mx.symbol.Variable('a')
a = a * 2
with mx.AttrScope(ctx_group='dev2'):
b = mx.symbol.Variable('b')
c = a + b
shape = (2, 5)
mod1 = mx.mod.Module(c, context=ctxs, data_names=['a', 'b'], label_names=None,
group2ctxs=group2ctxs)
mod1.bind(data_shapes=[['a', shape], ['b', shape]], inputs_need_grad=True)
mod1.init_params()
mod1.forward(data_batch=mx.io.DataBatch(data=[mx.nd.ones(shape), mx.nd.ones(shape)]), is_train=True)
mod1.backward([mx.nd.ones(shape)])
mod1_input_grads = mod1.get_input_grads()
mod2 = mx.mod.Module(c, context=ctxs, data_names=['a', 'b'], label_names=None)
mod2.bind(data_shapes=[['a', shape], ['b', shape]], inputs_need_grad=True)
mod2.init_params()
mod2.forward(data_batch=mx.io.DataBatch(data=[mx.nd.ones(shape), mx.nd.ones(shape)]), is_train=True)
mod2.backward([mx.nd.ones(shape)])
mod2_input_grads = mod2.get_input_grads()
if grad_ctxs is not None:
assert(mod1_input_grads[0].context == grad_ctxs[0])
assert(mod1_input_grads[1].context == grad_ctxs[1])
assert(np.all(mod1_input_grads[0].asnumpy() == mod2_input_grads[0].asnumpy()))
assert(np.all(mod1_input_grads[1].asnumpy() == mod2_input_grads[1].asnumpy()))
check_module_ctx_group([mx.cpu(0)], {'dev1': mx.cpu(1), 'dev2': mx.cpu(2)}, grad_ctxs=[mx.cpu(1), mx.cpu(2)])
check_module_ctx_group([mx.cpu(0), mx.cpu(1)],
[{'dev1': mx.cpu(2), 'dev2': mx.cpu(3)}, {'dev1': mx.cpu(4), 'dev2': mx.cpu(5)}])
check_module_ctx_group([mx.cpu(0), mx.cpu(1)], {'dev1': mx.cpu(2), 'dev2': mx.cpu(3)})
check_module_ctx_group([mx.cpu(0), mx.cpu(1)], {'dev1': mx.cpu(2), 'dev2': [mx.cpu(3)]})
check_module_ctx_group([mx.cpu(0), mx.cpu(1)], {'dev1':mx.cpu(2), 'dev2':[mx.cpu(3), mx.cpu(3)]})
check_module_ctx_group([mx.cpu(0), mx.cpu(1)],
{'dev1':[mx.cpu(2), mx.cpu(2)], 'dev2':[mx.cpu(3), mx.cpu(3)]})
@with_seed()
def test_bucket_module_ctx_group():
num_hidden = 10
batch_size = 5
def sym_gen(seq_len):
with mx.AttrScope(ctx_group='dev1'):
data = mx.symbol.Variable('data')
weight = mx.symbol.Variable('dev1_weight')
bias = mx.symbol.Variable('dev1_bias')
fc = data
for i in range(seq_len):
fc = mx.symbol.FullyConnected(data=fc, weight=weight, bias=bias,
name='dev1_fc_%d' % i, num_hidden=num_hidden)
with mx.AttrScope(ctx_group='dev2'):
label = mx.symbol.Variable('label')
weight = mx.symbol.Variable('dev2_weight')
bias = mx.symbol.Variable('dev2_bias')
for i in range(seq_len):
fc = mx.symbol.FullyConnected(data=fc, weight=weight, bias=bias,
name='dev2_fc_%d' % i, num_hidden=num_hidden)
sym = mx.symbol.SoftmaxOutput(fc, label, name='softmax')
return sym, ('data',), ('label',)
mod = mx.mod.BucketingModule(sym_gen=sym_gen, default_bucket_key=10, context=[mx.cpu(0)],
group2ctxs=[{'dev1': mx.cpu(1), 'dev2': mx.cpu(2)}])
mod.bind(data_shapes=[['data', (batch_size, num_hidden)]],
label_shapes=[['label', (batch_size,)]],
for_training=True, inputs_need_grad=True)
assert(mod.binded)
@with_seed()
def test_module_layout():
sym = mx.sym.Variable('data')
sym = mx.sym.Activation(data=sym, act_type='relu', __layout__='TNC')
dshape = (3, 8, 7)
mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
mod.bind(data_shapes=[mx.io.DataDesc('data', dshape, layout='TNC')])
mod.init_params()
mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
label=None))
mod.backward([mx.nd.ones(dshape)])
assert mod.get_outputs()[0].shape == dshape
hdshape = (3, 4, 7)
for x in mod.get_outputs(merge_multi_context=False)[0]:
assert x.shape == hdshape
@with_seed()
def test_save_load():
previous_update_on_kvstore = os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")
os.putenv('MXNET_UPDATE_ON_KVSTORE', '1')
def dict_equ(a, b):
assert set(a) == set(b)
for k in a:
assert (a[k].asnumpy() == b[k].asnumpy()).all()
sym = mx.sym.Variable('data')
sym = mx.sym.FullyConnected(sym, num_hidden=100)
# single device
mod = mx.mod.Module(sym, ('data',))
mod.bind(data_shapes=[('data', (10, 10))])
mod.init_params()
mod.init_optimizer(optimizer_params={'learning_rate':0.1, 'momentum':0.9})
mod.update()
mod.save_checkpoint('test', 0, save_optimizer_states=True)
mod2 = mx.mod.Module.load('test', 0, load_optimizer_states=True, data_names=('data',))
mod2.bind(data_shapes=[('data', (10, 10))])
mod2.init_optimizer(optimizer_params={'learning_rate':0.1, 'momentum':0.9})
assert mod._symbol.tojson() == mod2._symbol.tojson()
dict_equ(mod.get_params()[0], mod2.get_params()[0])
dict_equ(mod._updater.states, mod2._updater.states)
# multi device
mod = mx.mod.Module(sym, ('data',), context=[mx.cpu(0), mx.cpu(1)])
mod.bind(data_shapes=[('data', (10, 10))])
mod.init_params()
mod.init_optimizer(optimizer_params={'learning_rate':0.1, 'momentum':0.9})
mod.update()
mod.save_checkpoint('test', 0, save_optimizer_states=True)
mod2 = mx.mod.Module.load('test', 0, load_optimizer_states=True, data_names=('data',))
mod2.bind(data_shapes=[('data', (10, 10))])
mod2.init_optimizer(optimizer_params={'learning_rate':0.1, 'momentum':0.9})
assert mod._symbol.tojson() == mod2._symbol.tojson()
dict_equ(mod.get_params()[0], mod2.get_params()[0])
dict_equ(mod._kvstore._updater.states, mod2._updater.states)
os.putenv('MXNET_UPDATE_ON_KVSTORE', previous_update_on_kvstore)
@with_seed()
def test_bucketing_save_load():
previous_update_on_kvstore = os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")
os.putenv('MXNET_UPDATE_ON_KVSTORE', '1')
def dict_equ(a, b):
assert set(a) == set(b)
for k in a:
assert (a[k].asnumpy() == b[k].asnumpy()).all()
len_vocab = 50
num_embed = 25
num_epochs = 5
batch_size = 128
num_layers = 2
num_hidden = 25
buckets = [5, 10, 20, 30, 40]
invalid_label = -1
num_sentence=1000
stack = mx.rnn.SequentialRNNCell()
for i in range(num_layers):
stack.add(mx.rnn.LSTMCell(num_hidden=num_hidden, prefix='lstm_l%d_' % i))
def sym_gen(seq_len):
data = mx.sym.Variable('data')
label = mx.sym.Variable('softmax_label')
embed = mx.sym.Embedding(data=data, input_dim=len_vocab,
output_dim=num_embed, name='embed')
stack.reset()
outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True)
pred = mx.sym.Reshape(outputs, shape=(-1, num_hidden))
pred = mx.sym.FullyConnected(data=pred, num_hidden=len_vocab, name='pred')
label = mx.sym.Reshape(label, shape=(-1,))
loss = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
return loss, ('data',), ('softmax_label',)
model = train_model(context=mx.current_context())
model.save_checkpoint("test", 0)
data_train, data_val = prepare_bucketing_data(buckets, len_vocab, batch_size, invalid_label, num_sentence)
mod2 = mx.mod.BucketingModule.load('test', 0, sym_gen=sym_gen,
default_bucket_key=data_train.default_bucket_key)
mod2.bind(data_shapes=data_train.provide_data,
label_shapes=data_train.provide_label)
for bucket_key in model._buckets.keys():
dict_equ(model._buckets[model._default_bucket_key].get_params()[0],
mod2._buckets[mod2._default_bucket_key].get_params()[0])
mod2.fit(
train_data=data_train,
eval_data=data_val,
eval_metric=mx.gluon.metric.Perplexity(invalid_label), # Use Perplexity for multiclass classification.
kvstore='device',
optimizer='sgd',
optimizer_params={'learning_rate': 0.01,
'momentum': 0,
'wd': 0.00001},
initializer=mx.init.Xavier(factor_type="in", magnitude=2.34),
num_epoch=num_epochs,
batch_end_callback=mx.callback.Speedometer(batch_size, 50))
os.putenv('MXNET_UPDATE_ON_KVSTORE', previous_update_on_kvstore)
@with_seed()
def test_module_reshape():
data = mx.sym.Variable('data')
sym = mx.sym.FullyConnected(data, num_hidden=20, name='fc')
dshape = (7, 20)
mod = mx.mod.Module(sym, ('data',), None, context=[mx.cpu(0), mx.cpu(1)])
mod.bind(data_shapes=[('data', dshape)])
mod.init_params()
mod.init_optimizer(optimizer_params={'learning_rate': 1})
mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
label=None))
mod.backward([mx.nd.ones(dshape)])
mod.update()
assert mod.get_outputs()[0].shape == dshape
assert (mod.get_params()[0]['fc_bias'].asnumpy() == -1).all()
dshape = (14, 20)
mod.reshape(data_shapes=[('data', dshape)])
mod.forward(mx.io.DataBatch(data=[mx.nd.ones(dshape)],
label=None))
mod.backward([mx.nd.ones(dshape)])
mod.update()
assert mod.get_outputs()[0].shape == dshape
assert (mod.get_params()[0]['fc_bias'].asnumpy() == -3).all()
@with_seed()
def test_module_states():
stack = mx.rnn.SequentialRNNCell()
for i in range(2):
stack.add(mx.rnn.LSTMCell(num_hidden=20, prefix='lstm_l%d_'%i))
begin_state = stack.begin_state(func=mx.sym.Variable)
_, states = stack.unroll(10, begin_state=begin_state, inputs=mx.sym.Variable('data'))
state_names = [i.name for i in begin_state]
mod = mx.mod.Module(mx.sym.Group(states), context=[mx.cpu(0), mx.cpu(1)],
label_names=None, state_names=state_names)
mod.bind(data_shapes=[('data', (5, 10))], label_shapes=None, for_training=False)
mod.init_params()
batch = mx.io.DataBatch(data=[mx.nd.zeros((5, 10))], label=[])
mod.set_states(value=1)
mod.forward(batch)
out = mod.get_outputs(merge_multi_context=False)
out1 = mod.get_outputs(merge_multi_context=True)
mod.set_states(states=out)
mod.forward(batch)
out2 = mod.get_outputs(merge_multi_context=True)
for x1, x2 in zip(out1, out2):
assert not mx.test_utils.almost_equal(x1.asnumpy(), x2.asnumpy(), rtol=1e-3)
@with_seed()
def test_module_switch_bucket():
vocab_dim = 5000
num_hidden = 100
num_embedding = 100
num_layer = 2
default_key = 10
test_key = 5
batch_size = 32
contexts = [mx.cpu(0)]
initializer = mx.init.Xavier(factor_type="in", magnitude=2.34)
#generate symbols for an LSTM network
def sym_gen(seq_len):
data = mx.sym.Variable('data')
label = mx.sym.Variable('softmax_label')
embed = mx.sym.Embedding(data=data, input_dim=vocab_dim,
output_dim=num_embedding)
stack = mx.rnn.SequentialRNNCell()
for i in range(num_layer):
stack.add(mx.rnn.LSTMCell(num_hidden=num_hidden, prefix='lstm_l%d_'%i))
outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True)
pred = mx.sym.Reshape(outputs, shape=(-1, num_hidden))
pred = mx.sym.FullyConnected(data=pred, num_hidden=vocab_dim, name='pred')
label = mx.sym.Reshape(label, shape=(-1,))
pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
return pred, ('data',), ('softmax_label',)
def create_bucketing_module(key):
model = mx.mod.BucketingModule(
sym_gen = sym_gen,
default_bucket_key = key,
context = contexts)
model.bind([('data', (batch_size, key))],
[('softmax_label', (batch_size, key))], True, False)
model.init_params(initializer=initializer)
return model
#initialize the bucketing module with the default bucket key
bucketing_model = create_bucketing_module(default_key)
#check name
assert bucketing_model.symbol.list_arguments()[1] == "embedding0_weight",\
"Error in assigning names for args in BucketingModule"
#switch to test_key
bucketing_model.switch_bucket(test_key, [('data', (batch_size, test_key))],
[('softmax_label', (batch_size, test_key))])
total_bytes_before = bucketing_model._buckets[default_key]._total_exec_bytes
#remove test_key and switch again
del bucketing_model._buckets[test_key]
bucketing_model.switch_bucket(test_key, [('data', (batch_size, test_key))],
[('softmax_label', (batch_size, test_key))])
total_bytes_after = bucketing_model._buckets[default_key]._total_exec_bytes
#the default bucket is expected to reuse the bytes allocated
assert total_bytes_after == total_bytes_before
# roywei: Getting rid of fixed seed as flakiness could not be reproduced,
# tracked at: https://github.com/apache/incubator-mxnet/issues/11705
@with_seed()
def test_module_set_params():
# data iter
data = mx.nd.array([[0.05, .10]]);
label = mx.nd.array([[.01, 0.99]]);
train_data = mx.io.NDArrayIter(data, label, batch_size=1)
# symbols
x = mx.symbol.Variable('data')
x = mx.symbol.FullyConnected(name='fc_0', data=x, num_hidden=2)
x = mx.symbol.Activation(name="act_0", data=x, act_type='sigmoid')
x = mx.symbol.FullyConnected(name='fc_1', data=x, num_hidden=2)
x = mx.symbol.Activation(name="act_1", data=x, act_type='sigmoid')
x = mx.symbol.LinearRegressionOutput(data=x, name='softmax', grad_scale=2)
# create module
mod = mx.mod.Module(x, context=[mx.cpu()]);
mod.bind(train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True)
arg_params_correct = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
'fc_0_bias' : mx.nd.array([.35, .35]),
'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]]),
'fc_1_bias' : mx.nd.array([.60, .60])}
arg_params_missing = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
'fc_0_bias' : mx.nd.array([.35, .35]),
'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]])}
arg_params_extra = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
'fc_0_bias' : mx.nd.array([.35, .35]),
'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]]),
'fc_1_bias' : mx.nd.array([.60, .60]),
'fc_2_weight': mx.nd.array([.60, .60])}
arg_params_missing_extra = {'fc_2_weight': mx.nd.array([.60, .60])}
# test regular set_params
mod.set_params(force_init=True, arg_params=arg_params_correct, aux_params={})
# test allow missing
mod.set_params(force_init=True, arg_params=arg_params_missing, aux_params={}, allow_missing=True)
assertRaises(RuntimeError, mod.set_params,
force_init=True, arg_params=arg_params_missing,
aux_params={}, allow_missing=False)
# test allow extra
mod.set_params(force_init=True, arg_params=arg_params_extra, aux_params={}, allow_missing=True, allow_extra=True)
assertRaises(ValueError, mod.set_params,
force_init=True, arg_params=arg_params_extra,
aux_params={}, allow_missing=True, allow_extra=False)
    # test with both allow_missing and allow_extra disabled; this raises a RuntimeError
assertRaises(RuntimeError, mod.set_params,
force_init=True, arg_params=arg_params_missing_extra,
aux_params={}, allow_missing=False, allow_extra=False)
    # test allow_missing with allow_extra disabled; this raises a ValueError
assertRaises(ValueError, mod.set_params,
force_init=True, arg_params=arg_params_missing_extra,
aux_params={}, allow_missing=True, allow_extra=False)
@with_seed()
def test_monitor():
# data iter
data = mx.nd.array([[0.05, .10]]);
label = mx.nd.array([[.01, 0.99]]);
train_data = mx.io.NDArrayIter(data, label, batch_size=1)
# symbols
x = mx.symbol.Variable('data')
x = mx.symbol.FullyConnected(name='fc_0', data=x, num_hidden=2)
x = mx.symbol.Activation(name="act_0", data=x, act_type='sigmoid')
x = mx.symbol.FullyConnected(name='fc_1', data=x, num_hidden=2)
x = mx.symbol.Activation(name="act_1", data=x, act_type='sigmoid')
x = mx.symbol.LinearRegressionOutput(data=x, name='softmax', grad_scale=2)
# create monitor
def mean_abs(x):
sum_abs = mx.ndarray.sum(mx.ndarray.abs(x))
return mx.ndarray.divide(sum_abs, reduce(lambda x, y: x * y, x.shape))
mon = mx.mon.Monitor(1, stat_func=mean_abs, pattern='.*', sort=True)
# create module
mod = mx.mod.Module(x, context=[mx.cpu()]);
mod.bind(train_data.provide_data, label_shapes=train_data.provide_label,
for_training=True)
mod.install_monitor(mon)
arg_params = {'fc_0_weight': mx.nd.array([[.15, .20], [.25, .30]]),
'fc_0_bias' : mx.nd.array([.35, .35]),
'fc_1_weight': mx.nd.array([[.40, .45], [.50, .55]]),
'fc_1_bias' : mx.nd.array([.60, .60])}
mod.init_params(arg_params=arg_params)
data_iter = iter(train_data)
data_batch = next(data_iter)
mon.tic()
mod.forward_backward(data_batch)
res = mon.toc()
keys = ['act_0', 'act_1', 'data', 'fc_0', 'fc_1', 'softmax']
mon_result_counts = [0, 0, 0, 0, 0, 0]
assert(len(res) == 21)
for n, k, v in res:
for idx, key in enumerate(keys):
if k.startswith(key):
mon_result_counts[idx] += 1
break
assert(mon_result_counts == [2, 2, 1, 6, 6, 4])
@with_seed()
def test_executor_group():
def get_rnn_sym(num_layers, num_words, num_hidden, num_embed, seq_len, sparse_embedding):
stack = mx.rnn.SequentialRNNCell()
for i in range(num_layers):
stack.add(mx.rnn.LSTMCell(num_hidden=num_hidden, prefix='lstm_l%d_' % i))
data = mx.sym.Variable('data')
label = mx.sym.Variable('softmax_label')
if sparse_embedding:
embed_weight = mx.sym.Variable('embed_weight', stype='row_sparse')
embed = mx.sym.contrib.SparseEmbedding(data=data, input_dim=num_words,
weight=embed_weight, output_dim=num_embed,
name='embed')
else:
embed = mx.sym.Embedding(data=data, input_dim=num_words,
output_dim=num_embed, name='embed')
stack.reset()
outputs, states = stack.unroll(seq_len, inputs=embed, merge_outputs=True)
pred = mx.sym.Reshape(outputs, shape=(-1, num_hidden))
pred = mx.sym.FullyConnected(data=pred, num_hidden=num_words, name='pred')
label = mx.sym.Reshape(label, shape=(-1,))
pred = mx.sym.SoftmaxOutput(data=pred, label=label, name='softmax')
return pred
def test_shared_exec_group(exec_grp_shared, exec_grp_created, shared_arg_names=None,
extra_args=None, check_shared_grad=True):
# Test shared data arrays
for i in range(len(exec_grp_shared.execs)):
# test same shared_data_arrays for two exec groups
shared_data_array1 = exec_grp_shared.shared_data_arrays[i]
shared_data_array2 = exec_grp_created.shared_data_arrays[i]
if extra_args is not None:
assert len(shared_data_array1) == len(extra_args),\
"exec_grp_shared.shared_data_arrays[%d] should have same number of args as extra_args"
assert len(shared_data_array1) == len(shared_data_array2),\
"length of shared_data_array of the shared executor group not equal to the created executor group"
for k, v in shared_data_array1.items():
if extra_args is not None:
assert k in extra_args, "arg %s is not in extra_args" % k
assert k in shared_data_array2,\
"arg %s of the shared executor group not in the shared_data_array of the created executor group" % k
assert mx.test_utils.same_array(v, shared_data_array2[k])
for data_name, array in exec_grp_shared.shared_data_arrays[i].items():
assert data_name in exec_grp_created.shared_data_arrays[i], \
"Shared input data '%s' is not in " \
"shared_data_arrays of created executor group." % (data_name)
assert mx.test_utils.same_array(array, exec_grp_created.shared_data_arrays[i][data_name]), \
"Shared input data '%s' does not share memory." % (data_name)
# Test shared argument arrays and gradient arrays
exec_shared = exec_grp_shared.execs[i]
exec_created = exec_grp_created.execs[i]
if shared_arg_names is not None:
# test shared arguments
for arg_name in shared_arg_names:
assert arg_name in exec_created.arg_dict, \
"Shared argument '%s' is not in arg_dict of created executor group." % (arg_name)
assert mx.test_utils.same_array(exec_shared.arg_dict[arg_name], exec_created.arg_dict[arg_name]), \
"Shared argument '%s' does not share memory." % (arg_name)
# test shared argument gradients
if check_shared_grad:
for arg_name in shared_arg_names:
assert arg_name in exec_created.grad_dict, \
"Shared argument gradient '%s' is not in " \
"grad_dict of created executor group." % (arg_name)
assert mx.test_utils.same_array(exec_shared.grad_dict[arg_name], \
exec_created.grad_dict[arg_name]), \
"Shared argument gradient '%s' does not share memory." % (arg_name)
for arg_name, grad in exec_grp_shared.grad_req.items():
assert grad == exec_grp_created.grad_req[arg_name], \
"Gradient requirements for shared argument '%s' are inconsistent. " \
"Shared executor group requires '%s' while created executor group requires '%s'" \
%(arg_name, grad, exec_grp_created.grad_req[arg_name])
def check_shared_exec_group(sparse_embedding):
        # generate an rnn sym with #layers=3
sym = get_rnn_sym(num_layers=3, num_words=num_words, num_hidden=num_hidden,
num_embed=num_embed, seq_len=max_bucket_size,
sparse_embedding=sparse_embedding)
arg_names1 = sym.list_arguments()
input_names = [name[0] for name in data_shapes] + [name[0] for name in label_shapes]
shared_arg_names = [name for name in arg_names1 if name not in input_names]
exec_group1 = DataParallelExecutorGroup(symbol=sym, contexts=contexts,
workload=workload, data_shapes=data_shapes,
label_shapes=label_shapes, param_names=shared_arg_names,
for_training=True, inputs_need_grad=False)
# shared_data_arrays should only have input "data" and "softmax_label" arrays
for i in range(len(contexts)):
assert len(exec_group1.shared_data_arrays[i]) == len(input_names),\
"exec_group1.shared_data_arrays[%d] should have the same number of names as in input_names" % i
for name in input_names:
assert name in exec_group1.shared_data_arrays[i],\
"arg %s should be in exec_group1.shared_data_arrays[%d]" % (name, i)
# generate an rnn sym with #layers=5
sym = get_rnn_sym(num_layers=5, num_words=num_words, num_hidden=num_hidden,
num_embed=num_embed, seq_len=max_bucket_size,
sparse_embedding=sparse_embedding)
arg_names2 = sym.list_arguments()
exec_group2 = DataParallelExecutorGroup(symbol=sym, contexts=contexts,
workload=workload, data_shapes=data_shapes,
label_shapes=label_shapes, param_names=shared_arg_names,
for_training=True, inputs_need_grad=False,
shared_group=exec_group1)
extra_args = [name for name in arg_names2 if name not in shared_arg_names]
check_shared_grad = not sparse_embedding
test_shared_exec_group(exec_grp_shared=exec_group1, exec_grp_created=exec_group2,
shared_arg_names=shared_arg_names, extra_args=extra_args,
check_shared_grad=check_shared_grad)
contexts = [mx.cpu(0), mx.cpu(1)]
workload = [1] * len(contexts)
batch_size = 32
max_bucket_size = 80
num_words = 1000
num_hidden = 100
num_embed = 200
data_shapes = [('data', (batch_size, max_bucket_size))]
label_shapes = [('softmax_label', (batch_size, max_bucket_size))]
sparse_embedding_opt = [True, False]
for opt in sparse_embedding_opt:
check_shared_exec_group(opt)
@with_seed()
def test_factorization_machine_module():
""" Test factorization machine model with sparse operators """
    # this unit test only exercises the training flow; training accuracy is tested in another test
def check_factorization_machine_module(num_epochs=None):
print("check_factorization_machine_module")
def fm(factor_size, feature_dim, init):
x = mx.symbol.Variable("data", stype='csr')
v = mx.symbol.Variable("v", shape=(feature_dim, factor_size),
init=init, stype='row_sparse')
w1_weight = mx.symbol.var('w1_weight', shape=(feature_dim, 1),
init=init, stype='row_sparse')
w1_bias = mx.symbol.var('w1_bias', shape=(1))
w1 = mx.symbol.broadcast_add(mx.symbol.dot(x, w1_weight), w1_bias)
v_s = mx.symbol._internal._square_sum(data=v, axis=1, keepdims=True)
x_s = mx.symbol.square(data=x)
bd_sum = mx.sym.dot(x_s, v_s)
w2 = mx.symbol.dot(x, v)
w2_squared = 0.5 * mx.symbol.square(data=w2)
w_all = mx.symbol.Concat(w1, w2_squared, dim=1)
sum1 = mx.symbol.sum(data=w_all, axis=1, keepdims=True)
sum2 = 0.5 * mx.symbol.negative(bd_sum)
model = mx.sym.elemwise_add(sum1, sum2)
y = mx.symbol.Variable("label")
model = mx.symbol.LinearRegressionOutput(data=model, label=y)
return model
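        # Note (added annotation, not in the original test): the symbol graph above uses the
        # standard factorization-machine identity for the second-order interaction term,
        #     sum_{i<j} <v_i, v_j> x_i x_j = 0.5 * ( (x V)^2 - (x^2)(V^2) ),
        # where w2_squared corresponds to 0.5 * (x V)^2 and sum2 to -0.5 * (x^2)(V^2),
        # so the whole model can be evaluated with sparse dot products only.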
# model
init = mx.initializer.Normal(sigma=0.01)
factor_size = 4
feature_dim = 10000
model = fm(factor_size, feature_dim, init)
# data iter
num_batches = 5
batch_size = 64
num_samples = batch_size * num_batches
# generate some random csr data
csr_nd = rand_ndarray((num_samples, feature_dim), 'csr', 0.1)
label = mx.nd.ones((num_samples,1))
# the alternative is to use LibSVMIter
train_iter = mx.io.NDArrayIter(data=csr_nd,
label={'label':label},
batch_size=batch_size,
last_batch_handle='discard')
# create module
mod = mx.mod.Module(symbol=model, data_names=['data'], label_names=['label'])
        # allocate memory given the input data and label shapes
mod.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
# initialize parameters by uniform random numbers
mod.init_params(initializer=init)
# use Sparse SGD with learning rate 0.1 to train
sgd = mx.optimizer.SGD(momentum=0.1, clip_gradient=5.0, learning_rate=0.01,
rescale_grad=1.0/batch_size)
mod.init_optimizer(optimizer=sgd)
if num_epochs is None:
num_epochs = 50
expected_accuracy = 0.02
        # use MSE as the metric
metric = mx.gluon.metric.create('MSE')
# train 'num_epochs' epoch
for epoch in range(num_epochs):
train_iter.reset()
metric.reset()
for batch in train_iter:
mod.forward(batch, is_train=True) # compute predictions
                mod.update_metric(metric, batch.label) # accumulate the evaluation metric
mod.backward() # compute gradients
mod.update() # update parameters
print('Epoch %d, Training %s' % (epoch, metric.get()))
if num_epochs > 1:
assert(metric.get()[1] < expected_accuracy)
check_factorization_machine_module()
@with_seed()
def test_module_initializer():
def regression_model(m):
x = mx.symbol.var("data", stype='csr')
v = mx.symbol.var("v", shape=(m, 1), init=mx.init.Uniform(scale=.1),
stype='row_sparse')
model = mx.symbol.dot(lhs=x, rhs=v)
y = mx.symbol.Variable("label")
model = mx.symbol.LinearRegressionOutput(data=model, label=y, name="out")
return model
n, m = 128, 100
model = regression_model(m)
data = mx.nd.zeros(shape=(n, m), stype='csr')
label = mx.nd.zeros((n, 1))
iterator = mx.io.NDArrayIter(data=data, label={'label':label},
batch_size=n, last_batch_handle='discard')
# create module
mod = mx.mod.Module(symbol=model, data_names=['data'], label_names=['label'])
mod.bind(data_shapes=iterator.provide_data, label_shapes=iterator.provide_label)
mod.init_params()
v = mod._arg_params['v']
assert(v.stype == 'row_sparse')
assert(np.sum(v.asnumpy()) != 0)
@with_seed()
def test_forward_reshape():
num_class=10
data1 = mx.sym.Variable('data1')
data2 = mx.sym.Variable('data2')
conv1 = mx.sym.Convolution(data=data1, kernel=(2, 2), num_filter=2, stride=(2, 2))
conv2 = mx.sym.Convolution(data=data2, kernel=(3, 3), num_filter=3, stride=(1, 1))
pooling1 = mx.sym.Pooling(data=conv1, kernel=(2, 2), stride=(1, 1), pool_type="avg")
pooling2 = mx.sym.Pooling(data=conv2, kernel=(2, 2), stride=(1, 1), pool_type="max")
flatten1 = mx.sym.flatten(data=pooling1)
flatten2 = mx.sym.flatten(data=pooling2)
sum = mx.sym.sum(data=flatten1, axis=1) + mx.sym.sum(data=flatten2, axis=1)
fc = mx.sym.FullyConnected(data=sum, num_hidden=num_class)
sym = mx.sym.SoftmaxOutput(data=fc, name='softmax')
dshape1 = (10, 3, 64, 64)
dshape2 = (10, 3, 32, 32)
lshape = (10,)
mod = mx.mod.Module(symbol=sym, data_names=['data1', 'data2'],
label_names=['softmax_label'])
mod.bind(data_shapes=[('data1', dshape1), ('data2', dshape2)],
label_shapes=[('softmax_label', lshape)])
mod.init_params()
mod.init_optimizer(optimizer_params={'learning_rate': 0.01})
# Train with original data shapes
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
mx.nd.random.uniform(5, 15, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
mod.backward()
mod.update()
# Train with different batch size
dshape1 = (3, 3, 64, 64)
dshape2 = (3, 3, 32, 32)
lshape = (3,)
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
mx.nd.random.uniform(5, 15, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
mod.backward()
mod.update()
dshape1 = (20, 3, 64, 64)
dshape2 = (20, 3, 32, 32)
lshape = (20,)
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(3, 5, dshape1),
mx.nd.random.uniform(10, 25, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
mod.backward()
mod.update()
#Train with both different batch size and data shapes
dshape1 = (20, 3, 120, 120)
dshape2 = (20, 3, 32, 64)
lshape = (20,)
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
mx.nd.random.uniform(5, 15, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
mod.backward()
mod.update()
dshape1 = (5, 3, 28, 40)
dshape2 = (5, 3, 24, 16)
lshape = (5,)
data_batch = mx.io.DataBatch(data=[mx.nd.random.uniform(0, 9, dshape1),
mx.nd.random.uniform(15, 25, dshape2)],
label=[mx.nd.ones(lshape)])
mod.forward(data_batch)
assert mod.get_outputs()[0].shape == tuple([lshape[0], num_class])
mod.backward()
mod.update()
#Test score
dataset_shape1 = (30, 3, 30, 30)
dataset_shape2 = (30, 3, 20, 40)
labelset_shape = (30,)
eval_dataiter = mx.io.NDArrayIter(data=[mx.nd.random.uniform(0, 9, dataset_shape1),
mx.nd.random.uniform(15, 25, dataset_shape2)],
label=[mx.nd.ones(labelset_shape)],
batch_size=5)
assert len(mod.score(eval_data=eval_dataiter, eval_metric='acc')) == 1
#Test prediction
dshape1 = (1, 3, 30, 30)
dshape2 = (1, 3, 20, 40)
dataset_shape1 = (10, 3, 30, 30)
dataset_shape2 = (10, 3, 20, 40)
pred_dataiter = mx.io.NDArrayIter(data=[mx.nd.random.uniform(0, 9, dataset_shape1),
mx.nd.random.uniform(15, 25, dataset_shape2)])
mod.bind(data_shapes=[('data1', dshape1), ('data2', dshape2)],
for_training=False, force_rebind=True)
assert mod.predict(pred_dataiter).shape == tuple([10, num_class])
@with_seed()
def test_forward_types():
#Test forward with other data batch API
Batch = namedtuple('Batch', ['data'])
data = mx.sym.Variable('data')
out = data * 2
mod = mx.mod.Module(symbol=out, label_names=None)
mod.bind(data_shapes=[('data', (1, 10))])
mod.init_params()
data1 = [mx.nd.ones((1, 10))]
mod.forward(Batch(data1))
assert mod.get_outputs()[0].shape == (1, 10)
data2 = [mx.nd.ones((3, 5))]
mod.forward(Batch(data2))
assert mod.get_outputs()[0].shape == (3, 5)
#Test forward with other NDArray and np.ndarray inputs
data = mx.sym.Variable('data')
out = data * 2
mod = mx.mod.Module(symbol=out, label_names=None)
mod.bind(data_shapes=[('data', (1, 10))])
mod.init_params()
data1 = mx.nd.ones((1, 10))
assert mod.predict(data1).shape == (1, 10)
data2 = np.ones((1, 10))
assert mod.predict(data1).shape == (1, 10)
def test_reference_single_batch_during_fit():
"""
When using C++-based iterators, it's important that only a single batch is referenced at a time. Because C++
iterators are exposed to the Python code through a C API, there is no concept of reference counting. Hence,
typically C++ iterators will deallocate a batch when next() is called on them. So, we need to make sure the Python
code only references a single batch at a time, otherwise the Python code will attempt to access freed memory,
resulting in either (a) garbage accuracy or (b) a segmentation fault.
"""
current_batch_i = None
class MockBatch(object):
def __init__(self, i):
self.i = i
@property
def label(self):
global current_batch_i
assert self.i == current_batch_i
class MockTrainData(object):
def __init__(self, batches):
self._i = 0
self._batches = batches
self.provide_data = None
self.provide_label = None
self.reset = lambda: None
def __iter__(self):
self._i = 0
return self
def __next__(self):
global current_batch_i
if self._i < self._batches:
current_batch_i = self._i
self._i += 1
return MockBatch(current_batch_i)
raise StopIteration
def next(self):
return self.__next__()
mod = mx.mod.BaseModule()
def empty_fn(*args, **kwargs):
pass
mod.bind = empty_fn
mod.init_params = empty_fn
mod.init_optimizer = empty_fn
mod.forward = empty_fn
mod.backward = empty_fn
mod.update = empty_fn
mod.update_metric = empty_fn
mod.get_params = lambda: (None, None)
train_data = MockTrainData(batches=2)
mod.fit(train_data, num_epoch=1)
@with_seed()
def test_bucket_module_grad_req():
batch_size = 2
def sym_gen(_):
data = mx.symbol.Variable('data')
weight = mx.symbol.Variable('a', shape=(1,), init=mx.init.One())
sym = mx.sym.make_loss(mx.sym.broadcast_mul(data, weight))
return sym, ('data',), None
mod = mx.mod.BucketingModule(sym_gen=sym_gen, default_bucket_key=10)
mod.bind(data_shapes=[['data', (batch_size, )]], for_training=True, grad_req='write')
mod.init_params()
mod.forward_backward(mx.io.DataBatch(data=[mx.nd.ones((batch_size,))],
label=None,
provide_data=[mx.io.DataDesc(name='data', shape=(batch_size, ), layout='N')],
bucket_key=10))
assert(mod._curr_module._exec_group.execs[0].grad_dict['a'].asscalar() == batch_size)
mod.forward_backward(mx.io.DataBatch(data=[mx.nd.ones((batch_size,))],
label=None,
provide_data=[mx.io.DataDesc(name='data', shape=(batch_size, ), layout='N')],
bucket_key=5))
assert(mod._curr_module._exec_group.execs[0].grad_dict['a'].asscalar() == batch_size)
mod = mx.mod.BucketingModule(sym_gen=sym_gen, default_bucket_key=10)
mod.bind(data_shapes=[['data', (batch_size, )]], for_training=True, grad_req='add')
mod.init_params()
mod.forward_backward(mx.io.DataBatch(data=[mx.nd.ones((batch_size,))],
label=None,
provide_data=[mx.io.DataDesc(name='data', shape=(batch_size,), layout='N')],
bucket_key=10))
assert(mod._curr_module._exec_group.execs[0].grad_dict['a'].asscalar() == batch_size)
mod.forward_backward(mx.io.DataBatch(data=[mx.nd.ones((batch_size,))],
label=None,
provide_data=[mx.io.DataDesc(name='data', shape=(batch_size,), layout='N')],
bucket_key=5))
assert mod._curr_module._grad_req == 'add'
assert(mod._curr_module._exec_group.execs[0].grad_dict['a'].asscalar() == 2 * batch_size)
def test_module_update_no_pragram():
    # test that module update works on layers without params
data_shape = (10, 10)
data = mx.sym.Variable('data')
out = mx.sym.Dropout(data, 0.5)
mod = mx.mod.Module(out)
mod.bind(data_shapes=[('data', data_shape)])
mod.init_params()
mod.init_optimizer()
data_batch = mx.io.DataBatch([nd.ones(data_shape)])
mod.forward_backward(data_batch)
mod.update()
assert(mod.get_outputs()[0].shape == data_shape)
def test_module_init_optimizer():
def get_module_idx2name(mod):
idx2name = {}
idx2name.update(enumerate(mod._exec_group.param_names))
return idx2name
data = mx.sym.Variable('data')
sym = mx.sym.FullyConnected(data, num_hidden=20, name='fc')
batch_size = 8
opt_params = {'learning_rate': 1, 'rescale_grad': 1.0 / batch_size}
# Pass an optimizer str
mod1 = mx.mod.Module(sym, ('data',), None, context=mx.cpu(0))
mod1.bind(data_shapes=[('data', (batch_size, 20))])
mod1.init_params()
mod1.init_optimizer(optimizer='sgd', optimizer_params=opt_params)
assert mod1._optimizer.idx2name == get_module_idx2name(mod1)
# Pass an Optimizer object
mod2 = mx.mod.Module(sym, ('data',), None, context=mx.cpu(0))
mod2.bind(data_shapes=[('data', (batch_size, 20))])
mod2.init_params()
opt = mx.optimizer.SGD(**opt_params)
mod2.init_optimizer(optimizer=opt)
assert mod2._optimizer.idx2name == get_module_idx2name(mod2)
| 43.564922 | 120 | 0.608888 |
7941ef012dcf5f29b55a4c4879aa4704f5b3ee6b | 227 | py | Python | examples/jeans_damping.py | DDMGNI/viVlasov1D | 901dd058711f6943eb6497b941bc115a64e822de | ["MIT"] | 2 | 2018-09-13T12:39:07.000Z | 2019-04-05T04:55:59.000Z | examples/jeans_damping.py | DDMGNI/viVlasov1D | 901dd058711f6943eb6497b941bc115a64e822de | ["MIT"] | null | null | null | examples/jeans_damping.py | DDMGNI/viVlasov1D | 901dd058711f6943eb6497b941bc115a64e822de | ["MIT"] | null | null | null |
"""
Jeans Damping
Initial density: :math:`n(x) = 1 + A \, \cos{( k_{x} \, ( x - L_{x}/2)) }`
with :math:`A = 0.1, k = 2.0`
"""
import numpy as np
def density(x, L):
return 1. + 0.1 * np.cos(2.0 * (x - 0.5 * L))
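# --- Illustrative usage sketch (not part of the original example) -----------
# Evaluating the perturbed density on a uniform grid; the box length L_x and the
# grid resolution below are arbitrary choices for demonstration only.
if __name__ == "__main__":
    L_x = 2.0 * np.pi / 2.0          # one wavelength of the k = 2.0 perturbation
    x = np.linspace(0.0, L_x, 64, endpoint=False)
    n = density(x, L_x)              # n(x) = 1 + 0.1 * cos(2.0 * (x - L_x / 2))
    print(n.min(), n.max())          # stays within [0.9, 1.1]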
| 15.133333 | 77 | 0.46696 |
7941ef68de04d3af1c493b2d1d50cf54abb1feeb | 2,887 | py | Python | app/auth/services/reauthentication.py | Arjun-sna/flask-forum-api-service | 9c33c10269a147d7c5225e9c9106ccc43eb31705 | ["BSD-3-Clause"] | null | null | null | app/auth/services/reauthentication.py | Arjun-sna/flask-forum-api-service | 9c33c10269a147d7c5225e9c9106ccc43eb31705 | ["BSD-3-Clause"] | 1 | 2021-11-25T17:25:19.000Z | 2021-11-25T17:25:19.000Z | app/auth/services/reauthentication.py | Arjun-sna/flask-forum-api-service | 9c33c10269a147d7c5225e9c9106ccc43eb31705 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
app.auth.services.reauthentication
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tools for handling reauthentication needs inside app.
:copyright: (c) 2014-2018 the FlaskForum Team
:license: BSD, see LICENSE for more details
"""
import logging
from flask_babelplus import gettext as _
from werkzeug.security import check_password_hash
from ...core.tokens import TokenActions, Token
from ...core.auth.authentication import (PostReauthenticateHandler,
ReauthenticateFailureHandler,
ReauthenticateManager,
ReauthenticateProvider,
StopAuthentication)
from ...utils.helpers import time_utcnow
logger = logging.getLogger(__name__)
class DefaultAppReauthProvider(ReauthenticateProvider):
"""
This is the default reauth provider in FlaskForum, it compares the provided
password against the current user's hashed password.
"""
def reauthenticate(self, user):
if user: # pragma: no branch
return True
class ClearFailedLoginsOnReauth(PostReauthenticateHandler):
"""
Handler that clears failed login attempts after a successful
reauthentication.
"""
def handle_post_reauth(self, user):
user.login_attempts = 0
class MarkFailedReauth(ReauthenticateFailureHandler):
"""
Failure handler that marks the failed reauth attempt as a failed login
and when it occurred.
"""
def handle_reauth_failure(self, user):
user.login_attempts += 1
user.last_failed_login = time_utcnow()
class PluginReauthenticationManager(ReauthenticateManager):
"""
Default reauthentication manager for FlaskForum, it relies on plugin hooks
to manage the reauthentication flow.
"""
def __init__(self, plugin_manager, session, token_serializer):
self.plugin_manager = plugin_manager
self.session = session
self.token_serializer = token_serializer
def reauthenticate(self, user):
try:
result = self.plugin_manager.hook.app_reauth_attempt(
user=user
)
if not result:
raise StopAuthentication(_("Verification failed."))
self.plugin_manager.hook.app_post_reauth(user=user)
token = self.token_serializer.dumps(
Token(user_id=user.id, operation=TokenActions.AUTH))
return token, user
except StopAuthentication:
self.plugin_manager.hook.app_reauth_failed(user=user)
raise
finally:
try:
self.session.commit()
except Exception:
logger.exception("Exception while processing login")
self.session.rollback()
raise
| 31.725275 | 79 | 0.63353 |
7941efa04030bfd63e292cb3de20933c30f4773a | 17,515 | py | Python | musicbot/config.py | GoSeungUk/subot | e1e9b6566d20b6ed81acd646f7528437c5306f24 | ["MIT"] | 4 | 2020-07-14T15:06:00.000Z | 2021-09-23T16:41:14.000Z | musicbot/config.py | GoSeungUk/subot | e1e9b6566d20b6ed81acd646f7528437c5306f24 | ["MIT"] | 3 | 2021-11-16T05:58:18.000Z | 2022-03-30T11:41:19.000Z | musicbot/config.py | GoSeungUk/subot | e1e9b6566d20b6ed81acd646f7528437c5306f24 | ["MIT"] | 1 | 2021-09-15T00:53:02.000Z | 2021-09-15T00:53:02.000Z |
import os
import sys
import codecs
import shutil
import logging
import configparser
from .exceptions import HelpfulError
log = logging.getLogger(__name__)
class Config:
# noinspection PyUnresolvedReferences
def __init__(self, config_file):
self.config_file = config_file
self.find_config()
config = configparser.ConfigParser(interpolation=None)
config.read(config_file, encoding='utf-8')
confsections = {"Credentials", "Permissions", "Chat", "MusicBot"}.difference(config.sections())
if confsections:
raise HelpfulError(
"One or more required config sections are missing.",
"Fix your config. Each [Section] should be on its own line with "
"nothing else on it. The following sections are missing: {}".format(
', '.join(['[%s]' % s for s in confsections])
),
preface="An error has occured parsing the config:\n"
)
self._confpreface = "An error has occured reading the config:\n"
self._confpreface2 = "An error has occured validating the config:\n"
self._login_token = config.get('Credentials', 'Token', fallback=ConfigDefaults.token)
self.auth = ()
self.spotify_clientid = config.get('Credentials', 'Spotify_ClientID', fallback=ConfigDefaults.spotify_clientid)
self.spotify_clientsecret = config.get('Credentials', 'Spotify_ClientSecret', fallback=ConfigDefaults.spotify_clientsecret)
self.owner_id = config.get('Permissions', 'OwnerID', fallback=ConfigDefaults.owner_id)
self.dev_ids = config.get('Permissions', 'DevIDs', fallback=ConfigDefaults.dev_ids)
self.bot_exception_ids = config.get("Permissions", "BotExceptionIDs", fallback=ConfigDefaults.bot_exception_ids)
self.command_prefix = config.get('Chat', 'CommandPrefix', fallback=ConfigDefaults.command_prefix)
self.bound_channels = config.get('Chat', 'BindToChannels', fallback=ConfigDefaults.bound_channels)
self.unbound_servers = config.getboolean('Chat', 'AllowUnboundServers', fallback=ConfigDefaults.unbound_servers)
self.autojoin_channels = config.get('Chat', 'AutojoinChannels', fallback=ConfigDefaults.autojoin_channels)
self.dm_nowplaying = config.getboolean('Chat', 'DMNowPlaying', fallback=ConfigDefaults.dm_nowplaying)
self.no_nowplaying_auto = config.getboolean('Chat', 'DisableNowPlayingAutomatic', fallback=ConfigDefaults.no_nowplaying_auto)
self.nowplaying_channels = config.get('Chat', 'NowPlayingChannels', fallback=ConfigDefaults.nowplaying_channels)
self.delete_nowplaying = config.getboolean('Chat', 'DeleteNowPlaying', fallback=ConfigDefaults.delete_nowplaying)
self.default_volume = config.getfloat('MusicBot', 'DefaultVolume', fallback=ConfigDefaults.default_volume)
self.skips_required = config.getint('MusicBot', 'SkipsRequired', fallback=ConfigDefaults.skips_required)
self.skip_ratio_required = config.getfloat('MusicBot', 'SkipRatio', fallback=ConfigDefaults.skip_ratio_required)
self.save_videos = config.getboolean('MusicBot', 'SaveVideos', fallback=ConfigDefaults.save_videos)
self.now_playing_mentions = config.getboolean('MusicBot', 'NowPlayingMentions', fallback=ConfigDefaults.now_playing_mentions)
self.auto_summon = config.getboolean('MusicBot', 'AutoSummon', fallback=ConfigDefaults.auto_summon)
self.auto_playlist = config.getboolean('MusicBot', 'UseAutoPlaylist', fallback=ConfigDefaults.auto_playlist)
self.auto_playlist_random = config.getboolean('MusicBot', 'AutoPlaylistRandom', fallback=ConfigDefaults.auto_playlist_random)
self.auto_pause = config.getboolean('MusicBot', 'AutoPause', fallback=ConfigDefaults.auto_pause)
self.delete_messages = config.getboolean('MusicBot', 'DeleteMessages', fallback=ConfigDefaults.delete_messages)
self.delete_invoking = config.getboolean('MusicBot', 'DeleteInvoking', fallback=ConfigDefaults.delete_invoking)
self.persistent_queue = config.getboolean('MusicBot', 'PersistentQueue', fallback=ConfigDefaults.persistent_queue)
self.status_message = config.get('MusicBot', 'StatusMessage', fallback=ConfigDefaults.status_message)
self.write_current_song = config.getboolean('MusicBot', 'WriteCurrentSong', fallback=ConfigDefaults.write_current_song)
self.allow_author_skip = config.getboolean('MusicBot', 'AllowAuthorSkip', fallback=ConfigDefaults.allow_author_skip)
self.use_experimental_equalization = config.getboolean('MusicBot', 'UseExperimentalEqualization', fallback=ConfigDefaults.use_experimental_equalization)
self.embeds = config.getboolean('MusicBot', 'UseEmbeds', fallback=ConfigDefaults.embeds)
self.queue_length = config.getint('MusicBot', 'QueueLength', fallback=ConfigDefaults.queue_length)
self.remove_ap = config.getboolean('MusicBot', 'RemoveFromAPOnError', fallback=ConfigDefaults.remove_ap)
self.show_config_at_start = config.getboolean('MusicBot', 'ShowConfigOnLaunch', fallback=ConfigDefaults.show_config_at_start)
self.legacy_skip = config.getboolean('MusicBot', 'LegacySkip', fallback=ConfigDefaults.legacy_skip)
self.leavenonowners = config.getboolean('MusicBot', 'LeaveServersWithoutOwner', fallback=ConfigDefaults.leavenonowners)
self.usealias = config.getboolean('MusicBot', 'UseAlias', fallback=ConfigDefaults.usealias)
self.debug_level = config.get('MusicBot', 'DebugLevel', fallback=ConfigDefaults.debug_level)
self.debug_level_str = self.debug_level
self.debug_mode = False
self.blacklist_file = config.get('Files', 'BlacklistFile', fallback=ConfigDefaults.blacklist_file)
self.auto_playlist_file = config.get('Files', 'AutoPlaylistFile', fallback=ConfigDefaults.auto_playlist_file)
self.i18n_file = config.get('Files', 'i18nFile', fallback=ConfigDefaults.i18n_file)
self.auto_playlist_removed_file = None
self.run_checks()
self.missing_keys = set()
self.check_changes(config)
self.find_autoplaylist()
def get_all_keys(self, conf):
"""Returns all config keys as a list"""
sects = dict(conf.items())
keys = []
for k in sects:
s = sects[k]
keys += [key for key in s.keys()]
return keys
def check_changes(self, conf):
exfile = 'config/example_options.ini'
if os.path.isfile(exfile):
usr_keys = self.get_all_keys(conf)
exconf = configparser.ConfigParser(interpolation=None)
if not exconf.read(exfile, encoding='utf-8'):
return
ex_keys = self.get_all_keys(exconf)
if set(usr_keys) != set(ex_keys):
self.missing_keys = set(ex_keys) - set(usr_keys) # to raise this as an issue in bot.py later
def run_checks(self):
"""
Validation logic for bot settings.
"""
if self.i18n_file != ConfigDefaults.i18n_file and not os.path.isfile(self.i18n_file):
log.warning('i18n file does not exist. Trying to fallback to {0}.'.format(ConfigDefaults.i18n_file))
self.i18n_file = ConfigDefaults.i18n_file
if not os.path.isfile(self.i18n_file):
raise HelpfulError(
"Your i18n file was not found, and we could not fallback.",
"As a result, the bot cannot launch. Have you moved some files? "
"Try pulling the recent changes from Git, or resetting your local repo.",
preface=self._confpreface
)
log.info('Using i18n: {0}'.format(self.i18n_file))
if not self._login_token:
raise HelpfulError(
"No bot token was specified in the config.",
"As of v1.9.6_1, you are required to use a Discord bot account. "
"See https://github.com/Just-Some-Bots/MusicBot/wiki/FAQ for info.",
preface=self._confpreface
)
else:
self.auth = (self._login_token,)
if self.owner_id:
self.owner_id = self.owner_id.lower()
if self.owner_id.isdigit():
if int(self.owner_id) < 10000:
raise HelpfulError(
"An invalid OwnerID was set: {}".format(self.owner_id),
"Correct your OwnerID. The ID should be just a number, approximately "
"18 characters long, or 'auto'. If you don't know what your ID is, read the "
"instructions in the options or ask in the help server.",
preface=self._confpreface
)
self.owner_id = int(self.owner_id)
elif self.owner_id == 'auto':
pass # defer to async check
else:
self.owner_id = None
if not self.owner_id:
raise HelpfulError(
"No OwnerID was set.",
"Please set the OwnerID option in {}".format(self.config_file),
preface=self._confpreface
)
if self.bot_exception_ids:
try:
self.bot_exception_ids = set(int(x) for x in self.bot_exception_ids.replace(',', ' ').split())
except:
log.warning("BotExceptionIDs data is invalid, will ignore all bots")
self.bot_exception_ids = set()
if self.bound_channels:
try:
self.bound_channels = set(x for x in self.bound_channels.replace(',', ' ').split() if x)
except:
log.warning("BindToChannels data is invalid, will not bind to any channels")
self.bound_channels = set()
if self.autojoin_channels:
try:
self.autojoin_channels = set(x for x in self.autojoin_channels.replace(',', ' ').split() if x)
except:
log.warning("AutojoinChannels data is invalid, will not autojoin any channels")
self.autojoin_channels = set()
if self.nowplaying_channels:
try:
self.nowplaying_channels = set(int(x) for x in self.nowplaying_channels.replace(',', ' ').split() if x)
except:
log.warning("NowPlayingChannels data is invalid, will use the default behavior for all servers")
                self.nowplaying_channels = set()
self._spotify = False
if self.spotify_clientid and self.spotify_clientsecret:
self._spotify = True
self.delete_invoking = self.delete_invoking and self.delete_messages
self.bound_channels = set(int(item) for item in self.bound_channels)
self.autojoin_channels = set(int(item) for item in self.autojoin_channels)
ap_path, ap_name = os.path.split(self.auto_playlist_file)
apn_name, apn_ext = os.path.splitext(ap_name)
self.auto_playlist_removed_file = os.path.join(ap_path, apn_name + '_removed' + apn_ext)
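        # Illustration (comment only, not part of the original file): with the
        # default AutoPlaylistFile of 'config/autoplaylist.txt', the three lines
        # above produce 'config/autoplaylist_removed.txt' as the removed-entries file.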
if hasattr(logging, self.debug_level.upper()):
self.debug_level = getattr(logging, self.debug_level.upper())
else:
log.warning("Invalid DebugLevel option \"{}\" given, falling back to INFO".format(self.debug_level_str))
self.debug_level = logging.INFO
self.debug_level_str = 'INFO'
self.debug_mode = self.debug_level <= logging.DEBUG
self.create_empty_file_ifnoexist('config/blacklist.txt')
self.create_empty_file_ifnoexist('config/whitelist.txt')
def create_empty_file_ifnoexist(self, path):
if not os.path.isfile(path):
open(path, 'a').close()
log.warning('Creating %s' % path)
# TODO: Add save function for future editing of options with commands
# Maybe add warnings about fields missing from the config file
async def async_validate(self, bot):
log.debug("Validating options...")
if self.owner_id == 'auto':
if not bot.user.bot:
raise HelpfulError(
"Invalid parameter \"auto\" for OwnerID option.",
"Only bot accounts can use the \"auto\" option. Please "
"set the OwnerID in the config.",
preface=self._confpreface2
)
self.owner_id = bot.cached_app_info.owner.id
log.debug("Acquired owner id via API")
if self.owner_id == bot.user.id:
raise HelpfulError(
"Your OwnerID is incorrect or you've used the wrong credentials.",
"The bot's user ID and the id for OwnerID is identical. "
"This is wrong. The bot needs a bot account to function, "
"meaning you cannot use your own account to run the bot on. "
"The OwnerID is the id of the owner, not the bot. "
"Figure out which one is which and use the correct information.",
preface=self._confpreface2
)
def find_config(self):
config = configparser.ConfigParser(interpolation=None)
if not os.path.isfile(self.config_file):
if os.path.isfile(self.config_file + '.ini'):
shutil.move(self.config_file + '.ini', self.config_file)
log.info("Moving {0} to {1}, you should probably turn file extensions on.".format(
self.config_file + '.ini', self.config_file
))
elif os.path.isfile('config/example_options.ini'):
shutil.copy('config/example_options.ini', self.config_file)
log.warning('Options file not found, copying example_options.ini')
else:
raise HelpfulError(
"Your config files are missing. Neither options.ini nor example_options.ini were found.",
"Grab the files back from the archive or remake them yourself and copy paste the content "
"from the repo. Stop removing important files!"
)
if not config.read(self.config_file, encoding='utf-8'):
c = configparser.ConfigParser()
try:
# load the config again and check to see if the user edited that one
c.read(self.config_file, encoding='utf-8')
if not int(c.get('Permissions', 'OwnerID', fallback=0)): # jake pls no flame
print(flush=True)
log.critical("Please configure config/options.ini and re-run the bot.")
sys.exit(1)
except ValueError: # Config id value was changed but its not valid
raise HelpfulError(
'Invalid value "{}" for OwnerID, config cannot be loaded. '.format(
c.get('Permissions', 'OwnerID', fallback=None)
),
"The OwnerID option requires a user ID or 'auto'."
)
except Exception as e:
print(flush=True)
log.critical("Unable to copy config/example_options.ini to {}".format(self.config_file), exc_info=e)
sys.exit(2)
def find_autoplaylist(self):
if not os.path.exists(self.auto_playlist_file):
if os.path.exists('config/_autoplaylist.txt'):
shutil.copy('config/_autoplaylist.txt', self.auto_playlist_file)
log.debug("Copying _autoplaylist.txt to autoplaylist.txt")
else:
log.warning("No autoplaylist file found.")
def write_default_config(self, location):
pass
class ConfigDefaults:
owner_id = None
token = None
dev_ids = set()
bot_exception_ids = set()
spotify_clientid = None
spotify_clientsecret = None
command_prefix = '!'
bound_channels = set()
unbound_servers = False
autojoin_channels = set()
dm_nowplaying = False
no_nowplaying_auto = False
nowplaying_channels = set()
delete_nowplaying = True
default_volume = 0.15
skips_required = 4
skip_ratio_required = 0.5
save_videos = True
now_playing_mentions = False
auto_summon = True
auto_playlist = True
auto_playlist_random = True
auto_pause = True
delete_messages = True
delete_invoking = False
persistent_queue = True
debug_level = 'INFO'
status_message = None
write_current_song = False
allow_author_skip = True
use_experimental_equalization = False
embeds = True
queue_length = 10
remove_ap = True
show_config_at_start = False
legacy_skip = False
leavenonowners = False
usealias = True
options_file = 'config/options.ini'
blacklist_file = 'config/blacklist.txt'
auto_playlist_file = 'config/autoplaylist.txt' # this will change when I add playlists
i18n_file = 'config/i18n/en.json'
setattr(ConfigDefaults, codecs.decode(b'ZW1haWw=', '\x62\x61\x73\x65\x36\x34').decode('ascii'), None)
setattr(ConfigDefaults, codecs.decode(b'cGFzc3dvcmQ=', '\x62\x61\x73\x65\x36\x34').decode('ascii'), None)
setattr(ConfigDefaults, codecs.decode(b'dG9rZW4=', '\x62\x61\x73\x65\x36\x34').decode('ascii'), None)
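# Note (comments only, not part of the original file): the base64 strings in the
# setattr calls above decode to 'email', 'password' and 'token', and the
# hex-escaped codec name is 'base64', so the three lines are equivalent to
#   ConfigDefaults.email = None
#   ConfigDefaults.password = None
#   ConfigDefaults.token = None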
# These two are going to be wrappers for the id lists, with add/remove/load/save functions
# and id/object conversion so types aren't an issue
class Blacklist:
pass
class Whitelist:
pass
| 45.611979 | 160 | 0.64539 |
7941f01538fb161a16c91ba29d0aaaa2a518c3f4 | 1,058 | py | Python | kubernetes_asyncio/test/test_extensions_v1beta1_scale_spec.py | aK0nshin/kubernetes_asyncio | aef9edcc1f8671a5b1bba9f4684bde890176b19c | ["Apache-2.0"] | null | null | null | kubernetes_asyncio/test/test_extensions_v1beta1_scale_spec.py | aK0nshin/kubernetes_asyncio | aef9edcc1f8671a5b1bba9f4684bde890176b19c | ["Apache-2.0"] | null | null | null | kubernetes_asyncio/test/test_extensions_v1beta1_scale_spec.py | aK0nshin/kubernetes_asyncio | aef9edcc1f8671a5b1bba9f4684bde890176b19c | ["Apache-2.0"] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.14.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.extensions_v1beta1_scale_spec import ExtensionsV1beta1ScaleSpec # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestExtensionsV1beta1ScaleSpec(unittest.TestCase):
"""ExtensionsV1beta1ScaleSpec unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testExtensionsV1beta1ScaleSpec(self):
"""Test ExtensionsV1beta1ScaleSpec"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes_asyncio.client.models.extensions_v1beta1_scale_spec.ExtensionsV1beta1ScaleSpec() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 26.45 | 124 | 0.749527 |
7941f03b00f559059b0ba186cb20e7a4718b8aaf | 3,149 | py | Python | calculators/settings.py | nathantheinventor/calculators | 32a5785cb21e313950961d7388b0d493293f5b3f | ["Apache-2.0"] | null | null | null | calculators/settings.py | nathantheinventor/calculators | 32a5785cb21e313950961d7388b0d493293f5b3f | ["Apache-2.0"] | null | null | null | calculators/settings.py | nathantheinventor/calculators | 32a5785cb21e313950961d7388b0d493293f5b3f | ["Apache-2.0"] | null | null | null | """
Django settings for calculators project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k)qcp9$==(doowsl2x6c)^qf6*$m(c5w#o_#-^7n!_15jj3h7e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'mycalculators.apps.MyCalculatorsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'calculators.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'calculators.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
| 25.811475 | 91 | 0.697682 |
7941f053294ae962dad8e57a0ea9004542cb60c0 | 6,615 | py | Python | mm/src/mm.py | madmachineio/mm-sdk | e587ffd60508699283cd5fcded971ffcc124e115 | ["MIT"] | 13 | 2020-09-09T04:13:45.000Z | 2021-10-01T10:15:31.000Z | mm/src/mm.py | madmachineio/mm-sdk | e587ffd60508699283cd5fcded971ffcc124e115 | ["MIT"] | null | null | null | mm/src/mm.py | madmachineio/mm-sdk | e587ffd60508699283cd5fcded971ffcc124e115 | ["MIT"] | 1 | 2021-03-18T10:02:07.000Z | 2021-03-18T10:02:07.000Z | import os, sys, platform, argparse
from pathlib import Path
import log, util, spm, mmp, download
PROJECT_PATH = ''
def init_project(args):
mmp_manifest = Path(PROJECT_PATH / 'Package.mmp')
spm_manifest = Path(PROJECT_PATH / 'Package.swift')
if mmp_manifest.is_file():
log.die('Package.mmp already exists in this directory')
board_name = args.board
if not spm_manifest.is_file():
init_type = args.type
if args.name:
init_name = args.name
else:
init_name = PROJECT_PATH.name
if init_type == 'executable' and board_name is None:
log.die('board name is required to initialize an executable')
content = spm.init_manifest(p_name=init_name, p_type=init_type)
spm_manifest.write_text(content, encoding='UTF-8')
else:
log.wrn('Package.swift already exists, ignoring specified project type and project name')
spm.initialize()
init_name = spm.get_project_name()
init_type = spm.get_project_type()
if init_type == 'executable' and board_name is None:
log.die('board name is required to initialize an executable')
content = mmp.init_manifest(board=board_name, p_type=init_type)
log.inf('Creating Package.mmp', level=log.VERBOSE_VERY)
mmp_manifest.write_text(content, encoding='UTF-8')
def build_project(args):
mmp_manifest = Path(PROJECT_PATH / 'Package.mmp')
if not mmp_manifest.is_file():
log.die('Package.mmp is required to build the project')
content = mmp_manifest.read_text()
mmp.initialize(content)
mmp.clean(p_path=PROJECT_PATH)
spm.initialize()
p_name = spm.get_project_name()
p_type = spm.get_project_type()
js_data = mmp.get_destination(p_type=p_type)
(PROJECT_PATH / '.build').mkdir(exist_ok=True)
destination = PROJECT_PATH / '.build/destination.json'
destination.write_text(js_data, encoding='UTF-8')
spm.build(destination=destination, p_type=p_type)
triple = mmp.get_triple()
path = PROJECT_PATH / '.build' / triple / 'release'
if p_type == 'executable' and (path / p_name).exists():
mmp.create_binary(path=path, name=p_name)
log.inf('Done!')
def download_project(args):
mmp_manifest = Path(PROJECT_PATH / 'Package.mmp')
if not mmp_manifest.is_file():
log.die('Package.mmp is required to download the project')
system = platform.system()
if system != 'Darwin':
log.die(system + ' is not supported currently, please copy the binary file manually')
content = mmp_manifest.read_text()
mmp.initialize(content)
board_name = mmp.get_board_name()
if board_name is None:
log.die('Board name is not specified')
file_name = mmp.get_board_info('target_file')
source = PROJECT_PATH / '.build' / mmp.get_triple() / 'release' / file_name
if not source.is_file():
log.die('Cannot find ' + file_name)
download.darwin_download(source=source)
log.inf('Done!')
def clean_project(args):
mmp.clean(p_path=PROJECT_PATH)
if args.deep:
spm.clean()
def get_info(args):
if args.info == 'usb':
mmp_manifest = Path(PROJECT_PATH / 'Package.mmp')
if not mmp_manifest.is_file():
log.die('Package.mmp is required to get usb status')
system = platform.system()
if system != 'Darwin':
log.die(system + ' is not supported currently, please copy the bin file manually')
content = mmp_manifest.read_text()
mmp.initialize(content)
board_name = mmp.get_board_name()
mount_path = download.darwin_get_mount_point()
if mount_path is None:
log.inf(board_name + ' not connected')
else:
log.inf(board_name + ' ready')
else:
spm_manifest = Path(PROJECT_PATH / 'Package.swift')
if not spm_manifest.is_file():
log.die('Package.swift is required to get project name')
spm.initialize()
project_name = spm.get_project_name()
log.inf(project_name)
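# Example invocations (comments only, not part of the original file); the `mm`
# entry-point name follows the usage hint printed in main() below, and the
# sub-commands and flags come from the argparse definitions there:
#   mm init --type executable -b SwiftIOBoard
#   mm build -v
#   mm download
#   mm clean --deep
#   mm get --info usb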
def main():
global PROJECT_PATH
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
    init_parser = subparsers.add_parser('init', help = 'Initialize a new project')
init_parser.add_argument('--type', type = str, choices = ['executable', 'library'], default = 'executable', help = 'Project type, default type is executable')
    init_parser.add_argument('--name', type = str, help = 'Initialize the new project with a specified name, otherwise the project name depends on the current directory name')
init_parser.add_argument('-b', '--board', type = str, choices =['SwiftIOBoard', 'SwiftIOFeather'], help = 'Generate MadMachine project file by passing this parameter')
init_parser.add_argument('-v', '--verbose', action = 'store_true', help = "Increase output verbosity")
init_parser.set_defaults(func = init_project)
build_parser = subparsers.add_parser('build', help = 'Build a project')
build_parser.add_argument('-v', '--verbose', action = 'store_true', help = "Increase output verbosity")
build_parser.set_defaults(func = build_project)
download_parser = subparsers.add_parser('download', help = 'Download the target executable to the board\'s SD card')
download_parser.add_argument('-v', '--verbose', action = 'store_true', help = "Increase output verbosity")
download_parser.set_defaults(func = download_project)
clean_parser = subparsers.add_parser('clean', help = 'Clean project')
clean_parser.add_argument('--deep', action = 'store_true', help = "Clean all compilation outputs")
clean_parser.add_argument('-v', '--verbose', action = 'store_true', help = "Increase output verbosity")
clean_parser.set_defaults(func = clean_project)
get_parser = subparsers.add_parser('get', help = 'Get specified information, used by IDE')
get_parser.add_argument('--info', type = str, choices =['name', 'usb'], help = 'Information type')
get_parser.add_argument('-v', '--verbose', action = 'store_true', help = "Increase output verbosity")
get_parser.set_defaults(func = get_info)
args = parser.parse_args()
if vars(args).get('func') is None:
log.die('subcommand is required, use \'mm --help\' to get more information')
if args.verbose:
log.set_verbosity(log.VERBOSE_VERY)
sdk_path = Path(os.path.realpath(sys.argv[0])).parent.parent.parent
util.set_sdk_path(sdk_path)
PROJECT_PATH = Path('.').resolve()
args.func(args)
if __name__ == "__main__":
main() | 38.911765 | 174 | 0.674981 |
7941f0d06b5f7a7c38a31e929f441d0d9e436215 | 23,930 | py | Python | src/indra_cogex/client/neo4j_client.py | bgyori/indra_cogex | 04a72d7941d4acd31ebfe73568114415d43394ea | ["BSD-2-Clause"] | 2 | 2021-05-27T02:44:09.000Z | 2022-01-12T21:34:07.000Z | src/indra_cogex/client/neo4j_client.py | bgyori/indra_cogex | 04a72d7941d4acd31ebfe73568114415d43394ea | ["BSD-2-Clause"] | 33 | 2021-08-29T18:23:26.000Z | 2022-03-29T21:56:08.000Z | src/indra_cogex/client/neo4j_client.py | bgyori/indra_cogex | 04a72d7941d4acd31ebfe73568114415d43394ea | ["BSD-2-Clause"] | 5 | 2021-06-15T09:01:23.000Z | 2022-03-13T14:26:09.000Z | __all__ = ["Neo4jClient"]
import logging
from typing import Any, Iterable, List, Mapping, Optional, Set, Tuple, Union
import neo4j
import neo4j.graph
from neo4j import GraphDatabase
from indra.config import get_config
from indra.databases import identifiers
from indra.ontology.standardize import get_standard_agent
from indra.statements import Agent
from indra_cogex.representation import Node, Relation, norm_id, triple_query
logger = logging.getLogger(__name__)
class Neo4jClient:
"""A client to communicate with an INDRA CogEx neo4j instance
Parameters
----------
url :
The bolt URL to the neo4j instance to override INDRA_NEO4J_URL
set as an environment variable or set in the INDRA config file.
auth :
A tuple consisting of the user name and password for the neo4j instance to
override INDRA_NEO4J_USER and
INDRA_NEO4J_PASSWORD set as environment variables or set in the INDRA config file.
"""
def __init__(
self,
url: Optional[str] = None,
auth: Optional[Tuple[str, str]] = None,
):
"""Initialize the Neo4j client."""
self.driver = None
self.session = None
if not url:
INDRA_NEO4J_URL = get_config("INDRA_NEO4J_URL")
if INDRA_NEO4J_URL:
url = INDRA_NEO4J_URL
logger.info("Using configured URL for INDRA neo4j connection")
else:
logger.info("INDRA_NEO4J_URL not configured")
if not auth:
INDRA_NEO4J_USER = get_config("INDRA_NEO4J_USER")
INDRA_NEO4J_PASSWORD = get_config("INDRA_NEO4J_PASSWORD")
if INDRA_NEO4J_USER and INDRA_NEO4J_PASSWORD:
auth = (INDRA_NEO4J_USER, INDRA_NEO4J_PASSWORD)
logger.info("Using configured credentials for INDRA neo4j connection")
else:
logger.info("INDRA_NEO4J_USER and INDRA_NEO4J_PASSWORD not configured")
self.driver = GraphDatabase.driver(url, auth=auth)
def create_tx(
self,
query: str,
query_params: Optional[Mapping[str, Any]] = None,
):
"""Run a transaction which writes to the neo4j instance.
Parameters
----------
query :
The query string to be executed.
query_params :
Parameters associated with the query.
"""
tx = self.get_session().begin_transaction()
try:
# logger.info(query)
tx.run(query, parameters=query_params)
tx.commit()
except Exception as e:
logger.error(e)
finally:
tx.close()
def query_tx(self, query: str) -> Union[List[List[Any]], None]:
"""Run a read-only query and return the results.
Parameters
----------
query :
The query string to be executed.
Returns
-------
values :
A list of results where each result is a list of one or more
objects (typically neo4j nodes or relations).
"""
tx = self.get_session().begin_transaction()
try:
res = tx.run(query)
except Exception as e:
logger.error(e)
tx.close()
return
values = res.values()
tx.close()
return values
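    # Usage illustration (comment only, not part of the original file): a read-only
    # Cypher query such as
    #     client.query_tx("MATCH (n:BioEntity) RETURN n.id LIMIT 5")
    # returns a list of rows (each row itself a list of values), or None if the
    # query raised an error.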
def get_session(self, renew: Optional[bool] = False) -> neo4j.Session:
"""Return an existing session or create one if needed.
Parameters
----------
renew :
If True, a new session is created. Default: False
Returns
-------
session
A neo4j session.
"""
if self.session is None or renew:
sess = self.driver.session()
self.session = sess
return self.session
def has_relation(
self,
source: Tuple[str, str],
target: Tuple[str, str],
relation: str,
source_type: Optional[str] = None,
target_type: Optional[str] = None,
) -> bool:
"""Return True if there is a relation between the source and the target.
Parameters
----------
source :
Source namespace and identifier.
target :
Target namespace and identifier.
relation :
Relation type.
source_type :
A constraint on the source type
target_type :
A constraint on the target type
Returns
-------
related :
True if there is a relation of the given type, otherwise False.
"""
res = self.get_relations(
source,
target,
relation,
limit=1,
source_type=source_type,
target_type=target_type,
)
if res:
return True
else:
return False
def get_relations(
self,
source: Optional[Tuple[str, str]] = None,
target: Optional[Tuple[str, str]] = None,
relation: Optional[str] = None,
source_type: Optional[str] = None,
target_type: Optional[str] = None,
limit: Optional[int] = None,
) -> List[Relation]:
"""Return relations based on source, target and type constraints.
This is a generic function for getting relations, all of its parameters
are optional, though at least a source or a target needs to be provided.
Parameters
----------
source :
            Source namespace and ID.
target :
Target namespace and ID.
relation :
Relation type.
source_type :
A constraint on the source type
target_type :
A constraint on the target type
limit :
A limit on the number of relations returned.
Returns
-------
rels :
A list of relations matching the constraints.
"""
if not source and not target:
raise ValueError("source or target should be specified")
source = norm_id(*source) if source else None
target = norm_id(*target) if target else None
match = triple_query(
source_id=source,
source_type=source_type,
relation_type=relation,
target_id=target,
target_type=target_type,
)
query = """
MATCH p=%s
RETURN DISTINCT p
%s
""" % (
match,
"" if not limit else "LIMIT %s" % limit,
)
rels = [self.neo4j_to_relation(res[0]) for res in self.query_tx(query)]
return rels
def get_source_relations(
self,
target: Tuple[str, str],
relation: Optional[str] = None,
target_type: Optional[str] = None,
) -> List[Relation]:
"""Get relations that connect sources to the given target.
Parameters
----------
target :
Target namespace and identifier.
relation :
Relation type.
Returns
-------
rels :
A list of relations matching the constraints.
"""
return self.get_relations(
source=None, target=target, relation=relation, target_type=target_type
)
def get_target_relations(
self,
source: Tuple[str, str],
relation: Optional[str] = None,
source_type: Optional[str] = None,
) -> List[Relation]:
"""Get relations that connect targets from the given source.
Parameters
----------
source :
Source namespace and identifier.
relation :
Relation type.
Returns
-------
rels :
A list of relations matching the constraints.
"""
return self.get_relations(
source=source, target=None, relation=relation, source_type=source_type
)
def get_all_relations(
self,
node: Tuple[str, str],
relation: Optional[str] = None,
source_type: Optional[str] = None,
target_type: Optional[str] = None,
) -> List[Relation]:
"""Get relations that connect sources and targets with the given node.
Parameters
----------
node :
Node namespace and identifier.
relation :
Relation type.
source_type :
Type constraint on the sources for in-edges
target_type :
            Type constraint on the targets for out-edges
Returns
-------
rels :
A list of relations matching the constraints.
"""
source_rels = self.get_source_relations(
target=node, relation=relation, source_type=source_type
)
target_rels = self.get_target_relations(
source=node, relation=relation, target_type=target_type
)
all_rels = source_rels + target_rels
return all_rels
@staticmethod
def get_property_from_relations(relations: List[Relation], prop: str) -> Set[str]:
"""Return the set of property values on given relations.
Parameters
----------
relations :
The relations, each of which may or may not contain a value for
the given property.
prop :
The key/name of the property to look for on each relation.
Returns
-------
props
A set of the values of the given property on the given list
of relations.
"""
props = {rel.data[prop] for rel in relations if prop in rel.data}
return props
def get_sources(
self,
target: Tuple[str, str],
relation: str = None,
source_type: Optional[str] = None,
target_type: Optional[str] = None,
) -> List[Node]:
"""Return the nodes related to the target via a given relation type.
Parameters
----------
target :
The target node's ID.
relation :
The relation label to constrain to when finding sources.
source_type :
A constraint on the source type
target_type :
A constraint on the target type
Returns
-------
sources
A list of source nodes.
"""
return self.get_common_sources(
[target],
relation,
source_type=source_type,
target_type=target_type,
)
def get_common_sources(
self,
targets: List[Tuple[str, str]],
relation: str,
source_type: Optional[str] = None,
target_type: Optional[str] = None,
) -> List[Node]:
"""Return the common source nodes related to all the given targets
via a given relation type.
Parameters
----------
targets :
The target nodes' IDs.
relation :
The relation label to constrain to when finding sources.
source_type :
A constraint on the source type
target_type :
A constraint on the target type
Returns
-------
sources
A list of source nodes.
"""
parts = [
triple_query(
source_name="s",
source_type=source_type,
relation_type=relation,
target_id=norm_id(*target),
target_type=target_type,
)
for target in targets
]
query = """
MATCH %s
RETURN DISTINCT s
""" % ",".join(
parts
)
nodes = [self.neo4j_to_node(res[0]) for res in self.query_tx(query)]
return nodes
def get_targets(
self,
source: Tuple[str, str],
relation: Optional[str] = None,
source_type: Optional[str] = None,
target_type: Optional[str] = None,
) -> List[Node]:
"""Return the nodes related to the source via a given relation type.
Parameters
----------
source :
Source namespace and identifier.
relation :
The relation label to constrain to when finding targets.
source_type :
A constraint on the source type
target_type :
A constraint on the target type
Returns
-------
targets
A list of target nodes.
"""
return self.get_common_targets(
[source],
relation,
source_type=source_type,
target_type=target_type,
)
def get_common_targets(
self,
sources: List[Tuple[str, str]],
relation: str,
source_type: Optional[str] = None,
target_type: Optional[str] = None,
) -> List[Node]:
"""Return the common target nodes related to all the given sources
via a given relation type.
Parameters
----------
sources :
Source namespace and identifier.
relation :
The relation label to constrain to when finding targets.
source_type :
A constraint on the source type
target_type :
A constraint on the target type
Returns
-------
targets
A list of target nodes.
"""
parts = [
triple_query(
source_id=norm_id(*source),
source_type=source_type,
relation_type=relation,
target_name="t",
target_type=target_type,
)
for source in sources
]
query = """
MATCH %s
RETURN DISTINCT t
""" % ",".join(
parts
)
nodes = [self.neo4j_to_node(res[0]) for res in self.query_tx(query)]
return nodes
def get_target_agents(
self,
source: Tuple[str, str],
relation: str,
source_type: Optional[str] = None,
) -> List[Agent]:
"""Return the nodes related to the source via a given relation type as INDRA Agents.
Parameters
----------
source :
Source namespace and identifier.
relation :
The relation label to constrain to when finding targets.
source_type :
A constraint on the source type
Returns
-------
targets
A list of target nodes as INDRA Agents.
"""
targets = self.get_targets(source, relation, source_type=source_type)
agents = [self.node_to_agent(target) for target in targets]
return agents
def get_source_agents(self, target: Tuple[str, str], relation: str) -> List[Agent]:
"""Return the nodes related to the target via a given relation type as INDRA Agents.
Parameters
----------
target :
Target namespace and identifier.
relation :
The relation label to constrain to when finding sources.
Returns
-------
sources
A list of source nodes as INDRA Agents.
"""
sources = self.get_sources(
target,
relation,
source_type="BioEntity",
target_type="BioEntity",
)
agents = [self.node_to_agent(source) for source in sources]
return agents
def get_predecessors(
self,
target: Tuple[str, str],
relations: Iterable[str],
source_type: Optional[str] = None,
target_type: Optional[str] = None,
) -> List[Node]:
"""Return the nodes that precede the given node via the given relation types.
Parameters
----------
target :
The target node's ID.
relations :
The relation labels to constrain to when finding predecessors.
source_type :
A constraint on the source type
target_type :
A constraint on the target type
Returns
-------
predecessors
A list of predecessor nodes.
"""
match = triple_query(
source_name="s",
source_type=source_type,
relation_type="%s*1.." % "|".join(relations),
target_id=norm_id(*target),
target_type=target_type,
)
query = (
"""
MATCH %s
RETURN DISTINCT s
"""
% match
)
nodes = [self.neo4j_to_node(res[0]) for res in self.query_tx(query)]
return nodes
def get_successors(
self,
source: Tuple[str, str],
relations: Iterable[str],
source_type: Optional[str] = None,
target_type: Optional[str] = None,
) -> List[Node]:
"""Return the nodes that precede the given node via the given relation types.
Parameters
----------
source :
The source node's ID.
relations :
The relation labels to constrain to when finding successors.
source_type :
A constraint on the source type
target_type :
A constraint on the target type
Returns
-------
        successors
            A list of successor nodes.
"""
match = triple_query(
source_id=norm_id(*source),
source_type=source_type,
relation_type="%s*1.." % "|".join(relations),
target_name="t",
target_type=target_type,
)
query = (
"""
MATCH %s
RETURN DISTINCT t
"""
% match
)
nodes = [self.neo4j_to_node(res[0]) for res in self.query_tx(query)]
return nodes
@staticmethod
def neo4j_to_node(neo4j_node: neo4j.graph.Node) -> Node:
"""Return a Node from a neo4j internal node.
Parameters
----------
neo4j_node :
A neo4j internal node using its internal data structure and
identifier scheme.
Returns
-------
node :
A Node object with the INDRA standard identifier scheme.
"""
props = dict(neo4j_node)
node_id = props.pop("id")
db_ns, db_id = process_identifier(node_id)
return Node(db_ns, db_id, neo4j_node.labels, props)
@staticmethod
def neo4j_to_relation(neo4j_path: neo4j.graph.Path) -> Relation:
"""Return a Relation from a neo4j internal single-relation path.
Parameters
----------
neo4j_path :
A neo4j internal single-edge path using its internal data structure
and identifier scheme.
Returns
-------
relation :
A Relation object with the INDRA standard identifier scheme.
"""
return Neo4jClient.neo4j_to_relations(neo4j_path)[0]
@staticmethod
def neo4j_to_relations(neo4j_path: neo4j.graph.Path) -> List[Relation]:
"""Return a list of Relations from a neo4j internal multi-relation path.
Parameters
----------
neo4j_path :
A neo4j internal single-edge path using its internal data structure
and identifier scheme.
Returns
-------
:
A list of Relation objects with the INDRA standard identifier
scheme.
"""
relations = []
for neo4j_relation in neo4j_path.relationships:
rel_type = neo4j_relation.type
props = dict(neo4j_relation)
source_ns, source_id = process_identifier(neo4j_relation.start_node["id"])
target_ns, target_id = process_identifier(neo4j_relation.end_node["id"])
rel = Relation(source_ns, source_id, target_ns, target_id, rel_type, props)
relations.append(rel)
return relations
@staticmethod
def node_to_agent(node: Node) -> Agent:
"""Return an INDRA Agent from a Node.
Parameters
----------
node :
A Node object.
Returns
-------
agent :
An INDRA Agent with standardized name and expanded/standardized
db_refs.
"""
name = node.data.get("name")
if not name:
name = f"{node.db_ns}:{node.db_id}"
return get_standard_agent(name, {node.db_ns: node.db_id})
def delete_all(self):
"""Delete everything in the neo4j database."""
query = """MATCH(n) DETACH DELETE n"""
return self.create_tx(query)
def create_nodes(self, nodes: List[Node]):
"""Create a set of new graph nodes."""
nodes_str = ",\n".join([str(n) for n in nodes])
query = """CREATE %s""" % nodes_str
return self.create_tx(query)
def add_nodes(self, nodes: List[Node]):
"""Merge a set of graph nodes (create or update)."""
if not nodes:
return
prop_str = ",\n".join(["n.%s = node.%s" % (k, k) for k in nodes[0].data])
# labels_str = ':'.join(nodes[0].labels)
query = (
"""
UNWIND $nodes AS node
MERGE (n {id: node.id})
SET %s
WITH n, node
CALL apoc.create.addLabels(n, node.labels)
YIELD n
"""
% prop_str
)
return self.create_tx(
query,
query_params={
"nodes": [dict(**n.to_json()["data"], labels=n.labels) for n in nodes]
},
)
def add_relations(self, relations: List[Relation]):
"""Merge a set of graph relations (create or update)."""
if not relations:
return None
labels_str = relations[0].rel_type
prop_str = ",\n".join(
["rel.%s = relation.%s" % (k, k) for k in relations[0].data]
)
query = """
UNWIND $relations AS relation
MATCH (e1 {id: relation.source_id}), (e2 {id: relation.target_id})
MERGE (e1)-[rel:%s]->(e2)
SET %s
""" % (
labels_str,
prop_str,
)
rel_params = []
for rel in relations:
rd = dict(source_id=rel.source_id, target_id=rel.target_id, **rel.data)
rel_params.append(rd)
return self.create_tx(query, query_params={"relations": rel_params})
def add_node(self, node: Node):
"""Merge a single node into the graph."""
prop_str = ",\n".join(["n.%s = '%s'" % (k, v) for k, v in node.data.items()])
query = """
MERGE (n:%s {id: '%s'})
SET %s
""" % (
node.labels,
norm_id(node.db_ns, node.db_id),
prop_str,
)
return self.create_tx(query)
def process_identifier(identifier: str) -> Tuple[str, str]:
"""Process a neo4j-internal identifier string into an INDRA namespace and ID.
Parameters
----------
identifier :
An identifier string (containing both prefix and ID) corresponding
to an internal neo4j graph node.
Returns
-------
db_ns:
An INDRA-standard namespace corresponding to the input identifier.
db_id:
An INDRA-standard identifier corresponding to the input identifier.
"""
graph_ns, graph_id = identifier.split(":", maxsplit=1)
db_ns, db_id = identifiers.get_ns_id_from_identifiers(graph_ns, graph_id)
# This is a corner case where the prefix is not in the registry
# and in those cases we just use the upper case version of the prefix
# in the graph to revert it to the INDRA-compatible key.
if not db_ns:
db_ns = graph_ns.upper()
db_id = graph_id
else:
db_id = identifiers.ensure_prefix_if_needed(db_ns, db_id)
return db_ns, db_id
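# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the client and helpers above fit together. It assumes a
# reachable neo4j instance configured via INDRA_NEO4J_URL / _USER / _PASSWORD; the
# CURIE ("HGNC", "1097") and the "indra_rel" relation label are hypothetical
# placeholders for whatever is actually loaded in the graph.
def _example_queries():  # illustration only, never called here
    client = Neo4jClient()
    targets = client.get_targets(
        source=("HGNC", "1097"),
        relation="indra_rel",
        source_type="BioEntity",
        target_type="BioEntity",
    )
    # Convert the returned graph nodes into INDRA Agents
    return [client.node_to_agent(node) for node in targets]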
| 30.100629 | 92 | 0.547263 |
7941f2119393f0343f05fa480a555cdddad49f29 | 2,040 | py | Python | syft_proto/types/syft/v1/shape_pb2.py | gmuraru/syft-proto | b78d9a7ae91cb040b7aaacaf0a0b73589e1f37a1 | ["Apache-2.0"] | null | null | null | syft_proto/types/syft/v1/shape_pb2.py | gmuraru/syft-proto | b78d9a7ae91cb040b7aaacaf0a0b73589e1f37a1 | ["Apache-2.0"] | null | null | null | syft_proto/types/syft/v1/shape_pb2.py | gmuraru/syft-proto | b78d9a7ae91cb040b7aaacaf0a0b73589e1f37a1 | ["Apache-2.0"] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: syft_proto/types/syft/v1/shape.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='syft_proto/types/syft/v1/shape.proto',
package='syft_proto.types.syft.v1',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n$syft_proto/types/syft/v1/shape.proto\x12\x18syft_proto.types.syft.v1\"\x1b\n\x05Shape\x12\x12\n\x04\x64ims\x18\x01 \x03(\x05R\x04\x64imsb\x06proto3')
)
_SHAPE = _descriptor.Descriptor(
name='Shape',
full_name='syft_proto.types.syft.v1.Shape',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dims', full_name='syft_proto.types.syft.v1.Shape.dims', index=0,
number=1, type=5, cpp_type=1, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='dims', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=66,
serialized_end=93,
)
DESCRIPTOR.message_types_by_name['Shape'] = _SHAPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Shape = _reflection.GeneratedProtocolMessageType('Shape', (_message.Message,), {
'DESCRIPTOR' : _SHAPE,
'__module__' : 'syft_proto.types.syft.v1.shape_pb2'
# @@protoc_insertion_point(class_scope:syft_proto.types.syft.v1.Shape)
})
_sym_db.RegisterMessage(Shape)
# @@protoc_insertion_point(module_scope)
| 28.732394 | 172 | 0.748529 |
7941f28e8ba987850b922cf33e0e0a2f20b9e87d | 214 | py | Python | Python/04. Sets/011. The Captain's Room.py | H2u-Hwng/HackerRank-Practice | abe02d04b3e60d30637403204b8735b2b13888a3 | ["MIT"] | null | null | null | Python/04. Sets/011. The Captain's Room.py | H2u-Hwng/HackerRank-Practice | abe02d04b3e60d30637403204b8735b2b13888a3 | ["MIT"] | null | null | null | Python/04. Sets/011. The Captain's Room.py | H2u-Hwng/HackerRank-Practice | abe02d04b3e60d30637403204b8735b2b13888a3 | ["MIT"] | null | null | null | # Problem: https://www.hackerrank.com/challenges/py-the-captains-room/problem
# Score: 10.0
k = int(input())
arr = list(map(int, input().split()))
s = set(arr)
result = (sum(s)*k - sum(arr))//(k-1)
print(result)
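# Why this works (comment only, not part of the original file): every room number
# except the captain's appears exactly k times, so k*sum(s) counts the captain's
# room k times while sum(arr) counts it once; the difference is (k-1)*captain and
# the floor division recovers the captain's room number exactly.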
| 21.4 | 77 | 0.649533 |
7941f366b8942a5f32bd77e91dd4551066a1090e | 380 | py | Python | apps/Application/views/htmlview.py | flowl/django-init | ee6cefe2ee14ab5fd2dcdc3deabbbcfdd040f36c | ["MIT"] | null | null | null | apps/Application/views/htmlview.py | flowl/django-init | ee6cefe2ee14ab5fd2dcdc3deabbbcfdd040f36c | ["MIT"] | null | null | null | apps/Application/views/htmlview.py | flowl/django-init | ee6cefe2ee14ab5fd2dcdc3deabbbcfdd040f36c | ["MIT"] | null | null | null | from django.views.generic import TemplateView
class HtmlView(TemplateView):
template_name = 'sites/hello_world.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['page_title'] = 'HTML Title'
# context['headline'] = 'Headline'
# context['subheadline'] = 'Subheadline'
return context
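# Usage note (comment only, not part of the original file): the keys added to
# `context` above become template variables, so sites/hello_world.html can render
# them as {{ page_title }} (and {{ headline }} / {{ subheadline }} if the
# commented-out lines are enabled).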
| 27.142857 | 52 | 0.663158 |
7941f41f19cac86591be7302ca905c44a33d4a6b | 9,729 | py | Python | torch/nn/utils/parametrizations.py | raven38/pytorch | 3a56758e1fdbc928be3754d5b60e63c7fc55ea45 | ["Intel"] | 5 | 2021-08-17T17:44:20.000Z | 2021-08-21T05:03:42.000Z | torch/nn/utils/parametrizations.py | raven38/pytorch | 3a56758e1fdbc928be3754d5b60e63c7fc55ea45 | ["Intel"] | 1 | 2021-09-03T09:35:27.000Z | 2021-09-03T09:35:27.000Z | torch/nn/utils/parametrizations.py | raven38/pytorch | 3a56758e1fdbc928be3754d5b60e63c7fc55ea45 | ["Intel"] | null | null | null | import torch
from ..utils import parametrize
from ..modules import Module
from .. import functional as F
from typing import Optional
class _SpectralNorm(Module):
def __init__(
self,
weight: torch.Tensor,
n_power_iterations: int = 1,
dim: int = 0,
eps: float = 1e-12
) -> None:
super().__init__()
ndim = weight.ndim
if dim >= ndim or dim < -ndim:
raise IndexError("Dimension out of range (expected to be in range of "
f"[-{ndim}, {ndim - 1}] but got {dim})")
if n_power_iterations <= 0:
raise ValueError('Expected n_power_iterations to be positive, but '
'got n_power_iterations={}'.format(n_power_iterations))
self.dim = dim if dim >= 0 else dim + ndim
self.eps = eps
if ndim > 1:
# For ndim == 1 we do not need to approximate anything (see _SpectralNorm.forward)
self.n_power_iterations = n_power_iterations
weight_mat = self._reshape_weight_to_matrix(weight)
h, w = weight_mat.size()
u = weight_mat.new_empty(h).normal_(0, 1)
v = weight_mat.new_empty(w).normal_(0, 1)
self.register_buffer('_u', F.normalize(u, dim=0, eps=self.eps))
self.register_buffer('_v', F.normalize(v, dim=0, eps=self.eps))
# Start with u, v initialized to some reasonable values by performing a number
# of iterations of the power method
self._power_method(weight_mat, 15)
def _reshape_weight_to_matrix(self, weight: torch.Tensor) -> torch.Tensor:
# Precondition
assert weight.ndim > 1
if self.dim != 0:
# permute dim to front
weight = weight.permute(self.dim, *(d for d in range(weight.dim()) if d != self.dim))
return weight.flatten(1)
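    # Shape illustration (comment only, not part of the original file): for a
    # Conv2d weight of shape (out_channels, in_channels, kH, kW) with the default
    # dim=0, the permute/flatten above yields a matrix of shape
    # (out_channels, in_channels * kH * kW); the power iteration below runs on
    # that 2D view.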
@torch.autograd.no_grad()
def _power_method(self, weight_mat: torch.Tensor, n_power_iterations: int) -> None:
# See original note at torch/nn/utils/spectral_norm.py
# NB: If `do_power_iteration` is set, the `u` and `v` vectors are
# updated in power iteration **in-place**. This is very important
# because in `DataParallel` forward, the vectors (being buffers) are
# broadcast from the parallelized module to each module replica,
# which is a new module object created on the fly. And each replica
# runs its own spectral norm power iteration. So simply assigning
# the updated vectors to the module this function runs on will cause
# the update to be lost forever. And the next time the parallelized
# module is replicated, the same randomly initialized vectors are
# broadcast and used!
#
# Therefore, to make the change propagate back, we rely on two
# important behaviors (also enforced via tests):
# 1. `DataParallel` doesn't clone storage if the broadcast tensor
# is already on correct device; and it makes sure that the
# parallelized module is already on `device[0]`.
# 2. If the out tensor in `out=` kwarg has correct shape, it will
# just fill in the values.
# Therefore, since the same power iteration is performed on all
# devices, simply updating the tensors in-place will make sure that
# the module replica on `device[0]` will update the _u vector on the
# parallized module (by shared storage).
#
# However, after we update `u` and `v` in-place, we need to **clone**
# them before using them to normalize the weight. This is to support
# backproping through two forward passes, e.g., the common pattern in
# GAN training: loss = D(real) - D(fake). Otherwise, engine will
# complain that variables needed to do backward for the first forward
# (i.e., the `u` and `v` vectors) are changed in the second forward.
# Precondition
assert weight_mat.ndim > 1
for _ in range(n_power_iterations):
# Spectral norm of weight equals to `u^T W v`, where `u` and `v`
# are the first left and right singular vectors.
# This power iteration produces approximations of `u` and `v`.
self._u = F.normalize(torch.mv(weight_mat, self._v), # type: ignore[has-type]
dim=0, eps=self.eps, out=self._u) # type: ignore[has-type]
self._v = F.normalize(torch.mv(weight_mat.t(), self._u),
dim=0, eps=self.eps, out=self._v) # type: ignore[has-type]
# See above on why we need to clone
self._u = self._u.clone(memory_format=torch.contiguous_format)
self._v = self._v.clone(memory_format=torch.contiguous_format)
def forward(self, weight: torch.Tensor) -> torch.Tensor:
if weight.ndim == 1:
# Faster and more exact path, no need to approximate anything
return F.normalize(weight, dim=0, eps=self.eps)
else:
weight_mat = self._reshape_weight_to_matrix(weight)
if self.training:
self._power_method(weight_mat, self.n_power_iterations)
# The proper way of computing this should be through F.bilinear, but
# it seems to have some efficiency issues:
# https://github.com/pytorch/pytorch/issues/58093
sigma = torch.dot(self._u, torch.mv(weight_mat, self._v))
return weight / sigma
def right_inverse(self, value: torch.Tensor) -> torch.Tensor:
# we may want to assert here that the passed value already
# satisfies constraints
return value
def spectral_norm(module: Module,
name: str = 'weight',
n_power_iterations: int = 1,
eps: float = 1e-12,
dim: Optional[int] = None) -> Module:
r"""Applies spectral normalization to a parameter in the given module.
.. math::
\mathbf{W}_{SN} = \dfrac{\mathbf{W}}{\sigma(\mathbf{W})},
\sigma(\mathbf{W}) = \max_{\mathbf{h}: \mathbf{h} \ne 0} \dfrac{\|\mathbf{W} \mathbf{h}\|_2}{\|\mathbf{h}\|_2}
When applied on a vector, it simplifies to
.. math::
\mathbf{x}_{SN} = \dfrac{\mathbf{x}}{\|\mathbf{x}\|_2}
Spectral normalization stabilizes the training of discriminators (critics)
in Generative Adversarial Networks (GANs) by reducing the Lipschitz constant
of the model. :math:`\sigma` is approximated performing one iteration of the
`power method`_ every time the weight is accessed. If the dimension of the
weight tensor is greater than 2, it is reshaped to 2D in power iteration
method to get spectral norm.
See `Spectral Normalization for Generative Adversarial Networks`_ .
.. _`power method`: https://en.wikipedia.org/wiki/Power_iteration
.. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957
.. note::
This function is implemented using the new parametrization functionality
in :func:`torch.nn.utils.parametrize.register_parametrization`. It is a
reimplementation of :func:`torch.nn.utils.spectral_norm`.
.. note::
When this constraint is registered, the singular vectors associated to the largest
singular value are estimated rather than sampled at random. These are then updated
performing :attr:`n_power_iterations` of the `power method`_ whenever the tensor
is accessed with the module on `training` mode.
.. note::
If the `_SpectralNorm` module, i.e., `module.parametrization.weight[idx]`,
is in training mode on removal, it will perform another power iteration.
If you'd like to avoid this iteration, set the module to eval mode
before its removal.
Args:
module (nn.Module): containing module
name (str, optional): name of weight parameter
n_power_iterations (int, optional): number of power iterations to
calculate spectral norm
eps (float, optional): epsilon for numerical stability in
calculating norms
dim (int, optional): dimension corresponding to number of outputs,
the default is ``0``, except for modules that are instances of
ConvTranspose{1,2,3}d, when it is ``1``
Returns:
The original module with a new parametrization registered to the specified
weight
Example::
>>> snm = spectral_norm(nn.Linear(20, 40))
>>> snm
ParametrizedLinear(
in_features=20, out_features=40, bias=True
(parametrizations): ModuleDict(
(weight): ParametrizationList(
(0): _SpectralNorm()
)
)
)
>>> torch.linalg.matrix_norm(snm.weight, 2)
tensor(1.0000, grad_fn=<CopyBackwards>)
"""
if not hasattr(module, name):
raise ValueError(
"Module '{}' has no attribute with name '{}'".format(module, name)
)
# getattr should get the correct parametrized weight if there
# is already an parametrization registered
weight = getattr(module, name)
if dim is None:
if isinstance(module, (torch.nn.ConvTranspose1d,
torch.nn.ConvTranspose2d,
torch.nn.ConvTranspose3d)):
dim = 1
else:
dim = 0
parametrize.register_parametrization(module, name, _SpectralNorm(weight, n_power_iterations, dim, eps))
return module
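# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example following the notes in the docstring above: apply the
# parametrization to a conv layer, then switch to eval mode before removing it so
# that no extra power iteration runs on removal. The layer sizes are arbitrary.
def _spectral_norm_example():  # illustration only, never called here
    from torch import nn  # imported lazily to avoid import-cycle concerns
    conv = spectral_norm(nn.Conv2d(3, 16, kernel_size=3), n_power_iterations=2)
    conv.eval()  # see the docstring note: removal in training mode runs one more iteration
    parametrize.remove_parametrizations(conv, "weight", leave_parametrized=True)
    return conv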
| 45.891509 | 118 | 0.62031 |
7941f5fcd3fec70be9521e4a5ff72675827a5bfc | 593 | py | Python | project_euler/problem_01/sol4.py | ELR424/Python | a212efee5b44312c8b4b626ae412bacc5f4117fd | ["MIT"] | 1,568 | 2019-04-25T11:54:45.000Z | 2022-03-31T23:35:23.000Z | project_euler/problem_01/sol4.py | rayenough/Python | 2fc2ae3f32fad16226c88358cb7c9e4e5c790a8f | ["MIT"] | 58 | 2019-02-20T10:45:50.000Z | 2020-09-30T12:18:45.000Z | project_euler/problem_01/sol4.py | rayenough/Python | 2fc2ae3f32fad16226c88358cb7c9e4e5c790a8f | ["MIT"] | 464 | 2019-04-17T04:57:16.000Z | 2022-03-31T04:12:57.000Z | def mulitples(limit):
xmulti = []
zmulti = []
z = 3
x = 5
temp = 1
while True:
result = z * temp
if (result < limit):
zmulti.append(result)
temp += 1
else:
temp = 1
break
while True:
result = x * temp
if (result < limit):
xmulti.append(result)
temp += 1
else:
break
collection = list(set(xmulti+zmulti))
return (sum(collection))
print (mulitples(1000))
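# --- Illustrative alternative (not part of the original solution) ---
# The loops above collect the multiples of 3 and of 5 below `limit` and de-duplicate
# them with a set before summing. The same answer can be computed directly; this
# sketch is only for comparison and is never called.
def multiples_direct(limit):
    return sum(n for n in range(limit) if n % 3 == 0 or n % 5 == 0)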
| 19.129032 | 42 | 0.403035 |
7941f6b5d72f23868dc32feab18a943f1f0c948d | 170,326 | py | Python | tensorflow/python/keras/backend.py | santhoshkumarvs/tensorflow | 5581b91ada226f1ec20f55cd6423853072b2813c | ["Apache-2.0"] | null | null | null | tensorflow/python/keras/backend.py | santhoshkumarvs/tensorflow | 5581b91ada226f1ec20f55cd6423853072b2813c | ["Apache-2.0"] | null | null | null | tensorflow/python/keras/backend.py | santhoshkumarvs/tensorflow | 5581b91ada226f1ec20f55cd6423853072b2813c | ["Apache-2.0"] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as session_module
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as tfdev
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend_config
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
py_all = all
py_sum = sum
# INTERNAL UTILS
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
_GRAPH = None
# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = None
# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()
# This dictionary holds a mapping {graph: learning_phase}.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = weakref.WeakKeyDictionary()
# _DUMMY_EAGER_GRAPH is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES.
_DUMMY_EAGER_GRAPH = threading.local()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = weakref.WeakKeyDictionary()
# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = weakref.WeakKeyDictionary()
# The below functions are kept accessible from backend for compatibility.
epsilon = backend_config.epsilon
floatx = backend_config.floatx
image_data_format = backend_config.image_data_format
set_epsilon = backend_config.set_epsilon
set_floatx = backend_config.set_floatx
set_image_data_format = backend_config.set_image_data_format
@keras_export('keras.backend.backend')
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
"""
return 'tensorflow'
@keras_export('keras.backend.cast_to_floatx')
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
Arguments:
x: Numpy array.
Returns:
The same Numpy array, cast to its new type.
Example:
```python
>>> from keras import backend as K
>>> K.floatx()
'float32'
>>> arr = numpy.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = K.cast_to_floatx(arr)
>>> new_arr
array([ 1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
```
"""
return np.asarray(x, dtype=floatx())
# A global dictionary mapping graph objects to an index of counters used
# for various layer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_LAYER_NAME_UIDS = weakref.WeakKeyDictionary()
@keras_export('keras.backend.get_uid')
def get_uid(prefix=''):
"""Associates a string prefix with an integer counter in a TensorFlow graph.
Arguments:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
```
>>> get_uid('dense')
1
>>> get_uid('dense')
2
```
"""
graph = get_graph()
if graph not in PER_GRAPH_LAYER_NAME_UIDS:
PER_GRAPH_LAYER_NAME_UIDS[graph] = collections.defaultdict(int)
layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS[graph]
layer_name_uids[prefix] += 1
return layer_name_uids[prefix]
@keras_export('keras.backend.reset_uids')
def reset_uids():
"""Resets graph identifiers.
"""
per_graph_layer_name_uids = PER_GRAPH_LAYER_NAME_UIDS
keys = list(per_graph_layer_name_uids.keys())
for key in keys:
del per_graph_layer_name_uids[key]
@keras_export('keras.backend.clear_session')
def clear_session():
"""Destroys the current TF graph and creates a new one.
Useful to avoid clutter from old models / layers.
"""
global _SESSION
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned
global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned
ops.reset_default_graph()
reset_uids()
_SESSION.session = None
graph = get_graph()
with graph.as_default():
with ops.name_scope(''):
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES = {}
_GRAPH_LEARNING_PHASES[graph] = phase
_GRAPH_VARIABLES.pop(graph, None)
_GRAPH_TF_OPTIMIZERS.pop(graph, None)
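# Illustrative sketch (hypothetical helper, not exported): typical use of
# `clear_session` when building many throwaway models in a loop, so graph
# state and autogenerated layer names do not keep accumulating. Assumes only
# the public `tf.keras` API.
def _example_clear_session_usage():
  import tensorflow as tf
  for _ in range(3):
    # Each iteration starts from a fresh graph, so layer-name counters
    # (dense_1, dense_2, ...) do not keep growing across iterations.
    model = tf.keras.Sequential(
        [tf.keras.layers.Dense(4, input_shape=(8,))])
    del model
    tf.keras.backend.clear_session()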
@keras_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
"""Sets the manual variable initialization flag.
This boolean flag determines whether
variables should be initialized
as they are instantiated (default), or if
the user should handle the initialization
(e.g. via `tf.initialize_all_variables()`).
Arguments:
value: Python boolean.
"""
global _MANUAL_VAR_INIT
_MANUAL_VAR_INIT = value
@keras_export('keras.backend.learning_phase')
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
Returns:
Learning phase (scalar integer tensor or Python integer).
"""
if ops.get_default_graph() is _GRAPH:
# Don't enter an init_scope for the learning phase if eager execution
# is enabled but we're inside the Keras workspace graph.
return symbolic_learning_phase()
with ops.init_scope():
# We always check & set the learning phase inside the init_scope,
# otherwise the wrong default_graph will be used to look up the learning
# phase inside of functions & defuns.
#
# This is because functions & defuns (both in graph & in eager mode)
# will always execute non-eagerly using a function-specific default
# subgraph.
if context.executing_eagerly():
if _DUMMY_EAGER_GRAPH not in _GRAPH_LEARNING_PHASES:
# Fallback to inference mode as default.
return 0
return _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
return symbolic_learning_phase()
def symbolic_learning_phase():
graph = get_graph()
with graph.as_default():
if graph not in _GRAPH_LEARNING_PHASES:
with ops.name_scope(''):
phase = array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
_GRAPH_LEARNING_PHASES[graph] = phase
return _GRAPH_LEARNING_PHASES[graph]
@keras_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
      # In an eager context, the learning phase value applies to both the eager
# context and the internal Keras graph.
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
_GRAPH_LEARNING_PHASES[get_graph()] = value
def set_eager_learning_phase(value):
"""Internal utility that sets the learning phase in eager execution only.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
assert value in {0, 1}
assert context.executing_eagerly()
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
@keras_export('keras.backend.learning_phase_scope')
@tf_contextlib.contextmanager
def learning_phase_scope(value):
"""Provides a scope within which the learning phase is equal to `value`.
The learning phase gets restored to its original value upon exiting the scope.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
previous_eager_value = _GRAPH_LEARNING_PHASES.get(
_DUMMY_EAGER_GRAPH, None)
previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)
try:
set_learning_phase(value)
yield
finally:
# Restore learning phase to initial value.
with ops.init_scope():
if context.executing_eagerly():
if previous_eager_value is not None:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_eager_value
elif _DUMMY_EAGER_GRAPH in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH]
graph = get_graph()
if previous_graph_value is not None:
_GRAPH_LEARNING_PHASES[graph] = previous_graph_value
elif graph in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[graph]
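# Illustrative sketch (hypothetical helper, not exported): shows how
# `learning_phase_scope` temporarily forces the learning phase and restores
# the previous value on exit. Outside the scope, `learning_phase()` returns 0
# under eager execution or a symbolic placeholder in graph mode.
def _example_learning_phase_scope_usage():
  import tensorflow as tf
  K = tf.keras.backend
  with K.learning_phase_scope(1):
    # Inside the scope, layers such as Dropout behave as in training.
    assert K.learning_phase() == 1
  # On exit the previous learning phase value (or placeholder) is restored.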
@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
"""Internal scope that sets the learning phase in eager execution only.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
assert value in {0, 1}
assert context.executing_eagerly()
previous_value = learning_phase()
try:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = value
yield
finally:
# Restore learning phase to initial value.
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH] = previous_value
def _current_graph(op_input_list):
"""Return the graph members of `op_input_list`, or the current graph."""
return ops._get_graph_from_inputs(op_input_list)
def _get_session(op_input_list=()):
"""Returns the session object for the current thread."""
global _SESSION
default_session = ops.get_default_session()
if default_session is not None:
session = default_session
else:
if ops.inside_function():
      raise RuntimeError('Cannot get session inside TensorFlow graph function.')
# If we don't have a session, or that session does not match the current
# graph, create and cache a new session.
if (getattr(_SESSION, 'session', None) is None or
_SESSION.session.graph is not _current_graph(op_input_list)):
# If we are creating the Session inside a tf.distribute.Strategy scope,
# we ask the strategy for the right session options to use.
if distribution_strategy_context.has_strategy():
configure_and_create_distributed_session(
distribution_strategy_context.get_strategy())
else:
_SESSION.session = session_module.Session(
config=get_default_session_config())
session = _SESSION.session
return session
@keras_export(v1=['keras.backend.get_session'])
def get_session(op_input_list=()):
"""Returns the TF session to be used by the backend.
If a default TensorFlow session is available, we will return it.
Else, we will return the global Keras session assuming it matches
the current graph.
  If no global Keras session exists at this point,
  we will create a new global session.
Note that you can manually set the global session
via `K.set_session(sess)`.
Arguments:
      op_input_list: An optional sequence of tensors or ops, which will be used
to determine the current graph. Otherwise the default graph will be
used.
Returns:
A TensorFlow session.
"""
session = _get_session(op_input_list)
if not _MANUAL_VAR_INIT:
with session.graph.as_default():
_initialize_variables(session)
return session
def get_graph():
if context.executing_eagerly():
global _GRAPH
if _GRAPH is None:
_GRAPH = func_graph.FuncGraph('keras_graph')
return _GRAPH
else:
return ops.get_default_graph()
@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
"""Retrieve a shared and temporary func graph.
The eager execution path lifts a subgraph from the keras global graph into
a scratch graph in order to create a function. DistributionStrategies, in
turn, constructs multiple functions as well as a final combined function. In
order for that logic to work correctly, all of the functions need to be
created on the same scratch FuncGraph.
Args:
graph: A graph to be used as the current scratch graph. If not set then
      a scratch graph will either be retrieved or created.
Yields:
The current scratch graph.
"""
global _CURRENT_SCRATCH_GRAPH
if (_CURRENT_SCRATCH_GRAPH is not None and graph is not None and
_CURRENT_SCRATCH_GRAPH is not graph):
raise ValueError('Multiple scratch graphs specified.')
if _CURRENT_SCRATCH_GRAPH:
yield _CURRENT_SCRATCH_GRAPH
return
graph = graph or func_graph.FuncGraph('keras_scratch_graph')
try:
_CURRENT_SCRATCH_GRAPH = graph
yield graph
finally:
_CURRENT_SCRATCH_GRAPH = None
@keras_export('keras.backend.set_session')
def set_session(session):
"""Sets the global TensorFlow session.
Arguments:
session: A TF Session.
"""
global _SESSION
_SESSION.session = session
def get_default_session_config():
if not os.environ.get('OMP_NUM_THREADS'):
config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
num_thread = int(os.environ.get('OMP_NUM_THREADS'))
config = config_pb2.ConfigProto(
intra_op_parallelism_threads=num_thread,
inter_op_parallelism_threads=num_thread,
allow_soft_placement=True)
return config
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
if tfdev.is_device_spec(device):
device = device.to_string()
self.device = device
def _set_device_from_string(self, device_str):
self.device = device_str
def _get_current_tf_device():
"""Return explicit device of current context, otherwise returns `None`.
Returns:
If the current device scope is explicitly set, it returns a string with
the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
return `None`.
"""
graph = get_graph()
op = _TfDeviceCaptureOp()
graph._apply_device_functions(op)
return tfdev.DeviceSpec.from_string(op.device)
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Arguments:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
"""Get a list of available gpu devices (formatted as strings).
Returns:
A list of available GPU devices.
"""
if ops.executing_eagerly_outside_functions():
# Returns names of devices directly.
return [name for name in context.list_devices() if 'GPU' in name]
global _LOCAL_DEVICES
if _LOCAL_DEVICES is None:
_LOCAL_DEVICES = get_session().list_devices()
return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
  TensorFlow does not support NCHW on CPU. Therefore we check whether we are
  not explicitly placed on the CPU and GPUs are available. In that case ops
  will be soft-placed on the GPU device.
Returns:
bool: if the current scope device placement would support nchw
"""
explicitly_on_cpu = _is_current_explicit_device('CPU')
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
This is slightly faster than the _to_tensor function, at the cost of
handling fewer cases.
Arguments:
x: An object to be converted (numpy arrays, floats, ints and lists of
them).
dtype: The destination type.
Returns:
A tensor.
"""
return constant_op.constant(x, dtype=dtype)
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
Arguments:
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
Returns:
A tensor.
"""
return ops.convert_to_tensor(x, dtype=dtype)
@keras_export('keras.backend.is_sparse')
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
Arguments:
tensor: A tensor instance.
Returns:
A boolean.
Example:
```python
>>> from keras import backend as K
>>> a = K.placeholder((2, 2), sparse=False)
>>> print(K.is_sparse(a))
False
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
```
"""
return isinstance(tensor, sparse_tensor.SparseTensor)
@keras_export('keras.backend.to_dense')
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
Arguments:
tensor: A tensor instance (potentially sparse).
Returns:
A dense tensor.
Examples:
```python
>>> from keras import backend as K
>>> b = K.placeholder((2, 2), sparse=True)
>>> print(K.is_sparse(b))
True
>>> c = K.to_dense(b)
>>> print(K.is_sparse(c))
False
```
"""
if is_sparse(tensor):
return sparse_ops.sparse_tensor_to_dense(tensor)
else:
return tensor
name_scope = ops.name_scope
@keras_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
Arguments:
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
Returns:
A variable instance (with Keras metadata included).
Examples:
```python
>>> import numpy as np
>>> from keras import backend as K
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val, dtype='float64', name='example_var')
>>> K.dtype(kvar)
'float64'
>>> print(kvar)
example_var
>>> kvar.eval()
array([[ 1., 2.],
[ 3., 4.]])
```
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'tocoo'):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
sparse_coo.col, 1)), 1)
v = sparse_tensor.SparseTensor(
indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
v._keras_shape = sparse_coo.shape
return v
v = resource_variable_ops.ResourceVariable(
value,
dtype=dtypes_module.as_dtype(dtype),
name=name,
constraint=constraint)
if isinstance(value, np.ndarray):
v._keras_shape = value.shape
elif hasattr(value, 'shape'):
v._keras_shape = int_shape(value)
track_variable(v)
return v
def track_tf_optimizer(tf_optimizer):
"""Tracks the given TF optimizer for initialization of its variables."""
if context.executing_eagerly():
return
graph = get_graph()
optimizers = _GRAPH_TF_OPTIMIZERS.setdefault(graph, weakref.WeakSet())
optimizers.add(tf_optimizer)
def track_variable(v):
"""Tracks the given variable for initialization."""
if context.executing_eagerly():
return
graph = v.graph if hasattr(v, 'graph') else get_graph()
if graph not in _GRAPH_VARIABLES:
_GRAPH_VARIABLES[graph] = weakref.WeakSet()
_GRAPH_VARIABLES[graph].add(v)
def _get_variables(graph=None):
"""Returns variables corresponding to the given graph for initialization."""
assert not context.executing_eagerly()
variables = _GRAPH_VARIABLES.setdefault(graph, weakref.WeakSet())
for opt in _GRAPH_TF_OPTIMIZERS.get(graph, set()):
variables.update(opt.optimizer.variables())
return variables
def _initialize_variables(session):
"""Utility to initialize uninitialized variables on the fly."""
variables = _get_variables(get_graph())
candidate_vars = []
for v in variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if candidate_vars:
# This step is expensive, so we only run it on variables not already
# marked as initialized.
is_initialized = session.run(
[variables_module.is_variable_initialized(v) for v in candidate_vars])
uninitialized_vars = []
for flag, v in zip(is_initialized, candidate_vars):
if not flag:
uninitialized_vars.append(v)
v._keras_initialized = True
if uninitialized_vars:
session.run(variables_module.variables_initializer(uninitialized_vars))
@keras_export('keras.backend.constant')
def constant(value, dtype=None, shape=None, name=None):
"""Creates a constant tensor.
Arguments:
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
"""
if dtype is None:
dtype = floatx()
# If the outer context is eager but we are executing under the keras
# FuncGraph, we create EagerTensors and use them as constants.
if (ops.executing_eagerly_outside_functions() and
getattr(get_graph(), 'name', '') == 'keras_graph'):
with ops.init_scope():
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer,
(`Layer` class) or by `Input`.
Arguments:
x: A candidate tensor.
Returns:
A boolean: Whether the argument is a Keras tensor.
Raises:
ValueError: In case `x` is not a symbolic tensor.
Examples:
```python
>>> import tensorflow as tf
>>> import numpy
>>> from keras import backend as K
>>> from keras.layers import Input, Dense
>>> np_var = numpy.array([1, 2])
>>> K.is_keras_tensor(np_var) # A numpy array is not a symbolic tensor.
ValueError
>>> k_var = tf.placeholder('float32', shape=(1,1))
>>> K.is_keras_tensor(k_var) # A variable indirectly created outside of
keras is not a Keras tensor.
False
>>> keras_var = K.variable(np_var)
>>> K.is_keras_tensor(keras_var) # A variable created with the keras
backend is not a Keras tensor.
False
>>> keras_placeholder = K.placeholder(shape=(2, 4, 5))
>>> K.is_keras_tensor(keras_placeholder) # A placeholder is not a Keras
tensor.
False
>>> keras_input = Input([10])
>>> K.is_keras_tensor(keras_input) # An Input is a Keras tensor.
True
>>> keras_layer_output = Dense(10)(keras_input)
>>> K.is_keras_tensor(keras_layer_output) # Any Keras layer output is a
Keras tensor.
True
```
"""
if not isinstance(x, (ops.Tensor,
variables_module.Variable,
sparse_tensor.SparseTensor)):
raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
'`. Expected a symbolic tensor instance.')
return hasattr(x, '_keras_history')
@keras_export('keras.backend.placeholder')
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None):
"""Instantiates a placeholder tensor and returns it.
Arguments:
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
Raises:
ValueError: If called with eager execution.
Returns:
Tensor instance (with Keras metadata included).
Examples:
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph
<tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
```
"""
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = tuple([None for _ in range(ndim)])
with get_graph().as_default():
if sparse:
x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
else:
x = array_ops.placeholder(dtype, shape=shape, name=name)
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
Arguments:
x: A candidate placeholder.
Returns:
Boolean.
"""
try:
return x.op.type == 'Placeholder'
except AttributeError:
return False
@keras_export('keras.backend.shape')
def shape(x):
"""Returns the symbolic shape of a tensor or variable.
Arguments:
x: A tensor or variable.
Returns:
A symbolic shape (which is itself a tensor).
Examples:
```python
# TensorFlow example
>>> from keras import backend as K
>>> tf_session = K.get_session()
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> input = keras.backend.placeholder(shape=(2, 4, 5))
>>> K.shape(kvar)
<tf.Tensor 'Shape_8:0' shape=(2,) dtype=int32>
>>> K.shape(input)
<tf.Tensor 'Shape_9:0' shape=(3,) dtype=int32>
# To get integer shape (Instead, you can use K.int_shape(x))
>>> K.shape(kvar).eval(session=tf_session)
array([2, 2], dtype=int32)
>>> K.shape(input).eval(session=tf_session)
array([2, 4, 5], dtype=int32)
```
"""
return array_ops.shape(x)
@keras_export('keras.backend.int_shape')
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
Arguments:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> K.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.int_shape(kvar)
(2, 2)
```
"""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@keras_export('keras.backend.ndim')
def ndim(x):
"""Returns the number of axes in a tensor, as an integer.
Arguments:
x: Tensor or variable.
Returns:
Integer (scalar), number of axes.
Examples:
```python
>>> from keras import backend as K
>>> input = K.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = K.variable(value=val)
>>> K.ndim(input)
3
>>> K.ndim(kvar)
2
```
"""
dims = x.shape._dims
if dims is not None:
return len(dims)
return None
@keras_export('keras.backend.dtype')
def dtype(x):
"""Returns the dtype of a Keras tensor or variable, as a string.
Arguments:
x: Tensor or variable.
Returns:
String, dtype of `x`.
Examples:
```python
>>> from keras import backend as K
>>> K.dtype(K.placeholder(shape=(2,4,5)))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float32'))
'float32'
>>> K.dtype(K.placeholder(shape=(2,4,5), dtype='float64'))
'float64'
# Keras variable
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]))
>>> K.dtype(kvar)
'float32'
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.dtype(kvar)
'float32'
```
"""
return x.dtype.base_dtype.name
@keras_export('keras.backend.eval')
def eval(x):
"""Evaluates the value of a variable.
Arguments:
x: A variable.
Returns:
A Numpy array.
Examples:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
>>> K.eval(kvar)
array([[ 1., 2.],
[ 3., 4.]], dtype=float32)
```
"""
return get_value(to_dense(x))
@keras_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable
dtype: String, data type of returned Keras variable
name: String, name of returned Keras variable
Returns:
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.zeros((3,4))
>>> K.eval(kvar)
array([[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@keras_export('keras.backend.ones')
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, filled with `1.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.ones((3,4))
>>> K.eval(kvar)
array([[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.],
[ 1., 1., 1., 1.]], dtype=float32)
```
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
track_variable(v)
return v
@keras_export('keras.backend.eye')
def eye(size, dtype=None, name=None):
"""Instantiate an identity matrix and returns it.
Arguments:
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, an identity matrix.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.eye(3)
>>> K.eval(kvar)
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
@keras_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable of the same shape as another tensor.
Arguments:
x: Keras variable or Keras tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with zeros.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_zeros = K.zeros_like(kvar)
>>> K.eval(kvar_zeros)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return array_ops.zeros_like(x, dtype=dtype, name=name)
@keras_export('keras.backend.ones_like')
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones variable of the same shape as another tensor.
Arguments:
x: Keras variable or tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with ones.
Example:
```python
>>> from keras import backend as K
>>> kvar = K.variable(np.random.random((2,3)))
>>> kvar_ones = K.ones_like(kvar)
>>> K.eval(kvar_ones)
array([[ 1., 1., 1.],
[ 1., 1., 1.]], dtype=float32)
```
"""
return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
"""Returns a tensor with the same content as the input tensor.
Arguments:
x: The input tensor.
name: String, name for the variable to create.
Returns:
A tensor of the same shape, type and content.
"""
return array_ops.identity(x, name=name)
@keras_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
"""Instantiates a variable with values drawn from a uniform distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
low: Float, lower boundary of the output interval.
high: Float, upper boundary of the output interval.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_uniform_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab40b10>
>>> K.eval(kvar)
array([[ 0.10940075, 0.10047495, 0.476143 ],
[ 0.66137183, 0.00869417, 0.89220798]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_uniform_initializer(
low, high, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
seed=None):
"""Instantiates a variable with values drawn from a normal distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
mean: Float, mean of the normal distribution.
scale: Float, standard deviation of the normal distribution.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
```python
# TensorFlow example
>>> kvar = K.random_normal_variable((2,3), 0, 1)
>>> kvar
<tensorflow.python.ops.variables.Variable object at 0x10ab12dd0>
>>> K.eval(kvar)
array([[ 1.19591331, 0.68685907, -0.63814116],
[ 0.92629528, 0.28055015, 1.70484698]], dtype=float32)
```
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_normal_initializer(
mean, scale, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.count_params')
def count_params(x):
"""Returns the static number of elements in a variable or tensor.
Arguments:
x: Variable or tensor.
Returns:
Integer, the number of scalars in `x`.
Example:
```python
>>> kvar = K.zeros((2,3))
>>> K.count_params(kvar)
6
>>> K.eval(kvar)
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
```
"""
return np.prod(x.shape.as_list())
@keras_export('keras.backend.cast')
def cast(x, dtype):
"""Casts a tensor to a different dtype and returns it.
You can cast a Keras variable but it still returns a Keras tensor.
Arguments:
x: Keras tensor (or variable).
      dtype: String, one of `'float16'`, `'float32'`, or `'float64'`.
Returns:
Keras tensor with dtype `dtype`.
Example:
```python
>>> from keras import backend as K
>>> input = K.placeholder((2, 3), dtype='float32')
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# It doesn't work in-place as below.
>>> K.cast(input, dtype='float16')
<tf.Tensor 'Cast_1:0' shape=(2, 3) dtype=float16>
>>> input
<tf.Tensor 'Placeholder_2:0' shape=(2, 3) dtype=float32>
# you need to assign it.
>>> input = K.cast(input, dtype='float16')
>>> input
<tf.Tensor 'Cast_2:0' shape=(2, 3) dtype=float16>
```
"""
return math_ops.cast(x, dtype)
# UPDATES OPS
@keras_export('keras.backend.update')
def update(x, new_x):
  """Update the value of `x` to `new_x`.
  Arguments:
      x: A Variable.
      new_x: A tensor of same shape as `x`.
  Returns:
      The variable `x` updated.
  """
  return state_ops.assign(x, new_x)
@keras_export('keras.backend.update_add')
def update_add(x, increment):
"""Update the value of `x` by adding `increment`.
Arguments:
x: A Variable.
increment: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_add(x, increment)
@keras_export('keras.backend.update_sub')
def update_sub(x, decrement):
"""Update the value of `x` by subtracting `decrement`.
Arguments:
x: A Variable.
decrement: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_sub(x, decrement)
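# Illustrative sketch (hypothetical helper, not exported): `update`,
# `update_add` and `update_sub` are thin wrappers around `tf.assign*`. Under
# eager execution they take effect immediately; in graph mode they return ops
# that must be run (e.g. in a session) before the variable actually changes.
def _example_update_ops_usage():
  import numpy as np
  import tensorflow as tf
  K = tf.keras.backend
  v = K.variable(np.zeros((2,), dtype='float32'))
  K.update_add(v, K.constant([1., 2.]))    # v -> [1., 2.] once the op has run
  K.update_sub(v, K.constant([0.5, 0.5]))  # v -> [0.5, 1.5] once the op has run
  return K.get_value(v)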
@keras_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
"""Compute the moving average of a variable.
Arguments:
x: A Variable.
value: A tensor with the same shape as `variable`.
momentum: The moving average momentum.
Returns:
An Operation to update the variable.
"""
# `training` is higher-up than the Keras backend in the abstraction hierarchy.
# In particular, `training` depends on layers, and thus on Keras.
# moving_averages, being low-level ops, should not be part of the training
# module.
from tensorflow.python.training import moving_averages # pylint: disable=g-import-not-at-top
return moving_averages.assign_moving_average(
x, value, momentum, zero_debias=True)
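# Illustrative sketch (hypothetical helper, not exported): the call pattern
# used by layers such as BatchNormalization to maintain running statistics.
# With momentum close to 1.0 the running value moves slowly towards each new
# batch statistic; exact values also depend on the zero-debiasing performed by
# `assign_moving_average`.
def _example_moving_average_update_usage():
  import numpy as np
  import tensorflow as tf
  K = tf.keras.backend
  moving_mean = K.variable(np.zeros((4,), dtype='float32'))
  batch_mean = K.constant([1., 2., 3., 4.])
  # Returns an op in graph mode; applies the update directly in eager mode.
  return K.moving_average_update(moving_mean, batch_mean, 0.99)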
# LINEAR ALGEBRA
@keras_export('keras.backend.dot')
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a *tensor*.
  When attempting to multiply an nD tensor
  with an nD tensor, it reproduces the Theano behavior
  (e.g. `(2, 3) * (4, 3, 5) -> (2, 4, 5)`).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor, dot product of `x` and `y`.
Examples:
```python
# dot product between tensors
>>> x = K.placeholder(shape=(2, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(2, 4) dtype=float32>
```
```python
# dot product between tensors
>>> x = K.placeholder(shape=(32, 28, 3))
>>> y = K.placeholder(shape=(3, 4))
>>> xy = K.dot(x, y)
>>> xy
<tf.Tensor 'MatMul_9:0' shape=(32, 28, 4) dtype=float32>
```
```python
# Theano-like behavior example
>>> x = K.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = K.ones((4, 3, 5))
>>> xy = K.dot(x, y)
>>> K.int_shape(xy)
(2, 4, 5)
```
"""
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
x_shape = []
for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = array_ops.reshape(x, [-1, x_shape[-1]])
yt = array_ops.reshape(
array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return array_ops.reshape(
math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
if is_sparse(x):
out = sparse_ops.sparse_tensor_dense_matmul(x, y)
else:
out = math_ops.matmul(x, y)
return out
@keras_export('keras.backend.batch_dot')
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
`batch_dot` is used to compute dot product of `x` and `y` when
`x` and `y` are data in batch, i.e. in a shape of
`(batch_size, :)`.
  `batch_dot` results in a tensor or variable with fewer dimensions
than the input. If the number of dimensions is reduced to 1,
we use `expand_dims` to make sure that ndim is at least 2.
Arguments:
x: Keras tensor or variable with `ndim >= 2`.
y: Keras tensor or variable with `ndim >= 2`.
axes: list of (or single) int with target dimensions.
The lengths of `axes[0]` and `axes[1]` should be the same.
Returns:
A tensor with shape equal to the concatenation of `x`'s shape
(less the dimension that was summed over) and `y`'s shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to `(batch_size, 1)`.
Examples:
Assume `x = [[1, 2], [3, 4]]` and `y = [[5, 6], [7, 8]]`
    `batch_dot(x, y, axes=1) = [[17], [53]]` which is the main diagonal
of `x.dot(y.T)`, although we never have to calculate the off-diagonal
elements.
Shape inference:
Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
If `axes` is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in `x`'s shape and `y`'s shape:
* `x.shape[0]` : 100 : append to output shape
* `x.shape[1]` : 20 : do not append to output shape,
      dimension 1 of `x` has been summed over. (`axes[0]` = 1)
* `y.shape[0]` : 100 : do not append to output shape,
always ignore first dimension of `y`
* `y.shape[1]` : 30 : append to output shape
* `y.shape[2]` : 20 : do not append to output shape,
      dimension 2 of `y` has been summed over. (`axes[1]` = 2)
`output_shape` = `(100, 30)`
```python
>>> x_batch = K.ones(shape=(32, 20, 1))
>>> y_batch = K.ones(shape=(32, 30, 20))
>>> xy_batch_dot = K.batch_dot(x_batch, y_batch, axes=[1, 2])
>>> K.int_shape(xy_batch_dot)
(32, 1, 30)
```
"""
if isinstance(axes, int):
axes = (axes, axes)
x_ndim = ndim(x)
y_ndim = ndim(y)
if axes is None:
# behaves like tf.batch_matmul as default
axes = [x_ndim - 1, y_ndim - 2]
if x_ndim > y_ndim:
diff = x_ndim - y_ndim
y = array_ops.reshape(y,
array_ops.concat(
[array_ops.shape(y), [1] * (diff)], axis=0))
elif y_ndim > x_ndim:
diff = y_ndim - x_ndim
x = array_ops.reshape(x,
array_ops.concat(
[array_ops.shape(x), [1] * (diff)], axis=0))
else:
diff = 0
if ndim(x) == 2 and ndim(y) == 2:
if axes[0] == axes[1]:
out = math_ops.reduce_sum(math_ops.multiply(x, y), axes[0])
else:
out = math_ops.reduce_sum(
math_ops.multiply(array_ops.transpose(x, [1, 0]), y), axes[1])
else:
adj_x = None if axes[0] == ndim(x) - 1 else True
adj_y = True if axes[1] == ndim(y) - 1 else None
out = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
if diff:
if x_ndim > y_ndim:
idx = x_ndim + y_ndim - 3
else:
idx = x_ndim - 1
out = array_ops.squeeze(out, list(range(idx, idx + diff)))
if ndim(out) == 1:
out = expand_dims(out, 1)
return out
@keras_export('keras.backend.transpose')
def transpose(x):
"""Transposes a tensor and returns it.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
Examples:
```python
>>> var = K.variable([[1, 2, 3], [4, 5, 6]])
>>> K.eval(var)
array([[ 1., 2., 3.],
[ 4., 5., 6.]], dtype=float32)
>>> var_transposed = K.transpose(var)
>>> K.eval(var_transposed)
array([[ 1., 4.],
[ 2., 5.],
[ 3., 6.]], dtype=float32)
```
```python
>>> input = K.placeholder((2, 3))
>>> input
<tf.Tensor 'Placeholder_11:0' shape=(2, 3) dtype=float32>
>>> input_transposed = K.transpose(input)
>>> input_transposed
<tf.Tensor 'transpose_4:0' shape=(3, 2) dtype=float32>
```
"""
return array_ops.transpose(x)
@keras_export('keras.backend.gather')
def gather(reference, indices):
"""Retrieves the elements of indices `indices` in the tensor `reference`.
Arguments:
reference: A tensor.
indices: An integer tensor of indices.
Returns:
A tensor of same type as `reference`.
"""
return array_ops.gather(reference, indices)
# ELEMENT-WISE OPERATIONS
@keras_export('keras.backend.max')
def max(x, axis=None, keepdims=False):
"""Maximum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find maximum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with maximum values of `x`.
"""
return math_ops.reduce_max(x, axis, keepdims)
@keras_export('keras.backend.min')
def min(x, axis=None, keepdims=False):
"""Minimum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with minimum values of `x`.
"""
return math_ops.reduce_min(x, axis, keepdims)
@keras_export('keras.backend.sum')
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to sum over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with sum of `x`.
"""
return math_ops.reduce_sum(x, axis, keepdims)
@keras_export('keras.backend.prod')
def prod(x, axis=None, keepdims=False):
"""Multiplies the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the product of elements of `x`.
"""
return math_ops.reduce_prod(x, axis, keepdims)
@keras_export('keras.backend.cumsum')
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
Returns:
A tensor of the cumulative sum of values of `x` along `axis`.
"""
return math_ops.cumsum(x, axis=axis)
@keras_export('keras.backend.cumprod')
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
Returns:
A tensor of the cumulative product of values of `x` along `axis`.
"""
return math_ops.cumprod(x, axis=axis)
@keras_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the variance of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.std')
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the standard deviation.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the standard deviation of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.mean')
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: A list of integer. Axes to compute the mean.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1 for each entry in `axis`. If `keepdims` is `True`,
the reduced dimensions are retained with length 1.
Returns:
A tensor with the mean of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_mean(x, axis, keepdims)
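# Illustrative sketch (hypothetical helper, not exported): how `keepdims`
# affects the output shape of the reductions defined above (sum, prod, var,
# std, mean, ...). Assumes only the public `tf.keras.backend` API.
def _example_reduction_keepdims_usage():
  import numpy as np
  import tensorflow as tf
  K = tf.keras.backend
  x = K.constant(np.array([[1., 2.], [3., 4.]], dtype='float32'))
  assert K.int_shape(K.mean(x, axis=1)) == (2,)
  assert K.int_shape(K.mean(x, axis=1, keepdims=True)) == (2, 1)
  return K.eval(K.mean(x))  # 2.5, the mean over all elements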
@keras_export('keras.backend.any')
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or retain (with length 1) the reduction axes.
Returns:
      A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_any(x, axis, keepdims)
@keras_export('keras.backend.all')
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or retain (with length 1) the reduction axes.
Returns:
      A bool tensor.
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_all(x, axis, keepdims)
@keras_export('keras.backend.argmax')
def argmax(x, axis=-1):
"""Returns the index of the maximum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmax(x, axis)
@keras_export('keras.backend.argmin')
def argmin(x, axis=-1):
"""Returns the index of the minimum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmin(x, axis)
@keras_export('keras.backend.square')
def square(x):
"""Element-wise square.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.square(x)
@keras_export('keras.backend.abs')
def abs(x):
"""Element-wise absolute value.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.abs(x)
@keras_export('keras.backend.sqrt')
def sqrt(x):
"""Element-wise square root.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
zero = _constant_to_tensor(0., x.dtype.base_dtype)
inf = _constant_to_tensor(np.inf, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, inf)
return math_ops.sqrt(x)
@keras_export('keras.backend.exp')
def exp(x):
"""Element-wise exponential.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.exp(x)
@keras_export('keras.backend.log')
def log(x):
"""Element-wise log.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
Returns:
The reduced tensor.
"""
return math_ops.reduce_logsumexp(x, axis, keepdims)
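# Illustrative sketch (hypothetical helper, not exported): why the fused
# reduce_logsumexp is preferred over the naive log(sum(exp(x))). It uses
# `tf.reduce_logsumexp` directly, which is the op wrapped by `logsumexp`.
def _example_logsumexp_stability():
  import tensorflow as tf
  K = tf.keras.backend
  x = K.constant([1000., 1000.])
  naive = K.log(K.sum(K.exp(x)))   # exp(1000.) overflows to inf
  stable = tf.reduce_logsumexp(x)  # ~1000.6931, i.e. 1000 + log(2)
  return naive, stable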
@keras_export('keras.backend.round')
def round(x):
"""Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.round(x)
@keras_export('keras.backend.sign')
def sign(x):
"""Element-wise sign.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sign(x)
@keras_export('keras.backend.pow')
def pow(x, a):
"""Element-wise exponentiation.
Arguments:
x: Tensor or variable.
a: Python integer.
Returns:
A tensor.
"""
return math_ops.pow(x, a)
@keras_export('keras.backend.clip')
def clip(x, min_value, max_value):
"""Element-wise value clipping.
Arguments:
x: Tensor or variable.
min_value: Python float or integer.
max_value: Python float or integer.
Returns:
A tensor.
"""
if max_value is not None and max_value < min_value:
max_value = min_value
if max_value is None:
max_value = np.inf
min_value = _constant_to_tensor(min_value, x.dtype.base_dtype)
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
return clip_ops.clip_by_value(x, min_value, max_value)
@keras_export('keras.backend.equal')
def equal(x, y):
"""Element-wise equality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.equal(x, y)
@keras_export('keras.backend.not_equal')
def not_equal(x, y):
"""Element-wise inequality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.not_equal(x, y)
@keras_export('keras.backend.greater')
def greater(x, y):
"""Element-wise truth value of (x > y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater(x, y)
@keras_export('keras.backend.greater_equal')
def greater_equal(x, y):
"""Element-wise truth value of (x >= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater_equal(x, y)
@keras_export('keras.backend.less')
def less(x, y):
"""Element-wise truth value of (x < y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less(x, y)
@keras_export('keras.backend.less_equal')
def less_equal(x, y):
"""Element-wise truth value of (x <= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less_equal(x, y)
@keras_export('keras.backend.maximum')
def maximum(x, y):
"""Element-wise maximum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.maximum(x, y)
@keras_export('keras.backend.minimum')
def minimum(x, y):
"""Element-wise minimum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.minimum(x, y)
@keras_export('keras.backend.sin')
def sin(x):
"""Computes sin of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sin(x)
@keras_export('keras.backend.cos')
def cos(x):
"""Computes cos of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
return normed, mean, var
def _broadcast_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused, broadcast version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(array_ops.shape(x)[axis])
target_shape = array_ops.stack(target_shape)
broadcast_mean = array_ops.reshape(mean, target_shape)
broadcast_var = array_ops.reshape(var, target_shape)
if gamma is None:
broadcast_gamma = None
else:
broadcast_gamma = array_ops.reshape(gamma, target_shape)
if beta is None:
broadcast_beta = None
else:
broadcast_beta = array_ops.reshape(beta, target_shape)
normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def _fused_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if list(reduction_axes) == [0, 1, 2]:
normalization_axis = 3
tf_data_format = 'NHWC'
else:
normalization_axis = 1
tf_data_format = 'NCHW'
if gamma is None:
gamma = constant_op.constant(
1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
if beta is None:
beta = constant_op.constant(
0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
return nn.fused_batch_norm(
x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@keras_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
"""Computes mean and std for batch then apply batch_normalization on batch.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
return _fused_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
return _regular_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
@keras_export('keras.backend.batch_normalization')
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
  `output = (x - mean) / sqrt(var + epsilon) * gamma + beta`
Arguments:
x: Input tensor or variable.
mean: Mean of batch.
var: Variance of batch.
beta: Tensor with which to center the input.
gamma: Tensor by which to scale the input.
axis: Integer, the axis that should be normalized.
(typically the features axis).
epsilon: Fuzz factor.
Returns:
A tensor.
"""
if ndim(x) == 4:
# The CPU implementation of `fused_batch_norm` only supports NHWC
if axis == 1 or axis == -3:
tf_data_format = 'NCHW'
elif axis == 3 or axis == -1:
tf_data_format = 'NHWC'
else:
tf_data_format = None
if (tf_data_format == 'NHWC' or
tf_data_format == 'NCHW' and _has_nchw_support()):
# The mean / var / beta / gamma tensors may be broadcasted
# so they may have extra axes of size 1, which should be squeezed.
if ndim(mean) > 1:
mean = array_ops.reshape(mean, [-1])
if ndim(var) > 1:
var = array_ops.reshape(var, [-1])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) > 1:
beta = array_ops.reshape(beta, [-1])
if gamma is None:
gamma = ones_like(mean)
elif ndim(gamma) > 1:
gamma = array_ops.reshape(gamma, [-1])
y, _, _ = nn.fused_batch_norm(
x,
gamma,
beta,
epsilon=epsilon,
mean=mean,
variance=var,
data_format=tf_data_format,
is_training=False
)
return y
return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
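# Illustrative sketch (hypothetical helper, not exported): applying
# `batch_normalization` with precomputed per-feature statistics. Each column
# is centered on its mean and scaled by 1/sqrt(var + epsilon), so the result
# below is approximately [[-1., -1.], [1., 1.]].
def _example_batch_normalization_usage():
  import numpy as np
  import tensorflow as tf
  K = tf.keras.backend
  x = K.constant(np.array([[1., 2.], [3., 4.]], dtype='float32'))
  mean = K.constant([2., 3.])
  var = K.constant([1., 1.])
  beta = K.constant([0., 0.])
  gamma = K.constant([1., 1.])
  return K.batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3)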
# SHAPE OPERATIONS
@keras_export('keras.backend.concatenate')
def concatenate(tensors, axis=-1):
"""Concatenates a list of tensors alongside the specified axis.
Arguments:
tensors: list of tensors to concatenate.
axis: concatenation axis.
Returns:
A tensor.
"""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all(is_sparse(x) for x in tensors):
return sparse_ops.sparse_concat(axis, tensors)
else:
return array_ops.concat([to_dense(x) for x in tensors], axis)
@keras_export('keras.backend.reshape')
def reshape(x, shape):
"""Reshapes a tensor to the specified shape.
Arguments:
x: Tensor or variable.
shape: Target shape tuple.
Returns:
A tensor.
"""
return array_ops.reshape(x, shape)
@keras_export('keras.backend.permute_dimensions')
def permute_dimensions(x, pattern):
"""Permutes axes in a tensor.
Arguments:
x: Tensor or variable.
pattern: A tuple of
dimension indices, e.g. `(0, 2, 1)`.
Returns:
A tensor.
"""
return array_ops.transpose(x, perm=pattern)
@keras_export('keras.backend.resize_images')
def resize_images(x, height_factor, width_factor, data_format,
interpolation='nearest'):
"""Resizes the images contained in a 4D tensor.
Arguments:
x: Tensor or variable to resize.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
interpolation: A string, one of `nearest` or `bilinear`.
Returns:
A tensor.
Raises:
ValueError: in case of incorrect value for
`data_format` or `interpolation`.
"""
if data_format == 'channels_first':
rows, cols = 2, 3
elif data_format == 'channels_last':
rows, cols = 1, 2
else:
raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
original_shape = int_shape(x)
new_shape = array_ops.shape(x)[rows:cols + 1]
new_shape *= constant_op.constant(
np.array([height_factor, width_factor], dtype='int32'))
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 2, 3, 1])
if interpolation == 'nearest':
x = image_ops.resize_nearest_neighbor(x, new_shape)
elif interpolation == 'bilinear':
x = image_ops.resize_bilinear(x, new_shape)
else:
raise ValueError('interpolation should be one '
'of "nearest" or "bilinear".')
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 3, 1, 2])
if original_shape[rows] is None:
new_height = None
else:
new_height = original_shape[rows] * height_factor
if original_shape[cols] is None:
new_width = None
else:
new_width = original_shape[cols] * width_factor
if data_format == 'channels_first':
output_shape = (None, None, new_height, new_width)
else:
output_shape = (None, new_height, new_width, None)
x.set_shape(output_shape)
return x
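# Illustrative sketch (hypothetical helper, not exported): doubling the
# spatial dimensions of an NHWC batch with nearest-neighbour interpolation.
# The static output shape merges the dynamic resize result with the factors
# applied above.
def _example_resize_images_usage():
  import numpy as np
  import tensorflow as tf
  K = tf.keras.backend
  x = K.constant(np.ones((1, 3, 4, 2), dtype='float32'))  # NHWC
  y = K.resize_images(x, 2, 2, data_format='channels_last')
  # K.int_shape(y) -> (1, 6, 8, 2): height and width doubled.
  return y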
@keras_export('keras.backend.resize_volumes')
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""Resizes the volume contained in a 5D tensor.
Arguments:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
A tensor.
Raises:
ValueError: if `data_format` is neither
`channels_last` or `channels_first`.
"""
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('Invalid data_format: ' + str(data_format))
@keras_export('keras.backend.repeat_elements')
def repeat_elements(x, rep, axis):
"""Repeats the elements of a tensor along an axis, like `np.repeat`.
If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
will have shape `(s1, s2 * rep, s3)`.
Arguments:
x: Tensor or variable.
rep: Python integer, number of times to repeat.
axis: Axis along which to repeat.
Returns:
A tensor.
"""
x_shape = x.shape.as_list()
# For static axis
if x_shape[axis] is not None:
# slices along the repeat axis
splits = array_ops.split(value=x,
num_or_size_splits=x_shape[axis],
axis=axis)
# repeat each slice the given number of reps
x_rep = [s for s in splits for _ in range(rep)]
return concatenate(x_rep, axis)
# Here we use tf.tile to mimic behavior of np.repeat so that
# we can handle dynamic shapes (that include None).
# To do that, we need an auxiliary axis to repeat elements along
# it and then merge them along the desired axis.
# Repeating
auxiliary_axis = axis + 1
x_shape = array_ops.shape(x)
x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
reps = np.ones(len(x.shape) + 1)
reps[auxiliary_axis] = rep
x_rep = array_ops.tile(x_rep, reps)
# Merging
reps = np.delete(reps, auxiliary_axis)
reps[axis] = rep
reps = array_ops.constant(reps, dtype='int32')
x_shape *= reps
x_rep = array_ops.reshape(x_rep, x_shape)
# Fix shape representation
x_shape = x.shape.as_list()
x_rep.set_shape(x_shape)
x_rep._keras_shape = tuple(x_shape)
return x_rep
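# Hedged usage sketch (assuming this backend module is imported as `K`):
#
#   x = K.ones((2, 4, 5))
#   y = K.repeat_elements(x, rep=3, axis=1)
#   # int_shape(y) should be (2, 12, 5), matching np.repeat along axis 1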
@keras_export('keras.backend.repeat')
def repeat(x, n):
"""Repeats a 2D tensor.
  If `x` has shape `(samples, dim)` and `n` is `2`,
the output will have shape `(samples, 2, dim)`.
Arguments:
x: Tensor or variable.
n: Python integer, number of times to repeat.
Returns:
A tensor.
"""
assert ndim(x) == 2
x = array_ops.expand_dims(x, 1)
pattern = array_ops.stack([1, n, 1])
return array_ops.tile(x, pattern)
@keras_export('keras.backend.arange')
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument and "start" is 0.
The default type of the returned tensor is `'int32'` to
match TensorFlow's default.
Arguments:
start: Start value.
stop: Stop value.
step: Difference between two successive values.
dtype: Integer dtype to use.
Returns:
An integer tensor.
"""
# Match the behavior of numpy and Theano by returning an empty sequence.
if stop is None and start < 0:
start = 0
result = math_ops.range(start, limit=stop, delta=step, name='arange')
if dtype != 'int32':
result = cast(result, dtype)
return result
@keras_export('keras.backend.tile')
def tile(x, n):
"""Creates a tensor by tiling `x` by `n`.
Arguments:
x: A tensor or variable
      n: A list of integers. The length must be the same as the number of
dimensions in `x`.
Returns:
A tiled tensor.
"""
if isinstance(n, int):
n = [n]
return array_ops.tile(x, n)
@keras_export('keras.backend.flatten')
def flatten(x):
"""Flatten a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor, reshaped into 1-D
"""
return array_ops.reshape(x, [-1])
@keras_export('keras.backend.batch_flatten')
def batch_flatten(x):
"""Turn a nD tensor into a 2D tensor with same 0th dimension.
  In other words, it flattens each data sample of a batch.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
return x
@keras_export('keras.backend.expand_dims')
def expand_dims(x, axis=-1):
"""Adds a 1-sized dimension at index "axis".
Arguments:
x: A tensor or variable.
axis: Position where to add a new axis.
Returns:
A tensor with expanded dimensions.
"""
return array_ops.expand_dims(x, axis)
@keras_export('keras.backend.squeeze')
def squeeze(x, axis):
"""Removes a 1-dimension from the tensor at index "axis".
Arguments:
x: A tensor or variable.
axis: Axis to drop.
Returns:
A tensor with the same data as `x` but reduced dimensions.
"""
return array_ops.squeeze(x, [axis])
@keras_export('keras.backend.temporal_padding')
def temporal_padding(x, padding=(1, 1)):
"""Pads the middle dimension of a 3D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 integers, how many zeros to
add at the start and end of dim 1.
Returns:
A padded 3D tensor.
"""
assert len(padding) == 2
pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
return array_ops.pad(x, pattern)
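# Hedged usage sketch (assuming `K` is this backend module): padding the time
# dimension of a 3D tensor.
#
#   x = K.zeros((8, 5, 16))                 # (samples, timesteps, features)
#   y = K.temporal_padding(x, padding=(1, 2))
#   # int_shape(y) should be (8, 8, 16): 1 zero step prepended, 2 appended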
@keras_export('keras.backend.spatial_2d_padding')
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 4D tensor.
Raises:
ValueError: if `data_format` is neither
          `channels_last` nor `channels_first`.
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_3d_padding')
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""Pads 5D tensor with zeros along the depth, height, width dimensions.
Pads these dimensions with respectively
"padding[0]", "padding[1]" and "padding[2]" zeros left and right.
For 'channels_last' data_format,
the 2nd, 3rd and 4th dimension will be padded.
For 'channels_first' data_format,
the 3rd, 4th and 5th dimension will be padded.
Arguments:
x: Tensor or variable.
padding: Tuple of 3 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 5D tensor.
Raises:
ValueError: if `data_format` is neither
          `channels_last` nor `channels_first`.
"""
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
else:
pattern = [[0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0],
padding[2][1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.stack')
def stack(x, axis=0):
"""Stacks a list of rank `R` tensors into a rank `R+1` tensor.
Arguments:
x: List of tensors.
axis: Axis along which to perform stacking.
Returns:
A tensor.
"""
return array_ops.stack(x, axis=axis)
@keras_export('keras.backend.one_hot')
def one_hot(indices, num_classes):
"""Computes the one-hot representation of an integer tensor.
Arguments:
indices: nD integer tensor of shape
`(batch_size, dim1, dim2, ... dim(n-1))`
num_classes: Integer, number of classes to consider.
  Returns:
    (n + 1)D one-hot representation of the input,
    with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
"""
return array_ops.one_hot(indices, depth=num_classes, axis=-1)
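# Hedged usage sketch (assuming `K` is this backend module): one-hot encoding
# integer class labels.
#
#   labels = K.constant([[0, 2], [1, 1]], dtype='int32')   # shape (2, 2)
#   encoded = K.one_hot(labels, num_classes=3)
#   # int_shape(encoded) should be (2, 2, 3); e.g. label 2 -> [0., 0., 1.]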
@keras_export('keras.backend.reverse')
def reverse(x, axes):
"""Reverse a tensor along the specified axes.
Arguments:
x: Tensor to reverse.
axes: Integer or iterable of integers.
Axes to reverse.
Returns:
A tensor.
"""
if isinstance(axes, int):
axes = [axes]
return array_ops.reverse(x, axes)
# VALUE MANIPULATION
@keras_export('keras.backend.get_value')
def get_value(x):
"""Returns the value of a variable.
Arguments:
x: input variable.
Returns:
A Numpy array.
"""
if not tensor_util.is_tensor(x):
return x
if context.executing_eagerly():
return x.numpy()
if not getattr(x, '_in_graph_mode', True):
# This is a variable which was created in an eager context, but is being
# evaluated from a Graph.
with context.eager_mode():
return x.numpy()
if ops.executing_eagerly_outside_functions():
# This method of evaluating works inside the Keras FuncGraph.
return function([], x)(x)
return x.eval(session=get_session((x,)))
@keras_export('keras.backend.batch_get_value')
def batch_get_value(tensors):
"""Returns the value of more than one tensor variable.
Arguments:
tensors: list of ops to run.
Returns:
A list of Numpy arrays.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return [x.numpy() for x in tensors]
elif ops.inside_function(): # pylint: disable=protected-access
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
if tensors:
return get_session(tensors).run(tensors)
else:
return []
@keras_export('keras.backend.set_value')
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
Arguments:
x: Tensor to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
"""
value = np.asarray(value, dtype=dtype(x))
if ops.executing_eagerly_outside_functions():
x.assign(value)
else:
with get_graph().as_default():
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
get_session().run(assign_op, feed_dict={assign_placeholder: value})
@keras_export('keras.backend.batch_set_value')
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
Arguments:
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
if ops.executing_eagerly_outside_functions():
for x, value in tuples:
x.assign(np.asarray(value, dtype=dtype(x)))
else:
with get_graph().as_default():
if tuples:
assign_ops = []
feed_dict = {}
for x, value in tuples:
value = np.asarray(value, dtype=dtype(x))
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
assign_placeholder = array_ops.placeholder(tf_dtype,
shape=value.shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
assign_ops.append(assign_op)
feed_dict[assign_placeholder] = value
get_session().run(assign_ops, feed_dict=feed_dict)
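# Hedged usage sketch (assuming `K` is this backend module and `np` is numpy):
# reading and writing variable values through the backend.
#
#   v = K.variable(np.zeros((2, 2)))
#   K.set_value(v, np.ones((2, 2)))                 # single variable
#   K.batch_set_value([(v, 2 * np.ones((2, 2)))])   # many variables at once
#   K.batch_get_value([v])                          # -> [array of 2s, (2, 2)]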
@keras_export('keras.backend.print_tensor')
def print_tensor(x, message=''):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`
which should be used in the following code. Otherwise the
print operation is not taken into account during evaluation.
Example:
```python
>>> x = K.print_tensor(x, message="x is: ")
```
Arguments:
x: Tensor to print.
message: Message to print jointly with the tensor.
Returns:
The same tensor `x`, unchanged.
"""
return logging_ops.Print(x, [x], message)
# GRAPH MANIPULATION
class GraphExecutionFunction(object):
"""Runs a computation graph.
  It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
  In particular, additional operations can be passed via the `fetches`
  argument and additional tensor substitutions via the `feed_dict` argument.
  Note that the given substitutions are merged with substitutions from
  `inputs`. Even though `feed_dict` is passed once in the constructor (called
  in `model.compile()`), we can modify the values in the dictionary. Through
  this `feed_dict` we can provide additional substitutions besides Keras
  inputs.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Arguments to `tf.Session.run()`:
`fetches`, `feed_dict`, `options`, `run_metadata`.
"""
def __init__(self, inputs, outputs, updates=None, name=None,
**session_kwargs):
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
self.inputs = nest.flatten(inputs)
self._outputs_structure = outputs
self.outputs = cast_variables_to_tensor(
nest.flatten(outputs, expand_composites=True))
# TODO(b/127668432): Consider using autograph to generate these
# dependencies in call.
# Index 0 = total loss or model output for `predict`.
with ops.control_dependencies([self.outputs[0]]):
updates_ops = []
for update in updates:
if isinstance(update, tuple):
p, new_p = update
updates_ops.append(state_ops.assign(p, new_p))
else:
# assumed already an op
updates_ops.append(update)
self.updates_op = control_flow_ops.group(*updates_ops)
self.name = name
# additional tensor substitutions
self.feed_dict = session_kwargs.pop('feed_dict', None)
# additional operations
self.fetches = session_kwargs.pop('fetches', [])
if not isinstance(self.fetches, list):
self.fetches = [self.fetches]
self.run_options = session_kwargs.pop('options', None)
self.run_metadata = session_kwargs.pop('run_metadata', None)
# The main use case of `fetches` being passed to a model is the ability
    # to run custom updates.
# This requires us to wrap fetches in `identity` ops.
self.fetches = [array_ops.identity(x) for x in self.fetches]
self.session_kwargs = session_kwargs
# This mapping keeps track of the function that should receive the
# output from a fetch in `fetches`: { fetch: function(fetch_output) }
# A Callback can use this to register a function with access to the
# output values for a fetch it added.
self.fetch_callbacks = {}
if session_kwargs:
raise ValueError('Some keys in session_kwargs are not supported at this '
'time: %s' % (session_kwargs.keys(),))
self._callable_fn = None
self._feed_arrays = None
self._feed_symbols = None
self._symbol_vals = None
self._fetches = None
self._session = None
def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
"""Generates a callable that runs the graph.
Arguments:
feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
session: Session to use to generate the callable.
Returns:
Function that runs the graph according to the above options.
"""
# Prepare callable options.
callable_opts = config_pb2.CallableOptions()
# Handle external-data feed.
for x in feed_arrays:
callable_opts.feed.append(x.name)
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
callable_opts.feed.append(key.name)
# Handle symbolic feed.
for x, y in zip(feed_symbols, symbol_vals):
connection = callable_opts.tensor_connection.add()
if x.dtype != y.dtype:
y = math_ops.cast(y, dtype=x.dtype)
from_tensor = ops._as_graph_element(y)
if from_tensor is None:
from_tensor = y
connection.from_tensor = from_tensor.name # Data tensor
connection.to_tensor = x.name # Placeholder
# Handle fetches.
for x in self.outputs + self.fetches:
callable_opts.fetch.append(x.name)
# Handle updates.
callable_opts.target.append(self.updates_op.name)
# Handle run_options.
if self.run_options:
callable_opts.run_options.CopyFrom(self.run_options)
# Create callable.
callable_fn = session._make_callable_from_options(callable_opts)
# Cache parameters corresponding to the generated callable, so that
# we can detect future mismatches and refresh the callable.
self._callable_fn = callable_fn
self._feed_arrays = feed_arrays
self._feed_symbols = feed_symbols
self._symbol_vals = symbol_vals
self._fetches = list(self.fetches)
self._session = session
def _call_fetch_callbacks(self, fetches_output):
for fetch, output in zip(self._fetches, fetches_output):
if fetch in self.fetch_callbacks:
self.fetch_callbacks[fetch](output)
def _eval_if_composite(self, tensor):
"""Helper method which evaluates any CompositeTensors passed to it."""
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
if isinstance(tensor, composite_tensor.CompositeTensor):
return self._session.run(tensor)
else:
return tensor
def __call__(self, inputs):
inputs = nest.flatten(inputs)
session = get_session(inputs)
feed_arrays = []
array_vals = []
feed_symbols = []
symbol_vals = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
continue
if is_sparse(tensor):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
np.expand_dims(sparse_coo.col, 1)), 1)
value = (indices, sparse_coo.data, sparse_coo.shape)
if tensor_util.is_tensor(value):
# Case: feeding symbolic tensor.
feed_symbols.append(tensor)
symbol_vals.append(value)
else:
# Case: feeding Numpy array.
feed_arrays.append(tensor)
# We need to do array conversion and type casting at this level, since
# `callable_fn` only supports exact matches.
tensor_type = dtypes_module.as_dtype(tensor.dtype)
array_vals.append(np.asarray(value,
dtype=tensor_type.as_numpy_dtype))
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
array_vals.append(
np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))
# Refresh callable if anything has changed.
if (self._callable_fn is None or feed_arrays != self._feed_arrays or
symbol_vals != self._symbol_vals or
feed_symbols != self._feed_symbols or self.fetches != self._fetches or
session != self._session):
self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
fetched = self._callable_fn(*array_vals,
run_metadata=self.run_metadata)
self._call_fetch_callbacks(fetched[-len(self._fetches):])
output_structure = nest.pack_sequence_as(
self._outputs_structure,
fetched[:len(self.outputs)],
expand_composites=True)
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
return nest.map_structure(self._eval_if_composite, output_structure)
class EagerExecutionFunction(object):
"""Helper class for constructing a TF graph function from the Keras graph.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Unsupported.
"""
def __init__(self, inputs, outputs, updates=None, name=None):
self.name = name
self._outputs_structure = outputs
inputs = nest.flatten(inputs)
outputs = nest.flatten(outputs, expand_composites=True)
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
if updates and not outputs:
# Edge case; never happens in practice
raise ValueError('Cannot create a Keras backend function with updates'
' but no outputs during eager execution.')
graphs = {i.graph for i in nest.flatten([inputs, outputs, updates])
if hasattr(i, 'graph')}
if len(graphs) > 1:
raise ValueError('Cannot create an execution function which is comprised '
'of elements from multiple graphs.')
source_graph = graphs.pop()
global_graph = get_graph()
updates_ops = []
legacy_update_ops = []
for update in updates:
# For legacy reasons it is allowed to pass an update as a tuple
# `(variable, new_value)` (this maps to an assign op). Otherwise it
# is assumed to already be an op -- we cannot control its execution
# order.
if isinstance(update, tuple):
legacy_update_ops.append(update)
else:
if hasattr(update, 'op'):
update = update.op
updates_ops.append(update)
with _scratch_graph() as exec_graph:
global_graph = get_graph()
if source_graph not in (exec_graph, global_graph):
raise ValueError('Unknown graph. Aborting.')
if source_graph is global_graph and exec_graph is not global_graph:
init_tensors = (
outputs + updates_ops + [p for [p, _] in legacy_update_ops] +
[p_new for [_, p_new] in legacy_update_ops
if isinstance(p_new, ops.Tensor)])
lifted_map = lift_to_graph.lift_to_graph(
init_tensors=init_tensors, graph=exec_graph, sources=inputs,
add_sources=True, handle_captures=True, base_graph=source_graph)
inputs = [lifted_map[i] for i in inputs]
outputs = [lifted_map[i] for i in outputs]
updates_ops = [lifted_map[i] for i in updates_ops]
legacy_update_ops = [(lifted_map[p], lifted_map.get(p_new, p_new))
for p, p_new in legacy_update_ops]
# Consolidate updates
with exec_graph.as_default():
outputs = cast_variables_to_tensor(outputs)
with ops.control_dependencies(outputs):
for p, p_new in legacy_update_ops:
updates_ops.append(state_ops.assign(p, p_new))
self.inputs, self.outputs = inputs, outputs
with ops.control_dependencies(updates_ops):
self.outputs[0] = array_ops.identity(self.outputs[0])
exec_graph.inputs = self.inputs + list(exec_graph.captures.values())
exec_graph.outputs = self.outputs
graph_fn = eager_function.ConcreteFunction(exec_graph)
graph_fn._num_positional_args = len(self.inputs)
graph_fn._arg_keywords = []
self._graph_fn = graph_fn
# Handle placeholders with default
# (treated as required placeholder by graph functions)
self._placeholder_default_values = {}
with exec_graph.as_default():
for x in self.inputs:
if x.op.type == 'PlaceholderWithDefault':
self._placeholder_default_values[x] = tensor_util.constant_value(
x.op.inputs[0])
def __call__(self, inputs):
inputs = nest.flatten(inputs)
converted_inputs = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
# Assume `value` is a placeholder with default
value = self._placeholder_default_values.get(tensor, None)
if value is None:
raise ValueError(
'You must feed a value for placeholder %s' % (tensor,))
if not isinstance(value, ops.Tensor):
value = ops.convert_to_tensor(value, dtype=tensor.dtype)
if value.dtype != tensor.dtype:
# Temporary workaround due to `convert_to_tensor` not casting floats.
# See b/119637405
value = math_ops.cast(value, tensor.dtype)
converted_inputs.append(value)
outputs = self._graph_fn(*converted_inputs)
return nest.pack_sequence_as(
self._outputs_structure, [x.numpy() for x in outputs],
expand_composites=True)
@keras_export('keras.backend.function')
def function(inputs, outputs, updates=None, name=None, **kwargs):
"""Instantiates a Keras function.
Arguments:
inputs: List of placeholder tensors.
outputs: List of output tensors.
updates: List of update ops.
name: String, name of function.
**kwargs: Passed to `tf.Session.run`.
Returns:
    A callable that, given a list of input values, computes and returns the
    corresponding output values as Numpy arrays.
Raises:
ValueError: if invalid kwargs are passed in or if in eager execution.
"""
if ops.executing_eagerly_outside_functions():
if kwargs:
      raise ValueError('Session keyword arguments are not supported during '
'eager execution. You passed: %s' % (kwargs,))
return EagerExecutionFunction(inputs, outputs, updates=updates, name=name)
if kwargs:
for key in kwargs:
if (key not in tf_inspect.getfullargspec(session_module.Session.run)[0]
and key not in ['inputs', 'outputs', 'updates', 'name']):
msg = ('Invalid argument "%s" passed to K.function with TensorFlow '
'backend') % key
raise ValueError(msg)
return GraphExecutionFunction(inputs, outputs, updates=updates, **kwargs)
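# Hedged usage sketch (assuming `K` is this backend module and `np` is numpy):
# building a backend function that maps placeholder inputs to concrete Numpy
# outputs. Shown for graph mode; under eager execution the same call
# dispatches to EagerExecutionFunction.
#
#   x = K.placeholder(shape=(None, 3))
#   y = K.square(x)
#   f = K.function([x], [y])
#   f([np.array([[1., 2., 3.]])])    # -> [array([[1., 4., 9.]])]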
@keras_export('keras.backend.gradients')
def gradients(loss, variables):
"""Returns the gradients of `loss` w.r.t. `variables`.
Arguments:
loss: Scalar tensor to minimize.
variables: List of variables.
Returns:
A gradients tensor.
"""
return gradients_module.gradients(
loss, variables, colocate_gradients_with_ops=True)
@keras_export('keras.backend.stop_gradient')
def stop_gradient(variables):
"""Returns `variables` but with zero gradient w.r.t. every other variable.
Arguments:
variables: Tensor or list of tensors to consider constant with respect
to any other variable.
Returns:
A single tensor or a list of tensors (depending on the passed argument)
that has no gradient with respect to any other variable.
"""
if isinstance(variables, (list, tuple)):
    return [array_ops.stop_gradient(v) for v in variables]
return array_ops.stop_gradient(variables)
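# Hedged usage sketch (assuming `K` is this backend module): symbolic
# gradients and gradient blocking.
#
#   w = K.variable(2.0)
#   loss = K.square(w)
#   grads = K.gradients(loss, [w])    # d(w^2)/dw, evaluates to ~4.0
#   frozen = K.stop_gradient(w)       # contributes zero gradient to any loss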
# CONTROL FLOW
@keras_export('keras.backend.rnn')
def rnn(step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False):
"""Iterates over the time dimension of a tensor.
Arguments:
step_function: RNN step function.
Args;
input; Tensor with shape `(samples, ...)` (no time dimension),
representing input for the batch of samples at a certain
time step.
states; List of tensors.
Returns;
output; Tensor with shape `(samples, output_dim)`
(no time dimension).
new_states; List of tensors, same length and shapes
as 'states'. The first state in the list must be the
output tensor at the previous timestep.
inputs: Tensor of temporal data of shape `(samples, time, ...)`
(at least 3D), or nested tensors, and each of which has shape
`(samples, time, ...)`.
initial_states: Tensor with shape `(samples, state_size)`
(no time dimension), containing the initial values for the states used
in the step function. In the case that state_size is in a nested
shape, the shape of initial_states will also follow the nested
structure.
go_backwards: Boolean. If True, do the iteration over the time
dimension in reverse order and return the reversed sequence.
mask: Binary tensor with shape `(samples, time, 1)`,
with a zero for every element that is masked.
constants: List of constant values passed at each step.
unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
input_length: If specified, assume time dimension is of this length.
time_major: Boolean. If true, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean. If True, the output for masked timestep
will be zeros, whereas in the False case, output from previous
timestep is returned.
Returns:
A tuple, `(last_output, outputs, new_states)`.
last_output: the latest output of the rnn, of shape `(samples, ...)`
outputs: tensor with shape `(samples, time, ...)` where each
entry `outputs[s, t]` is the output of the step function
at time `t` for sample `s`.
new_states: list of tensors, latest states returned by
the step function, of shape `(samples, ...)`.
Raises:
ValueError: if input dimension is less than 3.
ValueError: if `unroll` is `True` but input timestep is not a fixed
number.
ValueError: if `mask` is provided (not `None`) but states is not provided
(`len(states)` == 0).
"""
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return array_ops.transpose(input_t, axes)
if not time_major:
inputs = nest.map_structure(swap_batch_timestep, inputs)
flatted_inputs = nest.flatten(inputs)
time_steps = flatted_inputs[0].shape[0]
batch = flatted_inputs[0].shape[1]
time_steps_t = array_ops.shape(flatted_inputs[0])[0]
for input_ in flatted_inputs:
input_.shape.with_rank_at_least(3)
if mask is not None:
if mask.dtype != dtypes_module.bool:
mask = math_ops.cast(mask, dtypes_module.bool)
if len(mask.shape) == 2:
mask = expand_dims(mask)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
# tf.where needs its condition tensor to be the same shape as its two
# result tensors, but in our case the condition (mask) tensor is
# (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
# So we need to broadcast the mask to match the shape of inputs.
  # That's what the tile call does: it just repeats the mask along its
# second dimension n times.
def _expand_mask(mask_t, input_t, fixed_dim=1):
assert not nest.is_sequence(mask_t)
assert not nest.is_sequence(input_t)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = array_ops.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
return array_ops.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError('Unrolling requires a fixed number of timesteps.')
states = tuple(initial_states)
successive_states = []
successive_outputs = []
    # Process the input tensors. The input tensors need to be split on the
    # time_step dim and reversed if go_backwards is True. In the case of
    # nested input, the input is flattened and then transformed individually.
    # The result is a tuple of lists; each item in the tuple is a list of
    # tensors with shape (batch, feature).
def _process_single_input_t(input_t):
input_t = array_ops.unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if nest.is_sequence(inputs):
processed_input = nest.map_structure(_process_single_input_t, inputs)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return nest.pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = array_ops.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(inp,
tuple(states) + tuple(constants))
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = array_ops.where(tiled_mask_t, output, prev_output)
return_states = []
for state, new_state in zip(states, new_states):
# (see earlier comment for tile explanation)
tiled_mask_t = _expand_mask(mask_t, new_state)
return_states.append(array_ops.where(tiled_mask_t, new_state, state))
states = return_states
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
if zero_output_for_mask:
last_output = array_ops.where(
_expand_mask(mask_list[-1], last_output),
last_output,
zeros_like(last_output))
outputs = array_ops.where(
_expand_mask(mask, outputs, fixed_dim=2),
outputs,
zeros_like(outputs))
else:
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(inp, tuple(states) + tuple(constants))
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
else:
states = tuple(initial_states)
    # Create the input TensorArrays. If the input is a nested structure of
    # tensors, it is flattened first and one TensorArray is created per
    # flattened tensor.
input_ta = tuple(
tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=time_steps_t,
tensor_array_name='input_ta_%s' % i)
for i, inp in enumerate(flatted_inputs))
input_ta = tuple(
ta.unstack(input_) if not go_backwards else ta
.unstack(reverse(input_, 0))
for ta, input_ in zip(input_ta, flatted_inputs))
    # Get the time(0) input and compute the output for it; the output will be
    # used to determine the dtype of the output tensor array. Don't read from
    # input_ta because TensorArray's clear_after_read defaults to True.
input_time_zero = nest.pack_sequence_as(inputs,
[inp[0] for inp in flatted_inputs])
# output_time_zero is used to determine the cell output shape and its dtype.
    # The value is discarded.
output_time_zero, _ = step_function(input_time_zero,
initial_states + constants)
output_ta = tuple(
tensor_array_ops.TensorArray(
dtype=out.dtype,
size=time_steps_t,
tensor_array_name='output_ta_%s' % i)
for i, out in enumerate(nest.flatten(output_time_zero)))
time = constant_op.constant(0, dtype='int32', name='time')
while_loop_kwargs = {
'cond': lambda time, *_: time < time_steps_t,
'maximum_iterations': input_length,
'parallel_iterations': 32,
'swap_memory': True,
}
if mask is not None:
if not states:
raise ValueError('No initial states provided! '
'When using masking in an RNN, you should '
'provide initial states '
'(and your step function should return '
'as its first state at time `t` '
'the output at time `t-1`).')
if go_backwards:
mask = reverse(mask, 0)
mask_ta = tensor_array_ops.TensorArray(
dtype=dtypes_module.bool,
size=time_steps_t,
tensor_array_name='mask_ta')
mask_ta = mask_ta.unstack(mask)
      # The mask for the output at time T is based on the output at T - 1. In
      # the case T = 0, a zero-filled tensor is used.
flat_zero_output = tuple(array_ops.zeros_like(o)
for o in nest.flatten(output_time_zero))
def _step(time, output_ta_t, prev_output, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
# maybe set shape.
current_input = nest.pack_sequence_as(inputs, current_input)
mask_t = mask_ta.read(time)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
# mask output
flat_output = nest.flatten(output)
flat_mask_output = (flat_zero_output if zero_output_for_mask
else nest.flatten(prev_output))
tiled_mask_t = tuple(_expand_mask(mask_t, o) for o in flat_output)
flat_new_output = tuple(
array_ops.where(m, o, zo) for m, o, zo in zip(
tiled_mask_t, flat_output, flat_mask_output))
# mask states
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if hasattr(new_state, 'set_shape'):
new_state.set_shape(state.shape)
tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_state)
flat_final_state = tuple(
array_ops.where(m, s, ps)
for m, s, ps in zip(tiled_mask_t, flat_new_state, flat_state))
new_states = nest.pack_sequence_as(new_states, flat_final_state)
output_ta_t = tuple(
ta.write(time, out)
for ta, out in zip(output_ta_t, flat_new_output))
return (time + 1, output_ta_t,
tuple(flat_new_output)) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta, flat_zero_output) + states,
**while_loop_kwargs)
# Skip final_outputs[2] which is the output for final timestep.
new_states = final_outputs[3:]
else:
def _step(time, output_ta_t, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
*states: List of states.
Returns:
          Tuple: `(time + 1, output_ta_t) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
current_input = nest.pack_sequence_as(inputs, current_input)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if hasattr(new_state, 'set_shape'):
new_state.set_shape(state.shape)
flat_output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
new_states = nest.pack_sequence_as(initial_states, flat_new_state)
return (time + 1, output_ta_t) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta) + states,
**while_loop_kwargs)
new_states = final_outputs[2:]
output_ta = final_outputs[1]
outputs = tuple(o.stack() for o in output_ta)
last_output = tuple(o[-1] for o in outputs)
outputs = nest.pack_sequence_as(output_time_zero, outputs)
last_output = nest.pack_sequence_as(output_time_zero, last_output)
# static shape inference
def set_shape(output_):
if hasattr(output_, 'set_shape'):
shape = output_.shape.as_list()
shape[0] = time_steps
shape[1] = batch
output_.set_shape(shape)
return output_
outputs = nest.map_structure(set_shape, outputs)
if not time_major:
outputs = nest.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
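# Hedged usage sketch (assuming `K` is this backend module; batch and feature
# sizes below are illustrative only): a minimal cumulative-sum "RNN" over the
# time axis.
#
#   def step(inp, states):              # inp: (samples, dim)
#       new_sum = states[0] + inp
#       return new_sum, [new_sum]       # first state is the previous output
#
#   inputs = K.zeros((4, 10, 8))        # (samples, time, dim)
#   init = [K.zeros((4, 8))]
#   last, outs, states = K.rnn(step, inputs, init)
#   # outs should have shape (4, 10, 8); last equals outs[:, -1, :]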
@keras_export('keras.backend.switch')
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value.
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
Arguments:
condition: tensor (`int` or `bool`).
then_expression: either a tensor, or a callable that returns a tensor.
else_expression: either a tensor, or a callable that returns a tensor.
Returns:
The selected tensor.
Raises:
ValueError: If rank of `condition` is greater than rank of expressions.
"""
if condition.dtype != dtypes_module.bool:
condition = math_ops.cast(condition, 'bool')
cond_ndim = ndim(condition)
if not cond_ndim:
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
else:
# tf.where needs its condition tensor
# to be the same shape as its two
# result tensors
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
expr_ndim = ndim(then_expression)
if cond_ndim > expr_ndim:
raise ValueError('Rank of `condition` should be less than or'
' equal to rank of `then_expression` and '
'`else_expression`. ndim(condition)=' + str(cond_ndim) +
', ndim(then_expression)'
'=' + str(expr_ndim))
if cond_ndim > 1:
ndim_diff = expr_ndim - cond_ndim
cond_shape = array_ops.concat(
[array_ops.shape(condition), [1] * ndim_diff], axis=0)
condition = array_ops.reshape(condition, cond_shape)
expr_shape = array_ops.shape(then_expression)
shape_diff = expr_shape - cond_shape
tile_shape = array_ops.where(shape_diff > 0, expr_shape,
array_ops.ones_like(expr_shape))
condition = array_ops.tile(condition, tile_shape)
x = array_ops.where(condition, then_expression, else_expression)
return x
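# Hedged usage sketch (assuming `K` is this backend module): a scalar
# condition with lazily evaluated branches.
#
#   x = K.constant([1., -2., 3.])
#   cond = K.greater(K.sum(x), 0.)                  # scalar boolean tensor
#   out = K.switch(cond, lambda: x * 10., lambda: K.zeros_like(x))
#   # evaluates to [10., -20., 30.] since sum(x) = 2 > 0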
@keras_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
"""Selects `x` in train phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in train phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on the `training` flag.
    The `training` flag defaults to `K.learning_phase()`.
"""
if training is None:
training = learning_phase()
if training == 1 or training is True:
if callable(x):
return x()
else:
return x
elif training == 0 or training is False:
if callable(alt):
return alt()
else:
return alt
# else: assume learning phase is a placeholder tensor.
x = switch(training, x, alt)
return x
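# Hedged usage sketch (assuming `K` is this backend module): train/test
# branching driven by the learning phase.
#
#   x = K.placeholder(shape=(None, 32))
#   noisy = x + K.random_normal(K.shape(x), stddev=0.1)
#   out = K.in_train_phase(noisy, x)      # noisy while training, plain x at test
#   out = K.in_train_phase(noisy, x, training=1)   # force the training branch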
@keras_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
"""Selects `x` in test phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in test phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on `K.learning_phase`.
"""
return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@keras_export('keras.backend.relu')
def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = alpha * (x - threshold)` otherwise.
Arguments:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
max_value: float. Saturation threshold.
threshold: float. Threshold value for thresholded activation.
Returns:
A tensor.
"""
if alpha != 0.:
if max_value is None and threshold == 0:
return nn.leaky_relu(x, alpha=alpha)
if threshold != 0:
negative_part = nn.relu(-x + threshold)
else:
negative_part = nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
x = x * math_ops.cast(math_ops.greater(x, threshold), floatx())
elif max_value == 6:
# if no threshold, then can use nn.relu6 native TF op for performance
x = nn.relu6(x)
clip_max = False
else:
x = nn.relu(x)
if clip_max:
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
zero = _constant_to_tensor(0., x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, max_value)
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x
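# Hedged sketch of the main relu variants exposed by this signature, for a
# float tensor `x` (assuming `K` is this backend module):
#
#   K.relu(x)                    # plain max(x, 0)
#   K.relu(x, alpha=0.1)         # leaky ReLU with slope 0.1 for x < 0
#   K.relu(x, max_value=6.)      # ReLU6 (clipped at 6, uses nn.relu6)
#   K.relu(x, threshold=1.)      # 0 at or below the threshold, x above it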
@keras_export('keras.backend.elu')
def elu(x, alpha=1.):
"""Exponential linear unit.
Arguments:
x: A tensor or variable to compute the activation function for.
alpha: A scalar, slope of negative section.
Returns:
A tensor.
"""
res = nn.elu(x)
if alpha == 1:
return res
else:
return array_ops.where(x > 0, res, alpha * res)
@keras_export('keras.backend.softmax')
def softmax(x, axis=-1):
"""Softmax of a tensor.
Arguments:
x: A tensor or variable.
axis: The dimension softmax would be performed on.
The default is -1 which indicates the last dimension.
Returns:
A tensor.
"""
return nn.softmax(x, axis=axis)
@keras_export('keras.backend.softplus')
def softplus(x):
"""Softplus of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softplus(x)
@keras_export('keras.backend.softsign')
def softsign(x):
"""Softsign of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softsign(x)
@keras_export('keras.backend.categorical_crossentropy')
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Softmax'):
axis = axis % len(output.shape)
# scale preds so that the class probas of each sample sum to 1
output = output / math_ops.reduce_sum(output, axis, True)
# Compute cross entropy from probabilities.
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
return -math_ops.reduce_sum(target * math_ops.log(output), axis)
else:
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
    # to avoid numerical issues when probabilities collapse to zero during
    # training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.softmax_cross_entropy_with_logits_v2(labels=target, logits=output)
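# Hedged numeric sketch (assuming `K` is this backend module): crossentropy
# against a one-hot target.
#
#   target = K.constant([[0., 1., 0.]])
#   probs = K.constant([[0.05, 0.90, 0.05]])
#   loss = K.categorical_crossentropy(target, probs)
#   # evaluates to roughly -log(0.90) ~= 0.105 per sample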
@keras_export('keras.backend.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Arguments:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Softmax'):
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
output = math_ops.log(output)
else:
# When softmax activation function is used for output operation, we
# use logits from the softmax function directly to compute loss in order
    # to avoid numerical issues when probabilities collapse to zero during
    # training.
# See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
rank = len(output.shape)
axis = axis % rank
if axis != rank - 1:
permutation = list(range(axis)) + list(range(axis + 1, rank)) + [axis]
output = array_ops.transpose(output, perm=permutation)
output_shape = output.shape
targets = cast(flatten(target), 'int64')
logits = array_ops.reshape(output, [-1, int(output_shape[-1])])
res = nn.sparse_softmax_cross_entropy_with_logits(
labels=targets, logits=logits)
if len(output_shape) >= 3:
# If our output includes timesteps or spatial dimensions we need to reshape
return array_ops.reshape(res, array_ops.shape(output)[:-1])
else:
return res
@keras_export('keras.backend.binary_crossentropy')
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
Returns:
A tensor.
"""
if not from_logits:
if (isinstance(output, (ops.EagerTensor, variables_module.Variable)) or
output.op.type != 'Sigmoid'):
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
# Compute cross entropy from probabilities.
bce = target * math_ops.log(output + epsilon())
bce += (1 - target) * math_ops.log(1 - output + epsilon())
return -bce
else:
# When sigmoid activation function is used for output operation, we
# use logits from the sigmoid function directly to compute loss in order
    # to avoid numerical issues when probabilities collapse to zero during
    # training.
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
@keras_export('keras.backend.sigmoid')
def sigmoid(x):
"""Element-wise sigmoid.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.sigmoid(x)
@keras_export('keras.backend.hard_sigmoid')
def hard_sigmoid(x):
"""Segment-wise linear approximation of sigmoid.
Faster than sigmoid.
Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)
point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)
x = math_ops.mul(x, point_two)
x = math_ops.add(x, point_five)
x = clip_ops.clip_by_value(x, 0., 1.)
return x
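# Hedged numeric sketch of the piecewise-linear approximation:
#
#   hard_sigmoid(-3.0) -> 0.0
#   hard_sigmoid( 0.0) -> 0.5
#   hard_sigmoid( 1.0) -> 0.2 * 1.0 + 0.5 = 0.7
#   hard_sigmoid( 3.0) -> 1.0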
@keras_export('keras.backend.tanh')
def tanh(x):
"""Element-wise tanh.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.tanh(x)
@keras_export('keras.backend.dropout')
def dropout(x, level, noise_shape=None, seed=None):
"""Sets entries in `x` to zero at random, while scaling the entire tensor.
Arguments:
x: tensor
level: fraction of the entries in the tensor
that will be set to 0.
noise_shape: shape for randomly generated keep/drop flags,
must be broadcastable to the shape of `x`
seed: random seed to ensure determinism.
Returns:
A tensor.
"""
if seed is None:
seed = np.random.randint(10e6)
return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)
@keras_export('keras.backend.l2_normalize')
def l2_normalize(x, axis=None):
"""Normalizes a tensor wrt the L2 norm alongside the specified axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform normalization.
Returns:
A tensor.
"""
return nn.l2_normalize(x, axis=axis)
@keras_export('keras.backend.in_top_k')
def in_top_k(predictions, targets, k):
"""Returns whether the `targets` are in the top `k` `predictions`.
Arguments:
predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
k: An `int`, number of top elements to consider.
Returns:
A 1D tensor of length `batch_size` and type `bool`.
`output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
values of `predictions[i]`.
"""
return nn.in_top_k(predictions, targets, k)
# CONVOLUTIONS
def _preprocess_conv1d_input(x, data_format):
"""Transpose and cast the input before the conv1d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NWC' # to pass TF Conv2dNative operations
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 1)) # NCW -> NWC
else:
tf_data_format = 'NCW'
return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
"""Transpose and cast the input before the conv2d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
force_transpose: Boolean. If True, the input will always be transposed
from NCHW to NHWC if `data_format` is `"channels_first"`.
If False, the transposition only occurs on CPU (GPU ops are
assumed to support NCHW).
Returns:
A tensor.
"""
tf_data_format = 'NHWC'
if data_format == 'channels_first':
if not _has_nchw_support() or force_transpose:
x = array_ops.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
else:
tf_data_format = 'NCHW'
return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
"""Transpose and cast the input before the conv3d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NDHWC'
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 3, 4, 1))
else:
tf_data_format = 'NCDHW'
return x, tf_data_format
def _preprocess_padding(padding):
"""Convert keras' padding to TensorFlow's padding.
Arguments:
padding: string, one of 'same' , 'valid'
Returns:
a string, one of 'SAME', 'VALID'.
Raises:
ValueError: if invalid `padding'`
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding
@keras_export('keras.backend.conv1d')
def conv1d(x,
kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: stride integer.
padding: string, `"same"`, `"causal"` or `"valid"`.
data_format: string, one of "channels_last", "channels_first".
dilation_rate: integer dilate rate.
Returns:
A tensor, result of 1D convolution.
Raises:
      ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = kernel.shape.as_list()
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel_shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
padding = _preprocess_padding(padding)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
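# Hedged shape sketch (assuming `K` is this backend module) for causal 1D
# convolution, with the kernel laid out as (kernel_size, in_channels,
# out_channels):
#
#   x = K.zeros((2, 10, 16))
#   kernel = K.zeros((3, 16, 32))
#   y = K.conv1d(x, kernel, padding='causal', dilation_rate=2)
#   # left-pads by dilation_rate * (kernel_size - 1) = 4 steps, so
#   # int_shape(y) should be (2, 10, 32) with no look-ahead into the future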
@keras_export('keras.backend.conv2d')
def conv2d(x,
kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow data format
for inputs/kernels/outputs.
dilation_rate: tuple of 2 integers.
Returns:
A tensor, result of 2D convolution.
Raises:
      ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
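# Hedged shape sketch (assuming `K` is this backend module) for 2D convolution
# in `channels_last` layout, with the kernel laid out as
# (kernel_h, kernel_w, in_channels, out_channels):
#
#   x = K.zeros((8, 28, 28, 1))
#   kernel = K.zeros((3, 3, 1, 16))
#   K.conv2d(x, kernel, padding='same')                  # -> (8, 28, 28, 16)
#   K.conv2d(x, kernel, strides=(2, 2), padding='valid') # -> (8, 13, 13, 16)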
@keras_export('keras.backend.conv2d_transpose')
def conv2d_transpose(x,
kernel,
output_shape,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D deconvolution (i.e.
transposed convolution).
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
dilation_rate: Tuple of 2 integers.
Returns:
A tensor, result of transposed 2D convolution.
Raises:
      ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
# `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
if data_format == 'channels_first' and dilation_rate != (1, 1):
force_transpose = True
else:
force_transpose = False
x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
if dilation_rate == (1, 1):
x = nn.conv2d_transpose(x, kernel, output_shape, strides,
padding=padding,
data_format=tf_data_format)
else:
assert dilation_rate[0] == dilation_rate[1]
x = nn.atrous_conv2d_transpose(
x,
kernel,
output_shape,
rate=dilation_rate[0],
padding=padding)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def separable_conv1d(x,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: stride integer.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: integer dilation rate.
Returns:
Output tensor.
Raises:
      ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NWC':
spatial_start_dim = 1
strides = (1,) + strides * 2 + (1,)
else:
spatial_start_dim = 2
strides = (1, 1) + strides * 2
x = array_ops.expand_dims(x, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
dilation_rate = (1,) + dilation_rate
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
x = array_ops.squeeze(x, [spatial_start_dim])
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.separable_conv2d')
def separable_conv2d(x,
depthwise_kernel,
pointwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
      ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
ValueError: if `strides` is not a tuple of 2 integers.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def depthwise_conv2d(x,
depthwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
        dilation rates for the depthwise convolution.
Returns:
Output tensor.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.depthwise_conv2d(
x,
depthwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
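# Editor's note: the sketch below is an illustrative usage example added for
# clarity; `_example_depthwise_conv2d` is not part of the original backend API.
# It assumes the default 'channels_last' image data format and reuses this
# module's `random_uniform` helper to build dummy tensors.
def _example_depthwise_conv2d():
  x = random_uniform((1, 8, 8, 3))  # (batch, rows, cols, in_channels)
  # Depthwise kernel shape: (kernel_h, kernel_w, in_channels, depth_multiplier).
  kernel = random_uniform((3, 3, 3, 2))
  y = depthwise_conv2d(x, kernel, strides=(1, 1), padding='same')
  # Each input channel is filtered separately, so the output has
  # in_channels * depth_multiplier = 6 channels: int_shape(y) == (1, 8, 8, 6).
  return y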
@keras_export('keras.backend.conv3d')
def conv3d(x,
kernel,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1)):
"""3D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
dilation_rate: tuple of 3 integers.
Returns:
A tensor, result of 3D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
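# Editor's note: illustrative shape sketch for conv3d (an editor addition, not
# original backend code). It assumes the default 'channels_last' format, i.e.
# NDHWC inputs, and uses this module's `random_uniform` helper for dummy data.
def _example_conv3d():
  vol = random_uniform((1, 8, 8, 8, 2))  # (batch, depth, rows, cols, channels)
  # Kernel shape: (kernel_d, kernel_h, kernel_w, in_channels, out_channels).
  kernel = random_uniform((3, 3, 3, 2, 4))
  y = conv3d(vol, kernel, strides=(1, 1, 1), padding='same')
  return y  # int_shape(y) == (1, 8, 8, 8, 4)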
def conv3d_transpose(x,
kernel,
output_shape,
strides=(1, 1, 1),
padding='valid',
data_format=None):
"""3D deconvolution (i.e.
transposed convolution).
Arguments:
x: input tensor.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: string, `"channels_last"` or `"channels_first"`.
Whether to use Theano or TensorFlow/CNTK data format
for inputs/kernels/outputs.
Returns:
A tensor, result of transposed 3D convolution.
Raises:
    ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[4], output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.conv3d_transpose(
x,
kernel,
output_shape,
strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
@keras_export('keras.backend.pool2d')
def pool2d(x,
pool_size,
strides=(1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""2D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 2D pooling.
Raises:
    ValueError: if `data_format` is neither `"channels_last"` nor
`"channels_first"`.
ValueError: if `pool_size` is not a tuple of 2 integers.
ValueError: if `strides` is not a tuple of 2 integers.
    ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(pool_size) != 2:
raise ValueError('`pool_size` must be a tuple of 2 integers.')
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
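# Editor's note: minimal, illustrative pool2d example (an editor addition, not
# original backend code); it assumes the default 'channels_last' data format.
def _example_pool2d():
  img = random_uniform((1, 4, 4, 3))  # (batch, rows, cols, channels)
  pooled = pool2d(img, pool_size=(2, 2), strides=(2, 2),
                  padding='valid', pool_mode='max')
  # 2x2 max pooling with stride 2 halves the spatial dimensions:
  # int_shape(pooled) == (1, 2, 2, 3).
  return pooled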
@keras_export('keras.backend.pool3d')
def pool3d(x,
pool_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""3D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 3 integers.
strides: tuple of 3 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 3D pooling.
Raises:
    ValueError: if `data_format` is neither `"channels_last"` nor
`"channels_first"`.
    ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply N-D convolution with un-shared weights.
Arguments:
inputs: (N+2)-D tensor with shape
(batch_size, channels_in, d_in1, ..., d_inN)
if data_format='channels_first', or
(batch_size, d_in1, ..., d_inN, channels_in)
if data_format='channels_last'.
kernel: the unshared weight for N-D convolution,
with shape (output_items, feature_dim, channels_out), where
feature_dim = np.prod(kernel_size) * channels_in,
output_items = np.prod(output_shape).
kernel_size: a tuple of N integers, specifying the
spatial dimensions of the N-D convolution window.
strides: a tuple of N integers, specifying the strides
of the convolution along the spatial dimensions.
output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
dimensionality of the output.
data_format: string, "channels_first" or "channels_last".
Returns:
An (N+2)-D tensor with shape:
(batch_size, channels_out) + output_shape
if data_format='channels_first', or:
(batch_size,) + output_shape + (channels_out,)
if data_format='channels_last'.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = int_shape(kernel)
feature_dim = kernel_shape[1]
channels_out = kernel_shape[-1]
ndims = len(output_shape)
spatial_dimensions = list(range(ndims))
xs = []
output_axes_ticks = [range(axis_max) for axis_max in output_shape]
for position in itertools.product(*output_axes_ticks):
slices = [slice(None)]
if data_format == 'channels_first':
slices.append(slice(None))
slices.extend([slice(position[d] * strides[d],
position[d] * strides[d] + kernel_size[d])
for d in spatial_dimensions])
if data_format == 'channels_last':
slices.append(slice(None))
xs.append(reshape(inputs[slices], (1, -1, feature_dim)))
x_aggregate = concatenate(xs, axis=0)
output = batch_dot(x_aggregate, kernel)
output = reshape(output, output_shape + (-1, channels_out))
if data_format == 'channels_first':
permutation = [ndims, ndims + 1] + spatial_dimensions
else:
permutation = [ndims] + spatial_dimensions + [ndims + 1]
return permute_dimensions(output, permutation)
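# Editor's note: an illustrative walk-through of the kernel bookkeeping above
# (added for clarity, not original backend code). Shapes follow the docstring:
# feature_dim = prod(kernel_size) * channels_in, output_items = prod(output_shape).
def _example_local_conv():
  inputs = random_uniform((2, 8, 3))  # channels_last: (batch, d_in1, channels_in)
  kernel_size = (3,)
  strides = (1,)
  output_shape = (6,)  # (8 - 3) // 1 + 1 valid window positions
  # feature_dim = 3 * 3 = 9, output_items = 6, channels_out = 4.
  kernel = random_uniform((6, 9, 4))
  out = local_conv(inputs, kernel, kernel_size, strides, output_shape,
                   data_format='channels_last')
  return out  # int_shape(out) == (2, 6, 4)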
@keras_export('keras.backend.local_conv1d')
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
"""Apply 1D conv with un-shared weights.
Arguments:
inputs: 3D tensor with shape:
(batch_size, steps, input_dim)
if data_format is "channels_last" or
(batch_size, input_dim, steps)
if data_format is "channels_first".
kernel: the unshared weight for convolution,
with shape (output_length, feature_dim, filters).
kernel_size: a tuple of a single integer,
specifying the length of the 1D convolution window.
strides: a tuple of a single integer,
specifying the stride length of the convolution.
data_format: the data format, channels_first or channels_last.
Returns:
A 3d tensor with shape:
      (batch_size, filters, output_length)
if data_format='channels_first'
or 3D tensor with shape:
      (batch_size, output_length, filters)
if data_format='channels_last'.
"""
output_shape = (kernel.shape[0],)
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.local_conv2d')
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply 2D conv with un-shared weights.
Arguments:
inputs: 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
kernel: the unshared weight for convolution,
with shape (output_items, feature_dim, filters).
kernel_size: a tuple of 2 integers, specifying the
width and height of the 2D convolution window.
strides: a tuple of 2 integers, specifying the strides
of the convolution along the width and height.
output_shape: a tuple with (output_row, output_col).
data_format: the data format, channels_first or channels_last.
Returns:
A 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
"""
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.bias_add')
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
Arguments:
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
Output tensor.
Raises:
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
the bias should be either a vector or
        a tensor with ndim(x) - 1 dimensions
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError(
        'Unexpected bias dimensions %d, expected to be 1 or %d dimensions' %
        (len(bias_shape), ndim(x) - 1))
# pylint: disable=g-no-augmented-assignment
if ndim(x) == 5:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1, 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[3]) + bias_shape[:3])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 4:
if data_format == 'channels_first':
if len(bias_shape) == 1:
if _has_nchw_support():
x = nn.bias_add(x, bias, data_format='NCHW')
else:
x = x + reshape(bias, (1, bias_shape[0], 1, 1))
else:
x = x + reshape(bias, (1, bias_shape[2]) + bias_shape[:2])
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = nn.bias_add(x, bias, data_format='NHWC')
else:
x = x + reshape(bias, (1,) + bias_shape)
elif ndim(x) == 3:
if data_format == 'channels_first':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, bias_shape[0], 1))
else:
x = x + reshape(bias, (1, bias_shape[1], bias_shape[0]))
elif data_format == 'channels_last':
if len(bias_shape) == 1:
x = x + reshape(bias, (1, 1, bias_shape[0]))
else:
x = x + reshape(bias, (1,) + bias_shape)
else:
x = nn.bias_add(x, bias)
# pylint: enable=g-no-augmented-assignment
return x
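# Editor's note: small, illustrative bias_add example (an editor addition, not
# original backend code); the bias vector is broadcast over the channel axis.
def _example_bias_add():
  x = random_uniform((2, 4, 4, 3))   # NHWC feature map
  bias = random_uniform((3,))        # one bias value per channel
  return bias_add(x, bias, data_format='channels_last')  # shape stays (2, 4, 4, 3)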
# RANDOMNESS
@keras_export('keras.backend.random_normal')
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with normal distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: A float, mean of the normal distribution to draw samples.
stddev: A float, standard deviation of the normal distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_uniform')
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Returns a tensor with uniform distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
minval: A float, lower boundary of the uniform distribution
to draw samples.
maxval: A float, upper boundary of the uniform distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_binomial')
def random_binomial(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random binomial distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of binomial distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return array_ops.where(
random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
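# Editor's note: illustrative sketch (an editor addition). random_binomial draws
# element-wise Bernoulli(p) samples by thresholding a uniform draw at p, which
# is handy e.g. for building dropout-style masks.
def _example_random_binomial():
  mask = random_binomial(shape=(1000,), p=0.3, seed=42)
  return mask  # roughly 30% of the entries are 1.0, the rest are 0.0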
@keras_export('keras.backend.truncated_normal')
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution
with specified mean and standard deviation,
except that values whose magnitude is more than
two standard deviations from the mean are dropped and re-picked.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: Mean of the values.
stddev: Standard deviation of the values.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.truncated_normal(
shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wraps up the beam search code that is
# in TensorFlow's CTC implementation
@keras_export('keras.backend.ctc_label_dense_to_sparse')
def ctc_label_dense_to_sparse(labels, label_lengths):
"""Converts CTC labels from dense to sparse.
Arguments:
labels: dense CTC labels.
label_lengths: length of the labels.
Returns:
A sparse tensor representation of the labels.
"""
label_shape = array_ops.shape(labels)
num_batches_tns = array_ops.stack([label_shape[0]])
max_num_labels_tns = array_ops.stack([label_shape[1]])
def range_less_than(_, current_input):
return array_ops.expand_dims(
math_ops.range(label_shape[1]), 0) < array_ops.fill(
max_num_labels_tns, current_input)
init = math_ops.cast(
array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
dense_mask = functional_ops.scan(
range_less_than, label_lengths, initializer=init, parallel_iterations=1)
dense_mask = dense_mask[:, 0, :]
label_array = array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
label_shape)
label_ind = array_ops.boolean_mask(label_array, dense_mask)
batch_array = array_ops.transpose(
array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
reverse(label_shape, 0)))
batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
indices = array_ops.transpose(
array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
vals_sparse = array_ops.gather_nd(labels, indices)
return sparse_tensor.SparseTensor(
math_ops.cast(indices, dtypes_module.int64), vals_sparse,
math_ops.cast(label_shape, dtypes_module.int64))
@keras_export('keras.backend.ctc_batch_cost')
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
"""Runs CTC loss algorithm on each batch element.
Arguments:
y_true: tensor `(samples, max_string_length)`
containing the truth labels.
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_pred`.
label_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_true`.
Returns:
Tensor with shape (samples,1) containing the
CTC loss of each element.
"""
label_length = math_ops.cast(
array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)
input_length = math_ops.cast(
array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)
sparse_labels = math_ops.cast(
ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
return array_ops.expand_dims(
ctc.ctc_loss(
inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
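# Editor's note: hedged shape sketch for ctc_batch_cost (an editor addition,
# not original backend code). It assumes the `constant` helper defined earlier
# in this module and uses random values as a stand-in for real softmax outputs;
# label values stay below num_categories - 1 because the last class is
# conventionally reserved for the CTC blank.
def _example_ctc_batch_cost():
  samples, time_steps, num_categories = 2, 10, 5
  y_pred = random_uniform((samples, time_steps, num_categories))  # stand-in for softmax output
  y_true = constant([[1, 2, 3, 0], [2, 2, 1, 0]], dtype='int32')  # (samples, max_string_length=4)
  input_length = constant([[time_steps], [time_steps]], dtype='int32')
  label_length = constant([[4], [3]], dtype='int32')
  loss = ctc_batch_cost(y_true, y_pred, input_length, label_length)
  return loss  # int_shape(loss) == (samples, 1)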
@keras_export('keras.backend.ctc_decode')
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
"""Decodes the output of a softmax.
Can use either greedy search (also known as best path)
or a constrained dictionary search.
Arguments:
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, )` containing the sequence length for
each batch item in `y_pred`.
greedy: perform much faster best-path search if `true`.
This does not use a dictionary.
beam_width: if `greedy` is `false`: a beam search decoder will be used
with a beam of this width.
top_paths: if `greedy` is `false`,
how many of the most probable paths will be returned.
Returns:
Tuple:
List: if `greedy` is `true`, returns a list of one element that
contains the decoded sequence.
If `false`, returns the `top_paths` most probable
decoded sequences.
Important: blank labels are returned as `-1`.
Tensor `(top_paths, )` that contains
the log probability of each decoded sequence.
"""
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
input_length = math_ops.cast(input_length, dtypes_module.int32)
if greedy:
(decoded, log_prob) = ctc.ctc_greedy_decoder(
inputs=y_pred, sequence_length=input_length)
else:
(decoded, log_prob) = ctc.ctc_beam_search_decoder(
inputs=y_pred,
sequence_length=input_length,
beam_width=beam_width,
top_paths=top_paths)
decoded_dense = [
sparse_ops.sparse_to_dense(
st.indices, st.dense_shape, st.values, default_value=-1)
for st in decoded
]
return (decoded_dense, log_prob)
# HIGH ORDER FUNCTIONS
@keras_export('keras.backend.map_fn')
def map_fn(fn, elems, name=None, dtype=None):
"""Map the function fn over the elements elems and return the outputs.
Arguments:
fn: Callable that will be called upon each element in elems
elems: tensor
name: A string name for the map node in the graph
dtype: Output data type.
Returns:
Tensor with dtype `dtype`.
"""
return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)
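# Editor's note: tiny illustrative example (an editor addition); `constant` is
# the backend helper defined earlier in this module.
def _example_map_fn():
  elems = constant([1.0, 2.0, 3.0])
  return map_fn(lambda t: t * t, elems)  # -> [1., 4., 9.]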
@keras_export('keras.backend.foldl')
def foldl(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from left to right.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[0]` in case of None)
name: A string name for the foldl node in the graph
Returns:
Tensor with same type and shape as `initializer`.
"""
return functional_ops.foldl(fn, elems, initializer=initializer, name=name)
@keras_export('keras.backend.foldr')
def foldr(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from right to left.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[-1]` in case of None)
name: A string name for the foldr node in the graph
Returns:
Same type and shape as initializer
"""
return functional_ops.foldr(fn, elems, initializer=initializer, name=name)
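# Editor's note: illustrative example for the fold helpers above (an editor
# addition); `constant` is the backend helper defined earlier in this module.
# For a commutative, associative reducer such as addition, foldl and foldr give
# the same result and differ only in traversal order.
def _example_folds():
  elems = constant([1.0, 2.0, 3.0, 4.0])
  left = foldl(lambda acc, x: acc + x, elems)   # ((1 + 2) + 3) + 4 = 10
  right = foldr(lambda acc, x: acc + x, elems)  # 1 + (2 + (3 + 4)) = 10
  return left, right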
# Load Keras default configuration from config file if present.
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if 'KERAS_HOME' in os.environ:
_keras_dir = os.environ.get('KERAS_HOME')
else:
_keras_base_dir = os.path.expanduser('~')
_keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
try:
_config = json.load(open(_config_path))
except ValueError:
_config = {}
_floatx = _config.get('floatx', floatx())
assert _floatx in {'float16', 'float32', 'float64'}
_epsilon = _config.get('epsilon', epsilon())
assert isinstance(_epsilon, float)
_image_data_format = _config.get('image_data_format', image_data_format())
assert _image_data_format in {'channels_last', 'channels_first'}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
# Save config file.
if not os.path.exists(_keras_dir):
try:
os.makedirs(_keras_dir)
except OSError:
# Except permission denied and potential race conditions
# in multi-threaded environments.
pass
if not os.path.exists(_config_path):
_config = {
'floatx': floatx(),
'epsilon': epsilon(),
'backend': 'tensorflow',
'image_data_format': image_data_format()
}
try:
with open(_config_path, 'w') as f:
f.write(json.dumps(_config, indent=4))
except IOError:
# Except permission denied.
pass
def in_multi_worker_mode():
"""Whether we are operating in a Multi-Worker setting."""
tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
cluster_spec = server_lib.ClusterSpec(tf_config.get('cluster', {}))
return tf_config and 'master' not in cluster_spec.jobs
def configure_and_create_distributed_session(distribution_strategy):
"""Configure session config and create a session with it."""
def _create_session(distribution_strategy):
"""Create the Distributed Strategy session."""
session_config = get_default_session_config()
# If a session already exists, merge in its config; in the case there is a
# conflict, take values of the existing config.
global _SESSION
if getattr(_SESSION, 'session', None) and _SESSION.session._config:
session_config.MergeFrom(_SESSION.session._config)
if is_tpu_strategy(distribution_strategy):
# TODO(priyag, yuefengz): Remove this workaround when Distribute
# Coordinator is integrated with keras and we can create a session from
# there.
distribution_strategy.configure(session_config)
master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access
session = session_module.Session(config=session_config, target=master)
else:
worker_context = dc_context.get_current_worker_context()
if worker_context:
dc_session_config = worker_context.session_config
# Merge the default session config to the one from distribute
# coordinator, which is fine for now since they don't have
# conflicting configurations.
dc_session_config.MergeFrom(session_config)
session = session_module.Session(
config=dc_session_config, target=worker_context.master_target)
else:
distribution_strategy.configure(session_config)
session = session_module.Session(config=session_config)
set_session(session)
if in_multi_worker_mode():
dc.run_distribute_coordinator(
_create_session,
distribution_strategy,
mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
else:
_create_session(distribution_strategy)
def is_tpu_strategy(strategy):
"""We're executing TPU Strategy."""
return (strategy is not None and
strategy.__class__.__name__.startswith('TPUStrategy'))
def cast_variables_to_tensor(tensors):
def _cast_variables_to_tensor(tensor):
if isinstance(tensor, variables_module.Variable):
return array_ops.identity(tensor)
return tensor
return nest.map_structure(_cast_variables_to_tensor, tensors)
| 31.304172 | 112 | 0.662829 |
7941f762528b4b8f00d12a834fc791b54d351b32 | 4,984 | py | Python | actionserver/actions/action_complaintform.py | Ajju2211/Restaurant-Bot-Automation | 64eed75f26bebaf7cf755790d3f6d012b452c2d3 | [
"Apache-2.0"
] | null | null | null | actionserver/actions/action_complaintform.py | Ajju2211/Restaurant-Bot-Automation | 64eed75f26bebaf7cf755790d3f6d012b452c2d3 | [
"Apache-2.0"
] | null | null | null | actionserver/actions/action_complaintform.py | Ajju2211/Restaurant-Bot-Automation | 64eed75f26bebaf7cf755790d3f6d012b452c2d3 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Text, Dict, List, Union
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction
# from rasa_core.events import (UserUtteranceReverted, UserUttered,
# ActionExecuted, Event)
from rasa_sdk.events import AllSlotsReset, SlotSet
from rasa.core.constants import REQUESTED_SLOT
from rasa.core.slots import Slot
import pandas as pd
import json
from actionserver.utils import utilities as util
from actionserver.controllers.faqs.faq import FAQ
from actionserver.controllers.constants.orderForm import *
import logging
from actionserver.utils.utilities import INVALID_VALUE
dish_list = []
quant_list = [] # takes quantity from user
logger = logging.getLogger(__name__)
with open(r'.\actionserver\custom_payload.json') as f:
restaurant_menu = json.load(f)
def query_back(dispatcher):
dispatcher.utter_message("Going back to queries!!!")
greet_utter = UserUttered(text="/greet", parse_data={
"intent": {"confidence": 1.0, "name": "greet"},
"entities": []
})
query_utter = UserUttered(text="/query_init", parse_data={
"intent": {"confidence": 1.0, "name": "query_init"},
"entities": []
})
return [
greet_utter,
FollowupAction(name="utter_greet"),
query_utter,
FollowupAction(name="utter_query_type")
]
def greet_back(dispatcher):
dispatcher.utter_message("Going back!!!")
return [UserUttered(text="/greet", parse_data={
"intent": {"confidence": 1.0, "name": "greet"},
"entities": []
}), FollowupAction(name="utter_greet")]
class ComplainForm(FormAction):
def name(self):
return "complain_form"
@staticmethod
def required_slots(tracker):
if tracker.get_slot("complain_type"):
return ["complain_type", "complain_text"]
else:
return ["complain_type"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
"""A dictionary to map required slots to
- an extracted entity
- intent: value pairs
- a whole message
or a list of them, where a first match will be picked"""
return {"complain_type": [self.from_entity("complain_type"), self.from_text()], "complain_text": [self.from_entity(entity="navigation"), self.from_text()]}
# return {"complain_type": self.from_entity("complain_type"),"complain_text": self.from_entity(entity="any_thing")}
def validate_complain_type(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
complaints = ["food quality", "delivery", "naaniz app", "other"]
value = value.strip().lower()
if value == "back1" or value == "back":
return {"complain_type": INVALID_VALUE, "complain_text": INVALID_VALUE}
elif value in complaints:
return {"complain_type": value}
else:
dispatcher.utter_message("please type valid option.")
return {"complain_type": None}
def validate_complain_text(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
if value == "back2" or value.lower() == "back":
return {"complain_type": None, "complain_text": None}
else:
return {"complain_text": value}
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
if tracker.get_slot("complain_type") != INVALID_VALUE:
# saving
with open("./actionserver/customer_queries.json", "r") as queriesRef:
comp_type = tracker.get_slot("complain_type")
comp = tracker.get_slot("complain_text")
compObj = json.load(queriesRef)
compObj["complaints"].append({
"createdOn": util.timestamp(),
"complaint_area": comp_type,
"complaint": comp
})
with open("./actionserver/customer_queries.json", "w") as queriesRefWrite:
json.dump(compObj, queriesRefWrite, indent=4)
dispatcher.utter_message("Your Complaint :\n Complaint Area:{comp_type}\n Complaint: '{comp}' \n has been registered!".format(
comp_type=comp_type, comp=comp))
else:
dispatcher.utter_message("Complaints Form is closed")
li = [SlotSet("complain_type", None),
SlotSet("complain_text", None)]
li.extend(query_back(dispatcher))
return li
return [SlotSet("complain_type", None), SlotSet("complain_text", None)]
| 35.347518 | 163 | 0.626003 |
7941f77ef781fe41ec398d9da1f9e75100b44022 | 5,575 | py | Python | seq2seq/encoders/rnn_encoder.py | Aniruddha-Tapas/seq2seq | 29e4557860a7c6e997c039251195d9104a15585c | [
"Apache-2.0"
] | 53 | 2018-01-03T09:18:09.000Z | 2022-03-24T22:47:19.000Z | seq2seq/encoders/rnn_encoder.py | liyc7711/seq2seq | 1592b842b652ae648b96c164bead38eb089ce08e | [
"Apache-2.0"
] | 9 | 2018-01-29T19:14:54.000Z | 2020-11-03T23:41:05.000Z | seq2seq/encoders/rnn_encoder.py | liyc7711/seq2seq | 1592b842b652ae648b96c164bead38eb089ce08e | [
"Apache-2.0"
] | 31 | 2018-11-19T15:52:08.000Z | 2021-12-04T13:35:29.000Z | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Collection of RNN encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from tensorflow.contrib.rnn.python.ops import rnn
from seq2seq.encoders.encoder import Encoder, EncoderOutput
from seq2seq.training import utils as training_utils
def _unpack_cell(cell):
"""Unpack the cells because the stack_bidirectional_dynamic_rnn
expects a list of cells, one per layer."""
if isinstance(cell, tf.contrib.rnn.MultiRNNCell):
return cell._cells #pylint: disable=W0212
else:
return [cell]
def _default_rnn_cell_params():
"""Creates default parameters used by multiple RNN encoders.
"""
return {
"cell_class": "BasicLSTMCell",
"cell_params": {
"num_units": 128
},
"dropout_input_keep_prob": 1.0,
"dropout_output_keep_prob": 1.0,
"num_layers": 1,
"residual_connections": False,
"residual_combiner": "add",
"residual_dense": False
}
def _toggle_dropout(cell_params, mode):
"""Disables dropout during eval/inference mode
"""
cell_params = copy.deepcopy(cell_params)
if mode != tf.contrib.learn.ModeKeys.TRAIN:
cell_params["dropout_input_keep_prob"] = 1.0
cell_params["dropout_output_keep_prob"] = 1.0
return cell_params
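# Editor's note: illustrative sketch (not part of the original module) showing
# how _toggle_dropout neutralises dropout outside of training.
def _example_toggle_dropout():
  params = _default_rnn_cell_params()
  params["dropout_input_keep_prob"] = 0.5
  eval_params = _toggle_dropout(params, tf.contrib.learn.ModeKeys.EVAL)
  # eval_params["dropout_input_keep_prob"] == 1.0; the original dict is untouched.
  return eval_params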
class UnidirectionalRNNEncoder(Encoder):
"""
A unidirectional RNN encoder. Stacking should be performed as
part of the cell.
Args:
cell: An instance of tf.contrib.rnn.RNNCell
name: A name for the encoder
"""
def __init__(self, params, mode, name="forward_rnn_encoder"):
super(UnidirectionalRNNEncoder, self).__init__(params, mode, name)
self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)
@staticmethod
def default_params():
return {"rnn_cell": _default_rnn_cell_params()}
def encode(self, inputs, sequence_length, **kwargs):
cell = training_utils.get_rnn_cell(**self.params["rnn_cell"])
outputs, state = tf.nn.dynamic_rnn(
cell=cell,
inputs=inputs,
sequence_length=sequence_length,
dtype=tf.float32,
**kwargs)
return EncoderOutput(
outputs=outputs,
final_state=state,
attention_values=outputs,
attention_values_length=sequence_length)
class BidirectionalRNNEncoder(Encoder):
"""
A bidirectional RNN encoder. Uses the same cell for both the
forward and backward RNN. Stacking should be performed as part of
the cell.
Args:
cell: An instance of tf.contrib.rnn.RNNCell
name: A name for the encoder
"""
def __init__(self, params, mode, name="bidi_rnn_encoder"):
super(BidirectionalRNNEncoder, self).__init__(params, mode, name)
self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)
@staticmethod
def default_params():
return {"rnn_cell": _default_rnn_cell_params()}
def encode(self, inputs, sequence_length, **kwargs):
cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
outputs, states = tf.nn.bidirectional_dynamic_rnn(
cell_fw=cell_fw,
cell_bw=cell_bw,
inputs=inputs,
sequence_length=sequence_length,
dtype=tf.float32,
**kwargs)
# Concatenate outputs and states of the forward and backward RNNs
outputs_concat = tf.concat(outputs, 2)
return EncoderOutput(
outputs=outputs_concat,
final_state=states,
attention_values=outputs_concat,
attention_values_length=sequence_length)
class StackBidirectionalRNNEncoder(Encoder):
"""
A stacked bidirectional RNN encoder. Uses the same cell for both the
forward and backward RNN. Stacking should be performed as part of
the cell.
Args:
cell: An instance of tf.contrib.rnn.RNNCell
name: A name for the encoder
"""
def __init__(self, params, mode, name="stacked_bidi_rnn_encoder"):
super(StackBidirectionalRNNEncoder, self).__init__(params, mode, name)
self.params["rnn_cell"] = _toggle_dropout(self.params["rnn_cell"], mode)
@staticmethod
def default_params():
return {"rnn_cell": _default_rnn_cell_params()}
def encode(self, inputs, sequence_length, **kwargs):
cell_fw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
cell_bw = training_utils.get_rnn_cell(**self.params["rnn_cell"])
cells_fw = _unpack_cell(cell_fw)
cells_bw = _unpack_cell(cell_bw)
result = rnn.stack_bidirectional_dynamic_rnn(
cells_fw=cells_fw,
cells_bw=cells_bw,
inputs=inputs,
dtype=tf.float32,
sequence_length=sequence_length,
**kwargs)
outputs_concat, _output_state_fw, _output_state_bw = result
final_state = (_output_state_fw, _output_state_bw)
return EncoderOutput(
outputs=outputs_concat,
final_state=final_state,
attention_values=outputs_concat,
attention_values_length=sequence_length)
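# Editor's note: a minimal, hedged usage sketch for the encoders above (an
# editor addition, not part of the library). The placeholder shapes are
# arbitrary assumptions chosen for illustration.
def _example_bidirectional_encoder():
  params = BidirectionalRNNEncoder.default_params()
  encoder = BidirectionalRNNEncoder(params, tf.contrib.learn.ModeKeys.TRAIN)
  inputs = tf.placeholder(tf.float32, shape=[None, 20, 32])  # (batch, time, embedding_dim)
  lengths = tf.placeholder(tf.int32, shape=[None])
  encoder_output = encoder.encode(inputs, lengths)
  # encoder_output.outputs has shape (batch, time, 2 * num_units) because the
  # forward and backward RNN outputs are concatenated.
  return encoder_output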
| 31.145251 | 76 | 0.717668 |
7941f7839c3c76d1ccf1f1a4c4f3b6dec269fa4a | 2,515 | py | Python | bayesian-ab-django/bayesian_ab/abtest/models.py | robinsonkwame/csdt_fora_experiments | fd3f9b208feac5c26457d69ed54e3fe39158614b | [
"MIT"
] | null | null | null | bayesian-ab-django/bayesian_ab/abtest/models.py | robinsonkwame/csdt_fora_experiments | fd3f9b208feac5c26457d69ed54e3fe39158614b | [
"MIT"
] | null | null | null | bayesian-ab-django/bayesian_ab/abtest/models.py | robinsonkwame/csdt_fora_experiments | fd3f9b208feac5c26457d69ed54e3fe39158614b | [
"MIT"
] | null | null | null | import uuid
import numpy as np
import scipy.stats
from django.utils import timezone
from django.db import models
class Campaign(models.Model):
''' Record for AB Tests conducted
'''
timestamp = models.DateTimeField(
default=timezone.now,
help_text='timestamp of creation of campaign'
)
code = models.UUIDField(
default=uuid.uuid4,
editable=False,
help_text='AB test campaign code'
)
name = models.CharField(
unique=True,
max_length=255,
help_text='Name of AB test'
)
description = models.TextField(
blank=True,
default='',
help_text='Description of AB test'
)
active = models.BooleanField(
default=True,
help_text='True if campaign is active'
)
allow_repeat = models.BooleanField(
default=True,
help_text='True if repeat impressions/conversions allowed by the same user'
)
def __str__(self):
return f'AB Test Campaign: {self.code}, {self.name}'
class Variant(models.Model):
''' Model to store variants (treatments)
within an AB test campaign. Variants are the different
versions served to users (A/B/C...)
'''
campaign = models.ForeignKey(
Campaign,
related_name='variants',
on_delete=models.CASCADE,
)
code = models.CharField(
max_length=32,
help_text='Variant code, (i.e., A, B, C etc)'
)
name = models.CharField(
max_length=64,
help_text='Name of variant'
)
impressions = models.IntegerField(
default=1,
help_text='Number of times variant was shown/visited'
)
conversions = models.IntegerField(
default=1,
help_text='Number of conversions for variant'
)
conversion_rate = models.FloatField(
default=1.0,
help_text='conversions / impressions'
)
html_template = models.FilePathField(
null=True,
help_text='Path to HTML template for variant View'
)
def beta_pdf(self, x_vals):
# Get beta distribution values given corresponding X values where 0 < X <1
# Where alpha = conversions and beta = impressions - conversions
y_vals = list(scipy.stats.beta.pdf(
x_vals,
max(self.conversions, 1),
max(self.impressions-self.conversions, 1)
)
)
return y_vals
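    # Editor's note (illustrative): with e.g. 30 conversions out of 100 impressions
    # this evaluates a Beta(30, 70) density, which peaks near a conversion rate of
    # 0.3; x_vals is expected to lie strictly between 0 and 1.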
def __str__(self):
return f'Variant: {self.code} | {self.campaign.code} '
| 27.637363 | 83 | 0.6167 |
7941f7ccd12200d0df5cfab83740581efb0eee71 | 2,662 | py | Python | app/utils6L/utils6L.py | bvbgrad/fpq02 | 4bd4017913dd2bcb0d39e24002f5ddbe4cde3c09 | [
"MIT"
] | null | null | null | app/utils6L/utils6L.py | bvbgrad/fpq02 | 4bd4017913dd2bcb0d39e24002f5ddbe4cde3c09 | [
"MIT"
] | null | null | null | app/utils6L/utils6L.py | bvbgrad/fpq02 | 4bd4017913dd2bcb0d39e24002f5ddbe4cde3c09 | [
"MIT"
] | null | null | null | """
Copyright 6L LLC (2021) - See MIT LICENSE
Cross project features or decorators, similar to Spring Aspect-Oriented Programming(AOP)
https://docs.spring.io/spring-framework/docs/4.3.12.RELEASE/spring-framework-reference/html/aop.html
usage - copy/paste the package directory into the project file structure and
follow the notes below.
Special considerations for the logging setup and decorator:
the logger name is defined as an environment variable, so the __init__.py
module of the main package needs to ingest the .env file:
from dotenv import load_dotenv
load_dotenv()
Then ensure the following import statements are added to the rest of the project modules.
import app.utils6L.utils6L as utils
import logging
import os
logger_name = os.getenv("LOGGER_NAME")
logger = logging.getLogger(logger_name)
Then, for example, reference the log_wrap decorator as follows
@utils.log_wrap
or, directly invoke a log message
logger.info("log message")
"""
import logging
from logging.handlers import RotatingFileHandler
import os
logger_name = os.getenv("LOGGER_NAME")
logger = logging.getLogger(logger_name)
def log_wrap(func):
def wrapped(*args, **kwargs):
logger.info(f"enter {func.__name__}()")
result = func(*args, **kwargs)
logger.info(f"exit {func.__name__}()")
return result
return wrapped
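# Editor's note: a small, illustrative usage sketch of the decorator above;
# `_example_log_wrap` and `do_work` are hypothetical names added for clarity.
def _example_log_wrap():
    @log_wrap
    def do_work():
        logger.info("doing work")
    # Calling do_work() logs "enter do_work()" and "exit do_work()" around the body.
    do_work()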
def setup_logging():
if not os.path.exists('logs'):
os.mkdir('logs')
# use environment variables to set each logging channel level
logger_level_base = os.getenv("LOGGER_LEVEL_BASE")
logger_level_file = os.getenv("LOGGER_LEVEL_FILE")
logger_level_stream = os.getenv("LOGGER_LEVEL_STREAM")
logger.setLevel(logger_level_base)
# create file handler which logs even debug messages
fh = RotatingFileHandler(f"logs/{logger_name}.log", maxBytes=1000000, backupCount=10)
fh.setLevel(logger_level_file)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logger_level_stream)
# create formatter and add it to the handlers
formatter = logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s [%(pathname)s:%(lineno)d]')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
# logger.debug(f'setup_logging():{logger_name}: example debug message')
# logger.info(f"setup_logging():{logger_name}: example info message")
# logger.warning(f'setup_logging():{logger_name}: example warn message')
# logger.error(f'setup_logging():{logger_name}: example error message')
# logger.critical(f'setup_logging():{logger_name}: example critical message')
| 32.864198 | 100 | 0.739669 |
7941f8656f09ac1fd4fcbae6a4d62a52c76b7b57 | 563 | py | Python | Leetcode/0244. Shortest Word Distance II/0244.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0244. Shortest Word Distance II/0244.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0244. Shortest Word Distance II/0244.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | import math
from collections import defaultdict
from typing import List
class WordDistance:
def __init__(self, wordsDict: List[str]):
self.wordToIndices = defaultdict(list)
for i, word in enumerate(wordsDict):
self.wordToIndices[word].append(i)
def shortest(self, word1: str, word2: str) -> int:
indices1 = self.wordToIndices[word1]
indices2 = self.wordToIndices[word2]
ans = math.inf
i = 0
j = 0
while i < len(indices1) and j < len(indices2):
ans = min(ans, abs(indices1[i] - indices2[j]))
if indices1[i] < indices2[j]:
i += 1
else:
j += 1
return ans
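# Editor's note: illustrative check added for clarity (the classic example from
# the problem statement); the two-pointer sweep works because each word's index
# list is stored in increasing order.
if __name__ == "__main__":
  wd = WordDistance(["practice", "makes", "perfect", "coding", "makes"])
  assert wd.shortest("coding", "practice") == 3
  assert wd.shortest("makes", "coding") == 1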
| 25.590909 | 52 | 0.609236 |
7941f8a979178c61230a588ad0dc604992638b46 | 5,424 | py | Python | migrations/versions/017bc688b20f_.py | CatsAreEvil/box-office-studio | 0fcf19ccd4f65622d94c6cf0c6ac2ef4fd1bd5f8 | [
"MIT"
] | null | null | null | migrations/versions/017bc688b20f_.py | CatsAreEvil/box-office-studio | 0fcf19ccd4f65622d94c6cf0c6ac2ef4fd1bd5f8 | [
"MIT"
] | 1 | 2019-06-12T01:25:39.000Z | 2019-06-12T01:25:40.000Z | migrations/versions/017bc688b20f_.py | CatsAreEvil/box-office-studio | 0fcf19ccd4f65622d94c6cf0c6ac2ef4fd1bd5f8 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 017bc688b20f
Revises: 270947efbfd8
Create Date: 2019-03-15 19:17:26.979166
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '017bc688b20f'
down_revision = '270947efbfd8'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('results', sa.Column('movie_10', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_10_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_11', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_11_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_12', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_12_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_13', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_13_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_14', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_14_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_15', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_15_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_16', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_16_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_17', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_17_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_18', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_18_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_19', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_19_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_2', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_20', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_20_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_2_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_3', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_3_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_4', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_4_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_5', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_5_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_6', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_6_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_7', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_7_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_8', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_8_results', sa.Float(), nullable=True))
op.add_column('results', sa.Column('movie_9', sa.String(), nullable=True))
op.add_column('results', sa.Column('movie_9_results', sa.Float(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('results', 'movie_9_results')
op.drop_column('results', 'movie_9')
op.drop_column('results', 'movie_8_results')
op.drop_column('results', 'movie_8')
op.drop_column('results', 'movie_7_results')
op.drop_column('results', 'movie_7')
op.drop_column('results', 'movie_6_results')
op.drop_column('results', 'movie_6')
op.drop_column('results', 'movie_5_results')
op.drop_column('results', 'movie_5')
op.drop_column('results', 'movie_4_results')
op.drop_column('results', 'movie_4')
op.drop_column('results', 'movie_3_results')
op.drop_column('results', 'movie_3')
op.drop_column('results', 'movie_2_results')
op.drop_column('results', 'movie_20_results')
op.drop_column('results', 'movie_20')
op.drop_column('results', 'movie_2')
op.drop_column('results', 'movie_19_results')
op.drop_column('results', 'movie_19')
op.drop_column('results', 'movie_18_results')
op.drop_column('results', 'movie_18')
op.drop_column('results', 'movie_17_results')
op.drop_column('results', 'movie_17')
op.drop_column('results', 'movie_16_results')
op.drop_column('results', 'movie_16')
op.drop_column('results', 'movie_15_results')
op.drop_column('results', 'movie_15')
op.drop_column('results', 'movie_14_results')
op.drop_column('results', 'movie_14')
op.drop_column('results', 'movie_13_results')
op.drop_column('results', 'movie_13')
op.drop_column('results', 'movie_12_results')
op.drop_column('results', 'movie_12')
op.drop_column('results', 'movie_11_results')
op.drop_column('results', 'movie_11')
op.drop_column('results', 'movie_10_results')
op.drop_column('results', 'movie_10')
# ### end Alembic commands ###
| 52.660194 | 86 | 0.696718 |
7941f91e26de2aac163060b5e0be558e7a97c99a | 4,317 | py | Python | examples/seismic/cemeai/dados_marmousi.py | pedrospeixoto/devito | 7abc42202214c534eb962c27cf294ec9893615e1 | [
"MIT"
] | 1 | 2021-03-25T21:23:03.000Z | 2021-03-25T21:23:03.000Z | examples/seismic/cemeai/dados_marmousi.py | pedrospeixoto/devito | 7abc42202214c534eb962c27cf294ec9893615e1 | [
"MIT"
] | 40 | 2021-04-09T07:57:02.000Z | 2022-03-21T08:15:33.000Z | examples/seismic/cemeai/dados_marmousi.py | pedrospeixoto/devito | 7abc42202214c534eb962c27cf294ec9893615e1 | [
"MIT"
] | null | null | null | #==============================================================================
# Python libraries
#==============================================================================
import numpy as np
import segyio
import sys
from scipy.interpolate import CubicSpline
from scipy.interpolate import interp1d
import matplotlib.pyplot as plot
import matplotlib.ticker as mticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import cm
from matplotlib import ticker
#==============================================================================
#==============================================================================
# Handling the Marmousi data
#==============================================================================
with segyio.open('vel_models_file/marmousi_perfil1.segy') as segyfile:
vel = segyio.tools.cube(segyfile)[0,:,:]
nptxvel = vel.shape[0]
nptyvel = vel.shape[1]
x0vel = 0.
x1vel = 17000.
y0vel = 0.
y1vel = 3500.
hxvel = (x1vel-x0vel)/(nptxvel-1)
hyvel = (y1vel-y0vel)/(nptyvel-1)
Xvel = np.linspace(x0vel,x1vel,nptxvel)
Yvel = np.linspace(y0vel,y1vel,nptyvel)
fscale = 10**(-3)
vel = fscale*vel
#==============================================================================
#==============================================================================
# Interpolating the Marmousi data
#==============================================================================
def inter_marmousi(teste):
nptx = teste.nptx
nptz = teste.nptz
X0 = teste.X0
Z0 = teste.Z0
C0 = np.zeros((nptx,nptz))
C0x = np.zeros((nptx,nptyvel))
for j in range(nptyvel):
x = Xvel
y = vel[0:nptxvel,j]
#cs = interp1d(x,y,kind='linear',fill_value="extrapolate")
#cs = interp1d(x,y,kind='linear',fill_value="extrapolate")
cs = interp1d(x,y,kind='nearest',fill_value="extrapolate")
#cs = interp1d(x,y,kind='previous',fill_value="extrapolate")
#cs = interp1d(x,y,kind='next',fill_value="extrapolate")
#cs = CubicSpline(x,y)
xs = X0
C0x[0:nptx,j] = cs(xs)
for i in range(nptx):
x = Yvel
y = C0x[i,0:nptyvel]
#cs = interp1d(x,y,kind='linear',fill_value="extrapolate")
#cs = interp1d(x,y,kind='linear',fill_value="extrapolate")
cs = interp1d(x,y,kind='nearest',fill_value="extrapolate")
#cs = interp1d(x,y,kind='previous',fill_value="extrapolate")
#cs = interp1d(x,y,kind='next',fill_value="extrapolate")
#cs = CubicSpline(x,y)
xs = Z0
C0[i,0:nptz] = cs(xs)
return C0
#==============================================================================
#==============================================================================
# Velocity plot
#==============================================================================
def graph2dvel(vel,teste):
x0 = teste.x0
x1 = teste.x1
z0 = teste.z0
z1 = teste.z1
nptx = teste.nptx
nptz = teste.nptz
plot.figure(figsize = (14,4))
fscale = 10**(-3)
scale = np.amax(vel)
extent = [fscale*x0,fscale*x1, fscale*z1, fscale*z0]
fig = plot.imshow(np.transpose(vel), vmin=np.amin(vel),vmax=scale, cmap=cm.jet, extent=extent)
plot.gca().xaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.gca().yaxis.set_major_formatter(mticker.FormatStrFormatter('%.1f km'))
plot.title('Velocity Profile - Marmousi Interpolated')
plot.grid()
ax = plot.gca()
divider = make_axes_locatable(ax)
ax.xaxis.set_major_locator(plot.MaxNLocator(4))
ax.yaxis.set_major_locator(plot.MaxNLocator(4))
cax = divider.append_axes("right", size="4%", pad=0.025)
tick_locator = ticker.MaxNLocator(nbins=4)
cbar = plot.colorbar(fig, cax=cax, format='%.2e')
cbar.locator = tick_locator
cbar.update_ticks()
cbar.set_label('Velocity [km/s]')
plot.savefig('figures/vel_model/marmousi_interpolated.png',dpi=100)
plot.show()
plot.close()
#============================================================================== | 38.891892 | 98 | 0.4795 |
7941fdfe0526748b158f22f2562bb2a55456666d | 2,088 | py | Python | gym_wmgds/envs/mujoco/thrower.py | ozcell/gym_wmgds_ma | c2cb22943913361947216b908d50decc46616e99 | [
"Python-2.0",
"OLDAP-2.7"
] | 1 | 2020-12-23T16:38:15.000Z | 2020-12-23T16:38:15.000Z | gym_wmgds/envs/mujoco/thrower.py | ozcell/gym_wmgds_ma | c2cb22943913361947216b908d50decc46616e99 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | gym_wmgds/envs/mujoco/thrower.py | ozcell/gym_wmgds_ma | c2cb22943913361947216b908d50decc46616e99 | [
"Python-2.0",
"OLDAP-2.7"
] | null | null | null | import numpy as np
from gym_wmgds import utils
from gym_wmgds.envs.mujoco import mujoco_env
class ThrowerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
utils.EzPickle.__init__(self)
self._ball_hit_ground = False
self._ball_hit_location = None
mujoco_env.MujocoEnv.__init__(self, 'thrower.xml', 5)
def step(self, a):
ball_xy = self.get_body_com("ball")[:2]
goal_xy = self.get_body_com("goal")[:2]
if not self._ball_hit_ground and self.get_body_com("ball")[2] < -0.25:
self._ball_hit_ground = True
self._ball_hit_location = self.get_body_com("ball")
if self._ball_hit_ground:
ball_hit_xy = self._ball_hit_location[:2]
reward_dist = -np.linalg.norm(ball_hit_xy - goal_xy)
else:
reward_dist = -np.linalg.norm(ball_xy - goal_xy)
reward_ctrl = - np.square(a).sum()
reward = reward_dist + 0.002 * reward_ctrl
self.do_simulation(a, self.frame_skip)
ob = self._get_obs()
done = False
        return ob, reward, done, dict(reward_dist=reward_dist,
                                      reward_ctrl=reward_ctrl)
def viewer_setup(self):
self.viewer.cam.trackbodyid = 0
self.viewer.cam.distance = 4.0
def reset_model(self):
self._ball_hit_ground = False
self._ball_hit_location = None
qpos = self.init_qpos
self.goal = np.array([self.np_random.uniform(low=-0.3, high=0.3),
self.np_random.uniform(low=-0.3, high=0.3)])
qpos[-9:-7] = self.goal
        qvel = self.init_qvel + self.np_random.uniform(low=-0.005,
                                                       high=0.005, size=self.model.nv)
qvel[7:] = 0
self.set_state(qpos, qvel)
return self._get_obs()
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[:7],
self.sim.data.qvel.flat[:7],
self.get_body_com("r_wrist_roll_link"),
self.get_body_com("ball"),
self.get_body_com("goal"),
])
| 34.229508 | 78 | 0.603927 |
7941ffd1968459e324946a62464c24e505ce0167 | 4,935 | py | Python | lm_experiment.py | cbamann/language-model | d14410c2302bf42bb771abc4a6b859704847798e | [
"MIT"
] | null | null | null | lm_experiment.py | cbamann/language-model | d14410c2302bf42bb771abc4a6b859704847798e | [
"MIT"
] | null | null | null | lm_experiment.py | cbamann/language-model | d14410c2302bf42bb771abc4a6b859704847798e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import tensorflow as tf
PROJECT_FOLDER = "./"
# READ AND PREPROCESS LOCAL FILES
exec(open(PROJECT_FOLDER + "setup.py").read())
fname_nn_out = FOLDER_NN_MODELS + "exp"
BATCHES_TO_PROCESS = 500
###############################################################################
# Download word embedding
path_embedding = PROJECT_FOLDER + "wordembeddings-dim100.word2vec"
model_embed = KeyedVectors.load_word2vec_format( path_embedding )
# EMBEDDING MATRIX:
vocab_size = len(vocab_inv)
embeddings = np.zeros( ( FLAGS.vocab_size, FLAGS.embedding_dim ) )
# + 2 because the symbols <unk> and <pad> were added to the vocabulary
matches = 0
for k, v in vocab_dict.items():
if k in model_embed.vocab:
embeddings[v] = model_embed[k]
matches += 1
else:
embeddings[v] = np.random.uniform(low=-0.25, high=0.25, size=FLAGS.embedding_dim )
print("%d words out of %d could be loaded" % (matches, vocab_size))
###############################################################################
#-----------------------------------------------------------------------------#
# DEFINE THE GRAPH
tf.reset_default_graph() # clean up just in case
x_model = tf.placeholder(tf.int64 , shape = (None, FLAGS.sent_len))
# Define weights
W = tf.get_variable(name = "W_out",
shape = (FLAGS.hidden_units, FLAGS.vocab_size),
dtype = tf.float64,
initializer = tf.contrib.layers.xavier_initializer())
embedding_matrix = tf.constant(embeddings,
shape = (FLAGS.vocab_size, FLAGS.embedding_dim),
dtype = tf.float64)
embedded_x = tf.nn.embedding_lookup(embedding_matrix, x_model)
# prepare input and output sequences
X_series = tf.unstack(embedded_x[:,:-1,:], axis = 1)
Y_series = tf.unstack(x_model[:,1:], axis = 1)
#-----------------------------------------------------------------------------#
# CONSTRUCT THE NETWORK
lstm = tf.nn.rnn_cell.LSTMCell(num_units = FLAGS.hidden_units,
dtype= tf.float64 ) # state_is_tuple=True)
_state = lstm.zero_state(dtype = tf.float64, batch_size = FLAGS.batch_size )
# unrolling
outputs = []
for i in range(len(X_series)):
(cell_output, _state) = lstm(X_series[i], _state)
outputs.append(cell_output)
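# After unrolling, `outputs` holds one (batch_size, hidden_units) tensor per
# time step; each is projected onto the vocabulary below via W.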
#-----------------------------------------------------------------------------#
# COMPUTE THE LOSS
loss = 0.0
for i in range(len(X_series) ): # time dimension
labels_i = Y_series[i]
out = outputs[i]
# This op expects unscaled logits, since it performs a softmax on logits internally for efficiency.
# Do not call this op with the output of softmax, as it will produce incorrect results.
prob_unnorm = tf.matmul(out, W)
loss_temp = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels = labels_i,
logits = prob_unnorm)
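    # Mask out <pad> positions: the mask is 1.0 for real tokens and 0.0 for
    # padding, so padded steps contribute nothing to the loss.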
is_padded = tf.dtypes.cast(tf.not_equal(labels_i, vocab_dict["<pad>"]),
dtype=tf.float64 )
loss_temp = tf.math.multiply(loss_temp, is_padded)
    loss += loss_temp  # padded positions contribute zero after masking
#-----------------------------------------------------------------------------#
# OPTIMIZER
params = tf.trainable_variables()
# tf.reset_default_graph() # if something is wrong
optimizer = tf.train.AdamOptimizer(learning_rate = FLAGS.learning_rate) # default rate
gradients, variables = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.clip_gradient)
train_op = optimizer.apply_gradients(zip(gradients, variables))
###############################################################################
# Train the network
# Input parameters
test_mode = True
train_df = train_df_enc
fname_nn = fname_nn_out
#-----------------------------------------------------------------------------#
init = tf.global_variables_initializer()
saver = tf.train.Saver()
batches_total = int(len(train_df_enc)/ FLAGS.batch_size)
test_mode = True
if test_mode:
sess = tf.Session(config=session_conf_cluster)
#train_df_loc = train_df[:NUM_FOR_TEST]
no_of_batches = min([BATCHES_TO_PROCESS, batches_total])
else:
sess = tf.Session(config=session_conf_cluster)
no_of_batches = batches_total
# run session
with sess.as_default():
sess.run(init)
# feed batches to network
print("No of batches: {}".format(no_of_batches))
learning_errors = []
ptr = 0
for j in range(no_of_batches):
print("Batch: {}".format(j))
x_train = train_df_enc[ptr: min([ptr + FLAGS.batch_size, len(train_df_enc)-1 ])]
x_train , added = prepare_batch(x_train)
_ , l = sess.run([train_op, loss], feed_dict = {x_model: x_train })
ptr += FLAGS.batch_size
# save the session parameters
saver.save(sess, fname_nn)
| 32.045455 | 104 | 0.581155 |
7942041aff96367e77da0611a309d4a2f7844726 | 5,724 | py | Python | examples/model_compression/distill_lstm/bert_distill.py | zhengya01/PaddleNLP | b28bfb7b864781cc871e2fb910bd55ea705d3fec | [
"Apache-2.0"
] | null | null | null | examples/model_compression/distill_lstm/bert_distill.py | zhengya01/PaddleNLP | b28bfb7b864781cc871e2fb910bd55ea705d3fec | [
"Apache-2.0"
] | null | null | null | examples/model_compression/distill_lstm/bert_distill.py | zhengya01/PaddleNLP | b28bfb7b864781cc871e2fb910bd55ea705d3fec | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import paddle
import paddle.nn as nn
from paddle.metric import Accuracy
from paddlenlp.transformers import BertForSequenceClassification
from paddlenlp.metrics import AccuracyAndF1
from paddlenlp.datasets import GlueSST2, GlueQQP, ChnSentiCorp
from args import parse_args
from small import BiLSTM
from data import create_distill_loader
TASK_CLASSES = {
"sst-2": (GlueSST2, Accuracy),
"qqp": (GlueQQP, AccuracyAndF1),
"senta": (ChnSentiCorp, Accuracy),
}
class TeacherModel(object):
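    """Frozen BERT teacher: weights are loaded once and the model stays in eval mode."""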
def __init__(self, model_name, param_path):
self.model = BertForSequenceClassification.from_pretrained(model_name)
self.model.set_state_dict(paddle.load(param_path))
self.model.eval()
def evaluate(task_name, model, metric, data_loader):
model.eval()
metric.reset()
for i, batch in enumerate(data_loader):
if task_name == 'qqp':
_, _, student_input_ids_1, seq_len_1, student_input_ids_2, seq_len_2, labels = batch
logits = model(student_input_ids_1, seq_len_1, student_input_ids_2,
seq_len_2)
else:
_, _, student_input_ids, seq_len, labels = batch
logits = model(student_input_ids, seq_len)
correct = metric.compute(logits, labels)
metric.update(correct)
res = metric.accumulate()
if isinstance(metric, AccuracyAndF1):
print(
"acc: %s, precision: %s, recall: %s, f1: %s, acc and f1: %s, " % (
res[0],
res[1],
res[2],
res[3],
res[4], ),
end='')
else:
print("acc: %s, " % (res), end='')
model.train()
def do_train(args):
train_data_loader, dev_data_loader = create_distill_loader(
args.task_name,
model_name=args.model_name,
vocab_path=args.vocab_path,
batch_size=args.batch_size,
max_seq_length=args.max_seq_length,
n_iter=args.n_iter,
whole_word_mask=args.whole_word_mask,
seed=args.seed)
model = BiLSTM(args.emb_dim, args.hidden_size, args.vocab_size,
args.output_dim, args.vocab_path, args.padding_idx,
args.num_layers, args.dropout_prob, args.init_scale,
args.embedding_name)
if args.optimizer == 'adadelta':
optimizer = paddle.optimizer.Adadelta(
learning_rate=args.lr, rho=0.95, parameters=model.parameters())
else:
optimizer = paddle.optimizer.Adam(
learning_rate=args.lr, parameters=model.parameters())
ce_loss = nn.CrossEntropyLoss()
mse_loss = nn.MSELoss()
klloss = nn.KLDivLoss()
metric_class = TASK_CLASSES[args.task_name][1]
metric = metric_class()
teacher = TeacherModel(
model_name=args.model_name, param_path=args.teacher_path)
print("Start to distill student model.")
global_step = 0
tic_train = time.time()
for epoch in range(args.max_epoch):
model.train()
for i, batch in enumerate(train_data_loader):
if args.task_name == 'qqp':
bert_input_ids, bert_segment_ids, student_input_ids_1, seq_len_1, student_input_ids_2, seq_len_2, labels = batch
else:
bert_input_ids, bert_segment_ids, student_input_ids, seq_len, labels = batch
# Calculate teacher model's forward.
with paddle.no_grad():
teacher_logits = teacher.model(bert_input_ids, bert_segment_ids)
# Calculate student model's forward.
if args.task_name == 'qqp':
logits = model(student_input_ids_1, seq_len_1,
student_input_ids_2, seq_len_2)
else:
logits = model(student_input_ids, seq_len)
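            # Distillation objective: weighted mix of hard-label cross entropy
            # and MSE between student and teacher logits, balanced by alpha.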
loss = args.alpha * ce_loss(logits, labels) + (
1 - args.alpha) * mse_loss(logits, teacher_logits)
loss.backward()
optimizer.step()
optimizer.clear_grad()
if i % args.log_freq == 0:
print(
"global step %d, epoch: %d, batch: %d, loss: %f, speed: %.4f step/s"
% (global_step, epoch, i, loss,
args.log_freq / (time.time() - tic_train)))
tic_eval = time.time()
acc = evaluate(args.task_name, model, metric, dev_data_loader)
print("eval done total : %s s" % (time.time() - tic_eval))
tic_train = time.time()
if i % args.save_steps == 0:
paddle.save(
model.state_dict(),
os.path.join(args.output_dir,
"step_" + str(global_step) + ".pdparams"))
paddle.save(optimizer.state_dict(),
os.path.join(args.output_dir,
"step_" + str(global_step) + ".pdopt"))
global_step += 1
if __name__ == '__main__':
args = parse_args()
print(args)
paddle.seed(args.seed)
do_train(args)
| 35.116564 | 128 | 0.608491 |
794204381a4bba89b40b82dc0f329c68012f0d0a | 3,552 | py | Python | alipay/aop/api/domain/SaleProduct.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 213 | 2018-08-27T16:49:32.000Z | 2021-12-29T04:34:12.000Z | alipay/aop/api/domain/SaleProduct.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 29 | 2018-09-29T06:43:00.000Z | 2021-09-02T03:27:32.000Z | alipay/aop/api/domain/SaleProduct.py | snowxmas/alipay-sdk-python-all | 96870ced60facd96c5bce18d19371720cbda3317 | [
"Apache-2.0"
] | 59 | 2018-08-27T16:59:26.000Z | 2022-03-25T10:08:15.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.ProductProvider import ProductProvider
class SaleProduct(object):
def __init__(self):
self._channel_type = None
self._id = None
self._market_price = None
self._product_provider = None
self._sale_price = None
self._status = None
@property
def channel_type(self):
return self._channel_type
@channel_type.setter
def channel_type(self, value):
self._channel_type = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def market_price(self):
return self._market_price
@market_price.setter
def market_price(self, value):
self._market_price = value
@property
def product_provider(self):
return self._product_provider
@product_provider.setter
def product_provider(self, value):
if isinstance(value, ProductProvider):
self._product_provider = value
else:
self._product_provider = ProductProvider.from_alipay_dict(value)
@property
def sale_price(self):
return self._sale_price
@sale_price.setter
def sale_price(self, value):
self._sale_price = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
def to_alipay_dict(self):
params = dict()
if self.channel_type:
if hasattr(self.channel_type, 'to_alipay_dict'):
params['channel_type'] = self.channel_type.to_alipay_dict()
else:
params['channel_type'] = self.channel_type
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.market_price:
if hasattr(self.market_price, 'to_alipay_dict'):
params['market_price'] = self.market_price.to_alipay_dict()
else:
params['market_price'] = self.market_price
if self.product_provider:
if hasattr(self.product_provider, 'to_alipay_dict'):
params['product_provider'] = self.product_provider.to_alipay_dict()
else:
params['product_provider'] = self.product_provider
if self.sale_price:
if hasattr(self.sale_price, 'to_alipay_dict'):
params['sale_price'] = self.sale_price.to_alipay_dict()
else:
params['sale_price'] = self.sale_price
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SaleProduct()
if 'channel_type' in d:
o.channel_type = d['channel_type']
if 'id' in d:
o.id = d['id']
if 'market_price' in d:
o.market_price = d['market_price']
if 'product_provider' in d:
o.product_provider = d['product_provider']
if 'sale_price' in d:
o.sale_price = d['sale_price']
if 'status' in d:
o.status = d['status']
return o
| 29.6 | 83 | 0.589245 |
7942046203526ad9416db1b07c180a6da49a45f9 | 1,070 | py | Python | savepagenow/exceptions.py | palewire/savepagenow | 33c57916393e9697cf31dae485efc3f4cfbe21d4 | [
"MIT"
] | 8 | 2021-10-04T17:20:34.000Z | 2022-03-27T23:57:20.000Z | savepagenow/exceptions.py | palewire/savepagenow | 33c57916393e9697cf31dae485efc3f4cfbe21d4 | [
"MIT"
] | 5 | 2021-10-03T23:11:02.000Z | 2022-03-28T23:37:08.000Z | savepagenow/exceptions.py | palewire/savepagenow | 33c57916393e9697cf31dae485efc3f4cfbe21d4 | [
"MIT"
] | 3 | 2021-12-12T17:57:09.000Z | 2022-01-28T15:25:52.000Z | class CachedPage(Exception):
"""Raised when archive.org declines to make a new capture and instead returns the cached version of most recent archive."""
pass
class WaybackRuntimeError(Exception):
"""An error returned by the Wayback Machine."""
pass
class BlockedByRobots(WaybackRuntimeError):
"""Raised when archive.org has been blocked by the site's robots.txt access control instructions."""
pass
class BadGateway(WaybackRuntimeError):
"""Raised when archive.org when you receive a 502 bad gateway status code in response to your request."""
pass
class Forbidden(WaybackRuntimeError):
"""Raised when archive.org when you receive a 403 forbidden status code in response to your request."""
pass
class TooManyRequests(WaybackRuntimeError):
"""Raised when archive.org when you have exceeded its throttle on request frequency. Slow it down."""
pass
class UnknownError(WaybackRuntimeError):
"""Raised when archive.org when you receive a 520 unknown status code in response to your request."""
pass
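# Minimal handling sketch (assuming this package's capture() helper; the URL
# is illustrative):
#
#   import savepagenow
#   try:
#       archive_url = savepagenow.capture("https://example.com/")
#   except savepagenow.CachedPage:
#       pass  # archive.org served an existing snapshot instead of a new one
#   except savepagenow.WaybackRuntimeError:
#       pass  # covers BlockedByRobots, BadGateway, Forbidden, TooManyRequests, ...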
| 26.097561 | 127 | 0.740187 |
794204a0687ab68af88e0c22e037b4d8e173452b | 874 | py | Python | tensorflow_datasets/core/community/__init__.py | sumanthd17/datasets-1 | 6c84ccca956d8f134a42aa12c0b4caa18c730ed0 | [
"Apache-2.0"
] | 1 | 2021-02-04T10:07:18.000Z | 2021-02-04T10:07:18.000Z | tensorflow_datasets/core/community/__init__.py | sumanthd17/datasets-1 | 6c84ccca956d8f134a42aa12c0b4caa18c730ed0 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/core/community/__init__.py | sumanthd17/datasets-1 | 6c84ccca956d8f134a42aa12c0b4caa18c730ed0 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Community dataset API."""
from tensorflow_datasets.core.community.load import builder_cls_from_module
from tensorflow_datasets.core.community.register_package import community_register
__all__ = [
'builder_cls_from_module',
'community_register',
]
| 33.615385 | 82 | 0.776888 |
794205d539a94144f88cfe00a601e3baf9d210d0 | 1,388 | py | Python | src/dispatch/metrics.py | roor0/dispatch | 12c4f567096411abe62abaf61c7c124496764346 | [
"Apache-2.0"
] | 3,417 | 2020-02-23T22:54:47.000Z | 2022-03-31T13:01:01.000Z | src/dispatch/metrics.py | roor0/dispatch | 12c4f567096411abe62abaf61c7c124496764346 | [
"Apache-2.0"
] | 607 | 2020-02-24T14:27:02.000Z | 2022-03-30T19:15:39.000Z | src/dispatch/metrics.py | roor0/dispatch | 12c4f567096411abe62abaf61c7c124496764346 | [
"Apache-2.0"
] | 359 | 2020-02-24T19:04:43.000Z | 2022-03-29T06:48:12.000Z | import logging
from dispatch.plugins.base import plugins
from .config import METRIC_PROVIDERS
log = logging.getLogger(__file__)
class Metrics(object):
_providers = []
def __init__(self):
if not METRIC_PROVIDERS:
log.info(
"No metric providers defined via METRIC_PROVIDERS env var. Metrics will not be sent."
)
else:
self._providers = METRIC_PROVIDERS
def gauge(self, name, value, tags=None):
for provider in self._providers:
log.debug(
f"Sending gauge metric {name} to provider {provider}. Value: {value} Tags: {tags}"
)
p = plugins.get(provider)
p.gauge(name, value, tags=tags)
def counter(self, name, value=None, tags=None):
for provider in self._providers:
log.debug(
f"Sending counter metric {name} to provider {provider}. Value: {value} Tags: {tags}"
)
p = plugins.get(provider)
p.counter(name, value=value, tags=tags)
def timer(self, name, value, tags=None):
for provider in self._providers:
log.debug(
f"Sending timer metric {name} to provider {provider}. Value: {value} Tags: {tags}"
)
p = plugins.get(provider)
p.timer(name, value, tags=tags)
provider = Metrics()
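# Usage sketch (metric name and tags are illustrative): other modules import
# the module-level singleton and emit through it, e.g.
#
#   from dispatch.metrics import provider as metrics_provider
#   metrics_provider.counter("incident.created", tags={"type": "security"})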
| 29.531915 | 101 | 0.581412 |
794207a0f1402180cd7a47948ed10aea9066378a | 63 | py | Python | Other_AIMA_Scripts/test.py | erensezener/aima-based-irl | fbbe28986cec0b5e58fef0f00338a180ed03759a | [
"MIT"
] | 12 | 2015-06-17T05:15:40.000Z | 2021-05-18T15:39:33.000Z | Other_AIMA_Scripts/test.py | erensezener/aima-based-irl | fbbe28986cec0b5e58fef0f00338a180ed03759a | [
"MIT"
] | 1 | 2020-03-14T08:45:49.000Z | 2020-03-14T08:45:49.000Z | Other_AIMA_Scripts/test.py | erensezener/aima-based-irl | fbbe28986cec0b5e58fef0f00338a180ed03759a | [
"MIT"
] | 5 | 2016-09-10T19:16:56.000Z | 2018-10-10T05:09:03.000Z | __author__ = 'erensezener'
def print_test():
print "test" | 12.6 | 26 | 0.68254 |
794208a86d66398d6117b7f8230bd7f2bd9cd4fa | 68,962 | py | Python | openstack/tests/unit/cloud/test_baremetal_node.py | gouthampacha/openstacksdk | f4dd6fe5fd21fb866b43330ecbbafee5cf553ded | [
"Apache-2.0"
] | null | null | null | openstack/tests/unit/cloud/test_baremetal_node.py | gouthampacha/openstacksdk | f4dd6fe5fd21fb866b43330ecbbafee5cf553ded | [
"Apache-2.0"
] | null | null | null | openstack/tests/unit/cloud/test_baremetal_node.py | gouthampacha/openstacksdk | f4dd6fe5fd21fb866b43330ecbbafee5cf553ded | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_baremetal_node
----------------------------------
Tests for baremetal node related operations
"""
import uuid
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from openstack.cloud import exc
from openstack import exceptions
from openstack.tests import fakes
from openstack.tests.unit import base
class TestBaremetalNode(base.IronicTestCase):
def setUp(self):
super(TestBaremetalNode, self).setUp()
self.fake_baremetal_node = fakes.make_fake_machine(
self.name, self.uuid)
# TODO(TheJulia): Some tests below have fake ports,
        # since they are required in some processes. Let's refactor
# them at some point to use self.fake_baremetal_port.
self.fake_baremetal_port = fakes.make_fake_port(
'00:01:02:03:04:05',
node_id=self.uuid)
def test_list_machines(self):
fake_baremetal_two = fakes.make_fake_machine('two', str(uuid.uuid4()))
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(resource='nodes'),
json={'nodes': [self.fake_baremetal_node,
fake_baremetal_two]}),
])
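        # register_uris() queues the canned Ironic responses in call order;
        # assert_calls() below verifies the client issued exactly these requests.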
machines = self.cloud.list_machines()
self.assertEqual(2, len(machines))
self.assertSubdict(self.fake_baremetal_node, machines[0])
self.assertSubdict(fake_baremetal_two, machines[1])
self.assert_calls()
def test_get_machine(self):
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
machine = self.cloud.get_machine(self.fake_baremetal_node['uuid'])
self.assertEqual(machine['uuid'],
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_get_machine_by_mac(self):
mac_address = '00:01:02:03:04:05'
node_uuid = self.fake_baremetal_node['uuid']
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='ports',
append=['detail'],
qs_elements=['address=%s' % mac_address]),
json={'ports': [{'address': mac_address,
'node_uuid': node_uuid}]}),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
machine = self.cloud.get_machine_by_mac(mac_address)
self.assertEqual(machine['uuid'],
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_validate_machine(self):
        # NOTE(TheJulia): These are only the interfaces
# that are validated, and all must be true for an
# exception to not be raised.
validate_return = {
'boot': {
'result': True,
},
'deploy': {
'result': True,
},
'management': {
'result': True,
},
'power': {
'result': True,
},
'foo': {
'result': False,
}}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'validate']),
json=validate_return),
])
self.cloud.validate_machine(self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_validate_machine_not_for_deploy(self):
validate_return = {
'deploy': {
'result': False,
'reason': 'Not ready',
},
'power': {
'result': True,
},
'foo': {
'result': False,
}}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'validate']),
json=validate_return),
])
self.cloud.validate_machine(self.fake_baremetal_node['uuid'],
for_deploy=False)
self.assert_calls()
def test_deprecated_validate_node(self):
validate_return = {
'deploy': {
'result': True,
},
'power': {
'result': True,
},
'foo': {
'result': False,
}}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'validate']),
json=validate_return),
])
self.cloud.validate_node(self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_validate_machine_raises_exception(self):
validate_return = {
'deploy': {
'result': False,
'reason': 'error!',
},
'power': {
'result': True,
'reason': None,
},
'foo': {
'result': True
}}
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'validate']),
json=validate_return),
])
self.assertRaises(
exceptions.ValidationException,
self.cloud.validate_machine,
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_patch_machine(self):
test_patch = [{
'op': 'remove',
'path': '/instance_info'}]
self.fake_baremetal_node['instance_info'] = {}
self.register_uris([
dict(method='PATCH',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node,
validate=dict(json=test_patch)),
])
result = self.cloud.patch_machine(
self.fake_baremetal_node['uuid'], test_patch)
self.assertEqual(self.fake_baremetal_node['uuid'], result['uuid'])
self.assert_calls()
def test_set_node_instance_info(self):
test_patch = [{
'op': 'add',
'path': '/foo',
'value': 'bar'}]
self.register_uris([
dict(method='PATCH',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node,
validate=dict(json=test_patch)),
])
self.cloud.set_node_instance_info(
self.fake_baremetal_node['uuid'], test_patch)
self.assert_calls()
def test_purge_node_instance_info(self):
test_patch = [{
'op': 'remove',
'path': '/instance_info'}]
self.fake_baremetal_node['instance_info'] = {}
self.register_uris([
dict(method='PATCH',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node,
validate=dict(json=test_patch)),
])
self.cloud.purge_node_instance_info(
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_inspect_machine_fail_active(self):
self.fake_baremetal_node['provision_state'] = 'active'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.inspect_machine,
self.fake_baremetal_node['uuid'],
wait=True,
timeout=1)
self.assert_calls()
def test_inspect_machine_fail_associated(self):
self.fake_baremetal_node['provision_state'] = 'available'
self.fake_baremetal_node['instance_uuid'] = '1234'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaisesRegex(
exc.OpenStackCloudException,
'associated with an instance',
self.cloud.inspect_machine,
self.fake_baremetal_node['uuid'],
wait=True,
timeout=1)
self.assert_calls()
def test_inspect_machine_failed(self):
inspecting_node = self.fake_baremetal_node.copy()
self.fake_baremetal_node['provision_state'] = 'inspect failed'
self.fake_baremetal_node['last_error'] = 'kaboom!'
inspecting_node['provision_state'] = 'inspecting'
finished_node = self.fake_baremetal_node.copy()
finished_node['provision_state'] = 'manageable'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=finished_node),
])
self.cloud.inspect_machine(self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_inspect_machine_manageable(self):
self.fake_baremetal_node['provision_state'] = 'manageable'
inspecting_node = self.fake_baremetal_node.copy()
inspecting_node['provision_state'] = 'inspecting'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.cloud.inspect_machine(self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_inspect_machine_available(self):
available_node = self.fake_baremetal_node.copy()
available_node['provision_state'] = 'available'
manageable_node = self.fake_baremetal_node.copy()
manageable_node['provision_state'] = 'manageable'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'provide'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
])
self.cloud.inspect_machine(self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_inspect_machine_available_wait(self):
available_node = self.fake_baremetal_node.copy()
available_node['provision_state'] = 'available'
manageable_node = self.fake_baremetal_node.copy()
manageable_node['provision_state'] = 'manageable'
inspecting_node = self.fake_baremetal_node.copy()
inspecting_node['provision_state'] = 'inspecting'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'provide'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
])
self.cloud.inspect_machine(
self.fake_baremetal_node['uuid'], wait=True, timeout=1)
self.assert_calls()
def test_inspect_machine_wait(self):
self.fake_baremetal_node['provision_state'] = 'manageable'
inspecting_node = self.fake_baremetal_node.copy()
inspecting_node['provision_state'] = 'inspecting'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.cloud.inspect_machine(
self.fake_baremetal_node['uuid'], wait=True, timeout=1)
self.assert_calls()
def test_inspect_machine_inspect_failed(self):
self.fake_baremetal_node['provision_state'] = 'manageable'
inspecting_node = self.fake_baremetal_node.copy()
inspecting_node['provision_state'] = 'inspecting'
inspect_fail_node = self.fake_baremetal_node.copy()
inspect_fail_node['provision_state'] = 'inspect failed'
inspect_fail_node['last_error'] = 'Earth Imploded'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'inspect'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspecting_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=inspect_fail_node),
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.inspect_machine,
self.fake_baremetal_node['uuid'],
wait=True, timeout=1)
self.assert_calls()
def test_set_machine_maintenace_state(self):
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'maintenance']),
validate=dict(json={'reason': 'no reason'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.cloud.set_machine_maintenance_state(
self.fake_baremetal_node['uuid'], True, reason='no reason')
self.assert_calls()
def test_set_machine_maintenace_state_false(self):
self.register_uris([
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'maintenance'])),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.cloud.set_machine_maintenance_state(
self.fake_baremetal_node['uuid'], False)
        self.assert_calls()
def test_remove_machine_from_maintenance(self):
self.register_uris([
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'maintenance'])),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.cloud.remove_machine_from_maintenance(
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_set_machine_power_on(self):
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'power on'})),
])
return_value = self.cloud.set_machine_power_on(
self.fake_baremetal_node['uuid'])
self.assertIsNone(return_value)
self.assert_calls()
def test_set_machine_power_on_with_retires(self):
# NOTE(TheJulia): This logic ends up testing power on/off and reboot
# as they all utilize the same helper method.
self.register_uris([
dict(
method='PUT',
status_code=503,
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'power on'})),
dict(
method='PUT',
status_code=409,
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'power on'})),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'power on'})),
])
return_value = self.cloud.set_machine_power_on(
self.fake_baremetal_node['uuid'])
self.assertIsNone(return_value)
self.assert_calls()
def test_set_machine_power_off(self):
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'power off'})),
])
return_value = self.cloud.set_machine_power_off(
self.fake_baremetal_node['uuid'])
self.assertIsNone(return_value)
self.assert_calls()
def test_set_machine_power_reboot(self):
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
validate=dict(json={'target': 'rebooting'})),
])
return_value = self.cloud.set_machine_power_reboot(
self.fake_baremetal_node['uuid'])
self.assertIsNone(return_value)
self.assert_calls()
def test_set_machine_power_reboot_failure(self):
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'power']),
status_code=400,
json={'error': 'invalid'},
validate=dict(json={'target': 'rebooting'})),
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.set_machine_power_reboot,
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_node_set_provision_state(self):
deploy_node = self.fake_baremetal_node.copy()
deploy_node['provision_state'] = 'deploying'
active_node = self.fake_baremetal_node.copy()
active_node['provision_state'] = 'active'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active',
'configdrive': 'http://host/file'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
result = self.cloud.node_set_provision_state(
self.fake_baremetal_node['uuid'],
'active',
configdrive='http://host/file')
self.assertEqual(self.fake_baremetal_node['uuid'], result['uuid'])
self.assert_calls()
def test_node_set_provision_state_with_retries(self):
deploy_node = self.fake_baremetal_node.copy()
deploy_node['provision_state'] = 'deploying'
active_node = self.fake_baremetal_node.copy()
active_node['provision_state'] = 'active'
self.register_uris([
dict(
method='PUT',
status_code=409,
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active',
'configdrive': 'http://host/file'})),
dict(
method='PUT',
status_code=503,
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active',
'configdrive': 'http://host/file'})),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active',
'configdrive': 'http://host/file'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.cloud.node_set_provision_state(
self.fake_baremetal_node['uuid'],
'active',
configdrive='http://host/file')
self.assert_calls()
def test_node_set_provision_state_wait_timeout(self):
deploy_node = self.fake_baremetal_node.copy()
deploy_node['provision_state'] = 'deploying'
active_node = self.fake_baremetal_node.copy()
active_node['provision_state'] = 'active'
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=deploy_node),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=active_node),
])
return_value = self.cloud.node_set_provision_state(
self.fake_baremetal_node['uuid'],
'active',
wait=True)
self.assertSubdict(active_node, return_value)
self.assert_calls()
def test_node_set_provision_state_wait_timeout_fails(self):
# Intentionally time out.
self.fake_baremetal_node['provision_state'] = 'deploy wait'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.node_set_provision_state,
self.fake_baremetal_node['uuid'],
'active',
wait=True,
timeout=0.001)
self.assert_calls()
def test_node_set_provision_state_wait_success(self):
self.fake_baremetal_node['provision_state'] = 'active'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
return_value = self.cloud.node_set_provision_state(
self.fake_baremetal_node['uuid'],
'active',
wait=True)
self.assertSubdict(self.fake_baremetal_node, return_value)
self.assert_calls()
def test_node_set_provision_state_wait_failure_cases(self):
self.fake_baremetal_node['provision_state'] = 'foo failed'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.node_set_provision_state,
self.fake_baremetal_node['uuid'],
'active',
wait=True,
timeout=300)
self.assert_calls()
def test_node_set_provision_state_wait_provide(self):
self.fake_baremetal_node['provision_state'] = 'manageable'
available_node = self.fake_baremetal_node.copy()
available_node['provision_state'] = 'available'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'provide'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
])
return_value = self.cloud.node_set_provision_state(
self.fake_baremetal_node['uuid'],
'provide',
wait=True)
self.assertSubdict(available_node, return_value)
self.assert_calls()
def test_wait_for_baremetal_node_lock_locked(self):
self.fake_baremetal_node['reservation'] = 'conductor0'
unlocked_node = self.fake_baremetal_node.copy()
unlocked_node['reservation'] = None
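        # Two canned GETs: the first still shows the conductor lock held,
        # the second shows the reservation cleared, ending the wait loop.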
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=unlocked_node),
])
self.assertIsNone(
self.cloud.wait_for_baremetal_node_lock(
self.fake_baremetal_node,
timeout=1))
self.assert_calls()
def test_wait_for_baremetal_node_lock_not_locked(self):
self.fake_baremetal_node['reservation'] = None
self.assertIsNone(
self.cloud.wait_for_baremetal_node_lock(
self.fake_baremetal_node,
timeout=1))
# NOTE(dtantsur): service discovery apparently requires 3 calls
self.assertEqual(3, len(self.adapter.request_history))
def test_wait_for_baremetal_node_lock_timeout(self):
self.fake_baremetal_node['reservation'] = 'conductor0'
self.register_uris([
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.wait_for_baremetal_node_lock,
self.fake_baremetal_node,
timeout=0.001)
self.assert_calls()
def test_activate_node(self):
self.fake_baremetal_node['provision_state'] = 'active'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'active',
'configdrive': 'http://host/file'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
return_value = self.cloud.activate_node(
self.fake_baremetal_node['uuid'],
configdrive='http://host/file',
wait=True)
self.assertIsNone(return_value)
self.assert_calls()
def test_deactivate_node(self):
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'deleted'})),
dict(method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
return_value = self.cloud.deactivate_node(
self.fake_baremetal_node['uuid'],
wait=True)
self.assertIsNone(return_value)
self.assert_calls()
def test_register_machine(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
# TODO(TheJulia): There is a lot of duplication
        # in testing creation. Surely this should be a helper
# or something. We should fix this.
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'available'
if 'provision_state' in node_to_post:
node_to_post.pop('provision_state')
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
json=self.fake_baremetal_node,
validate=dict(json=node_to_post)),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
])
return_value = self.cloud.register_machine(nics, **node_to_post)
self.assertDictEqual(self.fake_baremetal_node, return_value)
self.assert_calls()
# TODO(TheJulia): We need to de-duplicate these tests.
    # Possibly in a dedicated class, although we should do it
    # later, as we may find differences that need to be
    # accounted for in newer microversions.
def test_register_machine_enroll(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'enroll'
manageable_node = self.fake_baremetal_node.copy()
manageable_node['provision_state'] = 'manageable'
available_node = self.fake_baremetal_node.copy()
available_node['provision_state'] = 'available'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
validate=dict(json=node_to_post),
json=self.fake_baremetal_node),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'provide'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
])
        # NOTE: When we migrate to a newer microversion, this test
        # may require revision. It was written for microversion
        # ?1.13?, which accidentally got reverted to 1.6 at one
        # point while the code was being refactored soon after the
# change landed. Presently, with the lock at 1.6,
# this code is never used in the current code path.
return_value = self.cloud.register_machine(nics, **node_to_post)
self.assertSubdict(available_node, return_value)
self.assert_calls()
def test_register_machine_enroll_wait(self):
mac_address = self.fake_baremetal_port
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'enroll'
manageable_node = self.fake_baremetal_node.copy()
manageable_node['provision_state'] = 'manageable'
available_node = self.fake_baremetal_node.copy()
available_node['provision_state'] = 'available'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
validate=dict(json=node_to_post),
json=self.fake_baremetal_node),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=manageable_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'provide'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=available_node),
])
return_value = self.cloud.register_machine(
nics, wait=True, **node_to_post)
self.assertSubdict(available_node, return_value)
self.assert_calls()
def test_register_machine_enroll_failure(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'enroll'
failed_node = self.fake_baremetal_node.copy()
failed_node['reservation'] = 'conductor0'
failed_node['provision_state'] = 'verifying'
failed_node['last_error'] = 'kaboom!'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
json=self.fake_baremetal_node,
validate=dict(json=node_to_post)),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=failed_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=failed_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.register_machine,
nics,
**node_to_post)
self.assert_calls()
def test_register_machine_enroll_timeout(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'enroll'
busy_node = self.fake_baremetal_node.copy()
busy_node['reservation'] = 'conductor0'
busy_node['provision_state'] = 'verifying'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
json=self.fake_baremetal_node,
validate=dict(json=node_to_post)),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=busy_node),
])
        # NOTE(TheJulia): This test short-circuits the timeout loop
        # so that it executes only once. The very last state
        # returned by the API is essentially a busy state that we
# want to block on until it has cleared.
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.register_machine,
nics,
timeout=0.001,
lock_timeout=0.001,
**node_to_post)
self.assert_calls()
def test_register_machine_enroll_timeout_wait(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'enroll'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
json=self.fake_baremetal_node,
validate=dict(json=node_to_post)),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid}),
json=self.fake_baremetal_port),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='PUT',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'],
'states', 'provision']),
validate=dict(json={'target': 'manage'})),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.register_machine,
nics,
wait=True,
timeout=0.001,
**node_to_post)
self.assert_calls()
def test_register_machine_port_create_failed(self):
mac_address = '00:01:02:03:04:05'
nics = [{'mac': mac_address}]
node_uuid = self.fake_baremetal_node['uuid']
node_to_post = {
'chassis_uuid': None,
'driver': None,
'driver_info': None,
'name': self.fake_baremetal_node['name'],
'properties': None,
'uuid': node_uuid}
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes'),
json=self.fake_baremetal_node,
validate=dict(json=node_to_post)),
dict(
method='POST',
uri=self.get_mock_url(
resource='ports'),
status_code=400,
json={'error': 'invalid'},
validate=dict(json={'address': mac_address,
'node_uuid': node_uuid})),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']])),
])
self.assertRaises(exc.OpenStackCloudException,
self.cloud.register_machine,
nics, **node_to_post)
self.assert_calls()
def test_unregister_machine(self):
mac_address = self.fake_baremetal_port['address']
nics = [{'mac': mac_address}]
port_uuid = self.fake_baremetal_port['uuid']
# NOTE(TheJulia): The two values below should be the same.
port_node_uuid = self.fake_baremetal_port['node_uuid']
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='ports',
qs_elements=['address=%s' % mac_address]),
json={'ports': [{'address': mac_address,
'node_uuid': port_node_uuid,
'uuid': port_uuid}]}),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='ports',
append=[self.fake_baremetal_port['uuid']])),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']])),
])
self.cloud.unregister_machine(
nics, self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_unregister_machine_locked_timeout(self):
mac_address = self.fake_baremetal_port['address']
nics = [{'mac': mac_address}]
self.fake_baremetal_node['provision_state'] = 'available'
self.fake_baremetal_node['reservation'] = 'conductor99'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.unregister_machine,
nics,
self.fake_baremetal_node['uuid'],
timeout=0.001)
self.assert_calls()
def test_unregister_machine_retries(self):
mac_address = self.fake_baremetal_port['address']
nics = [{'mac': mac_address}]
port_uuid = self.fake_baremetal_port['uuid']
# NOTE(TheJulia): The two values below should be the same.
port_node_uuid = self.fake_baremetal_port['node_uuid']
self.fake_baremetal_node['provision_state'] = 'available'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='ports',
qs_elements=['address=%s' % mac_address]),
json={'ports': [{'address': mac_address,
'node_uuid': port_node_uuid,
'uuid': port_uuid}]}),
dict(
method='DELETE',
status_code=503,
uri=self.get_mock_url(
resource='ports',
append=[self.fake_baremetal_port['uuid']])),
dict(
method='DELETE',
status_code=409,
uri=self.get_mock_url(
resource='ports',
append=[self.fake_baremetal_port['uuid']])),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='ports',
append=[self.fake_baremetal_port['uuid']])),
dict(
method='DELETE',
status_code=409,
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']])),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']])),
])
self.cloud.unregister_machine(
nics, self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_unregister_machine_unavailable(self):
# This is a list of invalid states that the method
# should fail on.
invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed']
mac_address = self.fake_baremetal_port['address']
nics = [{'mac': mac_address}]
url_list = []
for state in invalid_states:
self.fake_baremetal_node['provision_state'] = state
url_list.append(
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node))
self.register_uris(url_list)
for state in invalid_states:
self.assertRaises(
exc.OpenStackCloudException,
self.cloud.unregister_machine,
nics,
self.fake_baremetal_node['uuid'])
self.assert_calls()
def test_update_machine_patch_no_action(self):
self.register_uris([dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
])
# NOTE(TheJulia): This is just testing mechanics.
update_dict = self.cloud.update_machine(
self.fake_baremetal_node['uuid'])
self.assertIsNone(update_dict['changes'])
self.assertSubdict(self.fake_baremetal_node, update_dict['node'])
self.assert_calls()
def test_attach_port_to_machine(self):
vif_id = '953ccbee-e854-450f-95fe-fe5e40d611ec'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
service_type='network',
resource='ports.json',
base_url_append='v2.0'),
json={'ports': [{'id': vif_id}]}),
dict(
method='POST',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'], 'vifs'])),
])
self.cloud.attach_port_to_machine(self.fake_baremetal_node['uuid'],
vif_id)
self.assert_calls()
def test_detach_port_from_machine(self):
vif_id = '953ccbee-e854-450f-95fe-fe5e40d611ec'
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
service_type='network',
resource='ports.json',
base_url_append='v2.0'),
json={'ports': [{'id': vif_id}]}),
dict(
method='DELETE',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'], 'vifs',
vif_id])),
])
self.cloud.detach_port_from_machine(self.fake_baremetal_node['uuid'],
vif_id)
self.assert_calls()
def test_list_ports_attached_to_machine(self):
vif_id = '953ccbee-e854-450f-95fe-fe5e40d611ec'
fake_port = {'id': vif_id, 'name': 'test'}
self.register_uris([
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid'], 'vifs']),
json={'vifs': [{'id': vif_id}]}),
dict(
method='GET',
uri=self.get_mock_url(
service_type='network',
resource='ports.json',
base_url_append='v2.0'),
json={'ports': [fake_port]}),
])
res = self.cloud.list_ports_attached_to_machine(
self.fake_baremetal_node['uuid'])
self.assert_calls()
self.assertEqual([fake_port], res)
class TestUpdateMachinePatch(base.IronicTestCase):
# NOTE(TheJulia): As appears, and mordred describes,
# this class utilizes black magic, which ultimately
# results in additional test runs being executed with
# the scenario name appended. Useful for lots of
# variables that need to be tested.
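    # A minimal sketch of that expansion (assumption: the base test case applies
    # testscenarios-style scenario handling; the exact mechanics live outside
    # this file). Each ('name', dict(...)) pair in `scenarios` yields one extra
    # test run with the dict keys set as attributes on the test instance:
    #
    #     scenarios = [
    #         ('driver_changed', dict(field_name='driver', changed=True,
    #                                 new_value='meow')),
    #     ]
    #     # runs as test_update_machine_patch(driver_changed) with
    #     # self.field_name == 'driver', self.changed is True and
    #     # self.new_value == 'meow'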
def setUp(self):
super(TestUpdateMachinePatch, self).setUp()
self.fake_baremetal_node = fakes.make_fake_machine(
self.name, self.uuid)
def test_update_machine_patch(self):
# The model has evolved over time, create the field if
# we don't already have it.
if self.field_name not in self.fake_baremetal_node:
self.fake_baremetal_node[self.field_name] = None
value_to_send = self.fake_baremetal_node[self.field_name]
if self.changed:
value_to_send = self.new_value
uris = [dict(
method='GET',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node),
]
if self.changed:
test_patch = [{
'op': 'replace',
'path': '/' + self.field_name,
'value': value_to_send}]
uris.append(
dict(
method='PATCH',
uri=self.get_mock_url(
resource='nodes',
append=[self.fake_baremetal_node['uuid']]),
json=self.fake_baremetal_node,
validate=dict(json=test_patch)))
self.register_uris(uris)
call_args = {self.field_name: value_to_send}
update_dict = self.cloud.update_machine(
self.fake_baremetal_node['uuid'], **call_args)
if self.changed:
self.assertEqual(['/' + self.field_name], update_dict['changes'])
else:
self.assertIsNone(update_dict['changes'])
self.assertSubdict(self.fake_baremetal_node, update_dict['node'])
self.assert_calls()
scenarios = [
('chassis_uuid', dict(field_name='chassis_uuid', changed=False)),
('chassis_uuid_changed',
dict(field_name='chassis_uuid', changed=True,
new_value='meow')),
('driver', dict(field_name='driver', changed=False)),
('driver_changed', dict(field_name='driver', changed=True,
new_value='meow')),
('driver_info', dict(field_name='driver_info', changed=False)),
('driver_info_changed', dict(field_name='driver_info', changed=True,
new_value={'cat': 'meow'})),
('instance_info', dict(field_name='instance_info', changed=False)),
('instance_info_changed',
dict(field_name='instance_info', changed=True,
new_value={'cat': 'meow'})),
('instance_uuid', dict(field_name='instance_uuid', changed=False)),
('instance_uuid_changed',
dict(field_name='instance_uuid', changed=True,
new_value='meow')),
('name', dict(field_name='name', changed=False)),
('name_changed', dict(field_name='name', changed=True,
new_value='meow')),
('properties', dict(field_name='properties', changed=False)),
('properties_changed', dict(field_name='properties', changed=True,
new_value={'cat': 'meow'}))
]
| 38.100552 | 78 | 0.506409 |
79420abd25c5129c9f8aae600cda2f0be80adb36 | 753 | py | Python | napari/plugins/_tests/conftest.py | SaraLatif99/napari | b17235ee77d30e58492368a73d7c8d8189397fa4 | [
"BSD-3-Clause"
] | null | null | null | napari/plugins/_tests/conftest.py | SaraLatif99/napari | b17235ee77d30e58492368a73d7c8d8189397fa4 | [
"BSD-3-Clause"
] | null | null | null | napari/plugins/_tests/conftest.py | SaraLatif99/napari | b17235ee77d30e58492368a73d7c8d8189397fa4 | [
"BSD-3-Clause"
] | null | null | null |
import pytest
import os
import sys
from napari.plugins import NapariPluginManager
import napari.plugins._builtins
@pytest.fixture
def plugin_manager():
"""PluginManager fixture that loads some test plugins"""
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
plugin_manager = NapariPluginManager(autodiscover=fixture_path)
assert fixture_path not in sys.path, 'discover path leaked into sys.path'
return plugin_manager
@pytest.fixture
def builtin_plugin_manager(plugin_manager):
for mod in plugin_manager.get_plugins():
if mod != napari.plugins._builtins:
plugin_manager.unregister(mod)
assert plugin_manager.get_plugins() == set([napari.plugins._builtins])
return plugin_manager
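# Usage sketch (hypothetical test, not part of this conftest): pytest injects
# the fixtures above by argument name, so a test can simply request them.
#
#     def test_only_builtins_are_registered(builtin_plugin_manager):
#         assert builtin_plugin_manager.get_plugins() == set(
#             [napari.plugins._builtins])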
| 31.375 | 77 | 0.763612 |
79420b687945d19c0d5ddae5c3336081cc37dac9 | 483 | py | Python | breathing.py | dotimothy/TPIOPi | 13a68ca88f5a71f1d6984c5f6c3c84d107b12fd8 | [
"MIT"
] | 3 | 2020-09-24T22:33:20.000Z | 2021-08-29T06:08:33.000Z | breathing.py | dotimothy/TPIOPi | 13a68ca88f5a71f1d6984c5f6c3c84d107b12fd8 | [
"MIT"
] | null | null | null | breathing.py | dotimothy/TPIOPi | 13a68ca88f5a71f1d6984c5f6c3c84d107b12fd8 | [
"MIT"
] | null | null | null |
try:
	import RPi.GPIO as GPIO
except ModuleNotFoundError:
	print("You don't have GPIO Pins, so you can't run this program!")
	exit()
import time
led = int(input("Please Input Led '+' Pin: "))  # BOARD pin numbers must be integers
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(led,GPIO.OUT)
pwm = GPIO.PWM(led, 100)
pwm.start(0)
try:
	# Ramp the duty cycle up and down repeatedly to "breathe" the LED;
	# exit the loop on Ctrl+C so the cleanup below is actually reached.
	while True:
		for i in range(0, 100):
			pwm.ChangeDutyCycle(i)
			time.sleep(0.005)
		for j in range(100, 0, -1):
			pwm.ChangeDutyCycle(j)
			time.sleep(0.005)
except KeyboardInterrupt:
	pass
pwm.stop()
GPIO.cleanup()
| 20.125 | 67 | 0.701863 |
79420b6a99bd5cfb6f4b3977b635a516c2aa5821 | 6,677 | py | Python | lib/tests/test_client.py | OneIdentity/safeguard-sessions-plugin-duo-mfa | d388e06def8ef79e56e8fcde6726fa9e4738e679 | [
"MIT"
] | 3 | 2019-04-16T12:41:36.000Z | 2020-08-17T20:45:10.000Z | lib/tests/test_client.py | OneIdentity/safeguard-sessions-plugin-duo-mfa | d388e06def8ef79e56e8fcde6726fa9e4738e679 | [
"MIT"
] | 2 | 2019-11-04T11:23:25.000Z | 2020-04-03T16:28:58.000Z | lib/tests/test_client.py | OneIdentity/safeguard-sessions-plugin-duo-mfa | d388e06def8ef79e56e8fcde6726fa9e4738e679 | [
"MIT"
] | 2 | 2019-04-16T12:28:52.000Z | 2020-07-22T08:54:09.000Z |
#
# Copyright (c) 2019 One Identity
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import pytest
from ssl import SSLError
from unittest.mock import patch
from safeguard.sessions.plugin.mfa_client import MFAAuthenticationFailure, MFACommunicationError, MFAServiceUnreachable
from safeguard.sessions.plugin import AAResponse
from ..client import Client
from six.moves.http_client import NotConnected
@pytest.fixture
def inject_connection_error(mocker):
request_mock = mocker.patch("duo_client.client.Client._make_request")
request_mock.side_effect = NotConnected
@pytest.mark.interactive
def test_otp_auth_ok(client, duo_user, interactive):
otp = interactive.askforinput("Please enter OTP generated with DUO device, e.g. DUO mobile")
assert client.otp_authenticate(duo_user, otp)
@pytest.mark.interactive
def test_otp_ask_for_new_otp_if_already_used(client, duo_user, interactive):
otp = interactive.askforinput("Please enter the previous OTP")
result = client.otp_authenticate(duo_user, otp)
assert result == AAResponse.need_info(
**{
"key": "otp",
"question": "This passcode has already been used. Please generate a new passcode and try again. ",
"disable_echo": False,
}
)
@pytest.mark.interactive
def test_push_auth_ok(client, duo_user, interactive):
interactive.message("Please ACCEPT the push notification in DUO application")
assert client.push_authenticate(duo_user)
@pytest.mark.interactive
def test_push_auth_user_decline(client, duo_user, interactive):
interactive.message("Please REJECT the push notification in DUO application")
with pytest.raises(MFAAuthenticationFailure) as e:
client.push_authenticate(duo_user)
assert e.match("Login request denied")
@pytest.mark.interactive
def test_bypass_auth_without_bypass_code_push(client, duo_bypass_user, interactive):
result = client.push_authenticate(duo_bypass_user)
assert result == AAResponse.accept(reason="User configured as bypass user on Duo.")
@pytest.mark.interactive
def test_bypass_auth_without_bypass_code_otp(client, duo_bypass_user, interactive):
otp = interactive.askforinput("Please enter OTP whatever you like")
result = client.otp_authenticate(duo_bypass_user, otp)
assert result == AAResponse.accept(reason="User configured as bypass user on Duo.")
@patch("lib.client.Auth")
def test_push_auth_timeout(patcher, duo_user, interactive):
with pytest.raises(MFAAuthenticationFailure) as e:
instance = patcher.return_value
instance.preauth.return_value = {"result": "auth", "devices": [{"capabilities": ["push"]}]}
instance.auth.side_effect = SSLError("The read operation timed out.")
client = Client("ikey", "skey", "host")
client.push_authenticate(duo_user)
assert e.match("timed out")
def test_bypass_auth_ok(client, duo_user, duo_passcode):
assert client.otp_authenticate(duo_user, duo_passcode)
def test_otp_auth_wrong_passcode(client, duo_user, duo_wrong_passcode):
with pytest.raises(MFAAuthenticationFailure) as e:
client.otp_authenticate(duo_user, duo_wrong_passcode)
assert e.match("Incorrect passcode")
def test_otp_auth_unknown_host(client, duo_user, duo_passcode, inject_connection_error):
with pytest.raises(MFAServiceUnreachable):
client.otp_authenticate(duo_user, duo_passcode)
def test_otp_auth_unknown_user(client, duo_wrong_user, duo_passcode):
with pytest.raises(MFAAuthenticationFailure) as e:
client.otp_authenticate(duo_wrong_user, duo_passcode)
assert e.match("Enroll an authentication")
def test_otp_auth_invalid_apikey(client, duo_user, duo_passcode):
with pytest.raises(MFACommunicationError) as e:
client._duo.skey = ""
client.otp_authenticate(duo_user, duo_passcode)
assert e.match("Invalid signature")
def test_push_auth_no_push_device(client, duo_user_without_device):
with pytest.raises(MFAAuthenticationFailure) as e:
client.push_authenticate(duo_user_without_device)
assert e.match("No push capable")
def test_push_auth_unkown_user(client, duo_wrong_user):
with pytest.raises(MFAAuthenticationFailure) as e:
client.push_authenticate(duo_wrong_user)
assert e.match("Enroll an authentication")
def test_push_auth_unknown_host(client, duo_user, inject_connection_error):
with pytest.raises(MFAServiceUnreachable):
client.push_authenticate(duo_user)
def test_push_auth_invalid_apikey(client, duo_user):
with pytest.raises(MFACommunicationError) as e:
client._duo.skey = ""
client.push_authenticate(duo_user)
assert e.match("Invalid signature")
def test_duo_set_proxy():
client = Client("ikey", "skey", "host", http_proxy_settings=dict(server="proxy", port="3128"))
assert client._duo.proxy_host == "proxy"
assert client._duo.proxy_port == 3128
def test_duo_proxy_is_not_set_when_settings_omitted():
client = Client("ikey", "skey", "host", http_proxy_settings=None)
assert client._duo.proxy_host is None
assert client._duo.proxy_port is None
def test_duo_proxy_is_not_set_when_proxy_settings_not_set():
client = Client("ikey", "skey", "host", http_proxy_settings=dict(username="u", password="p"))
assert client._duo.proxy_host is None
assert client._duo.proxy_port is None
def test_duo_set_proxy_auth():
client = Client(
"ikey", "skey", "host", http_proxy_settings=dict(server="proxy", port="3128", username="u", password="p")
)
# dTpw is Base64 encoded u:p
assert client._duo.proxy_headers == {"Proxy-Authorization": "Basic dTpw"}
| 37.723164 | 119 | 0.759623 |
79420ba289da56d1dea625837978cc3906322fc1 | 8,578 | py | Python | nion/ui/TreeCanvasItem.py | icbicket/nionui | d4a839d6c5978032e6b5108b058fbe62c33f632d | [
"Apache-2.0"
] | 3 | 2018-12-18T23:05:00.000Z | 2019-11-26T19:48:04.000Z | nion/ui/TreeCanvasItem.py | icbicket/nionui | d4a839d6c5978032e6b5108b058fbe62c33f632d | [
"Apache-2.0"
] | 36 | 2017-07-15T02:07:18.000Z | 2022-03-01T16:59:08.000Z | nion/ui/TreeCanvasItem.py | icbicket/nionui | d4a839d6c5978032e6b5108b058fbe62c33f632d | [
"Apache-2.0"
] | 12 | 2017-04-03T20:05:46.000Z | 2021-06-09T05:14:44.000Z |
"""Display a tree of drawable cells.
"""
from __future__ import annotations
# standard libraries
import copy
import dataclasses
import functools
import json
import typing
# third party libraries
# none
# local libraries
from nion.ui import CanvasItem
from nion.ui import UserInterface
from nion.utils import Geometry
_ValuePath = typing.Sequence[typing.Union[int, str]]
@dataclasses.dataclass
class TreeItem:
canvas_item: CanvasItem.AbstractCanvasItem
item_type: str
is_expanded: bool
value_path: _ValuePath
class TreeCanvasItemDelegate(typing.Protocol):
def build_items(self, get_font_metrics_fn: typing.Callable[[str, str], UserInterface.FontMetrics],
item_width: typing.Optional[int]) -> typing.Sequence[TreeItem]: ...
def toggle_is_expanded(self, value_path_key: str) -> None: ...
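# A minimal delegate sketch (illustrative only; ExampleTreeDelegate is
# hypothetical and not part of this module). TreeCanvasItem passes
# json.dumps(value_path) as the key to toggle_is_expanded and rebuilds its
# rows when the host calls reconstruct():
#
#     class ExampleTreeDelegate:
#         def __init__(self) -> None:
#             self.expanded: typing.Set[str] = set()
#
#         def toggle_is_expanded(self, value_path_key: str) -> None:
#             self.expanded.symmetric_difference_update({value_path_key})
#
#         def build_items(self, get_font_metrics_fn, item_width):
#             # return a sequence of TreeItem instances whose is_expanded
#             # flags reflect self.expanded
#             return []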
class TreeCanvasItem(CanvasItem.CanvasItemComposition):
"""
Takes a delegate that supports the following properties, methods, and optional methods:
Properties:
None
Methods:
toggle_is_expanded(value_path) -> None
build_items(get_font_metrics_fn, item_width) -> CanvasItem
Optional methods:
None
Call reconstruct when data or selection changes.
"""
def __init__(self, get_font_metrics_fn: typing.Callable[[str, str], UserInterface.FontMetrics], delegate: TreeCanvasItemDelegate) -> None:
super().__init__()
self.__get_font_metrics_fn = get_font_metrics_fn
self.__delegate = delegate
# configure super
self.wants_mouse_events = True
self.focusable = True
# internal variables
self.__mouse_pressed = False
self.__mouse_index: typing.Optional[int] = None
self.__mouse_position: typing.Optional[Geometry.IntPoint] = None
self.__mouse_dragging = False
self.__mouse_item: typing.Optional[_ValuePath] = None
self.__selected_value_paths: typing.Set[str] = set()
self.layout = CanvasItem.CanvasItemColumnLayout()
self.on_content_height_changed: typing.Optional[typing.Callable[[int], None]] = None
def close(self) -> None:
self.on_content_height_changed = None
super().close()
def __is_selected(self, value_path: _ValuePath) -> bool:
return json.dumps(value_path) in self.__selected_value_paths
def reconstruct(self) -> None:
for canvas_item in copy.copy(self.canvas_items):
self._remove_canvas_item(canvas_item)
indent_size = 16
canvas_bounds = self.canvas_bounds
item_width = int(canvas_bounds.width) if canvas_bounds else None
canvas_height = 0
ITEM_HEIGHT = 18
for tree_item in self.__delegate.build_items(self.__get_font_metrics_fn, item_width):
indent = (len(tree_item.value_path) - 1) * indent_size
item_row = CanvasItem.CanvasItemComposition()
item_row.update_sizing(item_row.sizing.with_fixed_height(ITEM_HEIGHT))
item_row.layout = CanvasItem.CanvasItemRowLayout()
item_row.add_spacing(indent)
if tree_item.item_type == "parent":
twist_down_canvas_item = CanvasItem.TwistDownCanvasItem()
twist_down_canvas_item.update_sizing(twist_down_canvas_item.sizing.with_fixed_size(Geometry.IntSize(height=ITEM_HEIGHT, width=16)))
twist_down_canvas_item.checked = tree_item.is_expanded
def twist_down_clicked(toggle_value_path: _ValuePath) -> None:
self.__toggle_is_expanded(toggle_value_path)
twist_down_canvas_item.on_button_clicked = functools.partial(twist_down_clicked, tree_item.value_path)
item_row.add_canvas_item(twist_down_canvas_item)
else:
item_row.add_spacing(indent_size)
item_row.add_canvas_item(tree_item.canvas_item)
item_row.add_stretch()
setattr(item_row, "value_path", tree_item.value_path)
self.add_canvas_item(item_row)
canvas_height += ITEM_HEIGHT
self.update()
if callable(self.on_content_height_changed):
self.on_content_height_changed(canvas_height)
def __set_selection(self, value_path: _ValuePath) -> None:
self.__selected_value_paths.clear()
self.__selected_value_paths.add(json.dumps(value_path))
def __extend_selection(self, value_path: _ValuePath) -> None:
pass
def __toggle_selection(self, value_path: _ValuePath) -> None:
value_path_key = json.dumps(value_path)
if value_path_key in self.__selected_value_paths:
self.__selected_value_paths.remove(value_path_key)
else:
self.__selected_value_paths.add(value_path_key)
def __toggle_is_expanded(self, value_path: _ValuePath) -> None:
value_path_key = json.dumps(value_path)
self.__delegate.toggle_is_expanded(value_path_key)
self.reconstruct()
def __context_menu_event(self, value_path: typing.Optional[_ValuePath], x: int, y: int, gx: int, gy: int) -> bool:
pass
def __drag_started(self, value_path: typing.Optional[_ValuePath], x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> None:
pass
def __delete(self) -> None:
pass
def __adjust_selection(self, action: str, extend: bool) -> None:
pass
def __value_path_at_point(self, p: Geometry.IntPoint) -> typing.Optional[_ValuePath]:
for canvas_item in self.canvas_items_at_point(p.x, p.y):
if hasattr(canvas_item, "value_path"):
return typing.cast(_ValuePath, getattr(canvas_item, "value_path"))
return None
def context_menu_event(self, x: int, y: int, gx: int, gy: int) -> bool:
p = Geometry.IntPoint(y=y, x=x)
value_path = self.__value_path_at_point(p)
if value_path:
if not self.__is_selected(value_path):
self.__set_selection(value_path)
return self.__context_menu_event(value_path, x, y, gx, gy)
return self.__context_menu_event(None, x, y, gx, gy)
def mouse_pressed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
p = Geometry.IntPoint(y=y, x=x)
value_path = self.__value_path_at_point(p)
if value_path:
if modifiers.shift:
self.__extend_selection(value_path)
elif modifiers.control:
self.__toggle_selection(value_path)
else:
self.__set_selection(value_path)
self.__mouse_pressed = True
self.__mouse_position = Geometry.IntPoint(y=y, x=x)
self.__mouse_item = value_path
return True
return super().mouse_pressed(x, y, modifiers)
def mouse_released(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
if self.__mouse_pressed:
# double check whether mouse_released has been called explicitly as part of a drag.
# see https://bugreports.qt.io/browse/QTBUG-40733
pass # leave this here for future reference
self.__mouse_pressed = False
self.__mouse_item = None
self.__mouse_position = None
self.__mouse_dragging = False
return True
def mouse_position_changed(self, x: int, y: int, modifiers: UserInterface.KeyboardModifiers) -> bool:
if self.__mouse_pressed and self.__mouse_position:
mouse_position_f = self.__mouse_position.to_float_point()
point_f = Geometry.FloatPoint(y=y, x=x)
if not self.__mouse_dragging and Geometry.distance(mouse_position_f, point_f) > 8:
self.__mouse_dragging = True
self.__drag_started(self.__mouse_item, x, y, modifiers)
# once a drag starts, mouse release will not be called; call it here instead
self.mouse_released(x, y, modifiers)
return True
return super().mouse_position_changed(x, y, modifiers)
def key_pressed(self, key: UserInterface.Key) -> bool:
if key.is_delete:
self.__delete()
return True
if key.is_up_arrow:
self.__adjust_selection("up", key.modifiers.shift)
return True
if key.is_down_arrow:
self.__adjust_selection("down", key.modifiers.shift)
return True
return super().key_pressed(key)
def handle_select_all(self) -> bool:
self.__adjust_selection("all", False)
return True
| 39.712963 | 147 | 0.671019 |
79420c4f6ac17b9af0afd3ab60d5c30154187af6 | 193 | py | Python | flegeapp/flegeapp/doctype/pflege_patient/pflege_patient.py | oderao/flegeapp | 5fa7ea17feb271bd84dffe45a58278a4b35813f2 | [
"MIT"
] | 1 | 2022-03-16T18:15:48.000Z | 2022-03-16T18:15:48.000Z | flegeapp/flegeapp/doctype/pflege_patient/pflege_patient.py | oderao/flegeapp | 5fa7ea17feb271bd84dffe45a58278a4b35813f2 | [
"MIT"
] | null | null | null | flegeapp/flegeapp/doctype/pflege_patient/pflege_patient.py | oderao/flegeapp | 5fa7ea17feb271bd84dffe45a58278a4b35813f2 | [
"MIT"
] | null | null | null |
# Copyright (c) 2021, Flege and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class PflegePatient(Document):
pass
| 21.444444 | 49 | 0.792746 |
79420c905a93f413c5e25fd6ece77a65862cbc2a | 718 | py | Python | afm/reaction.py | lily90502/AutoFragmentModeling | c18b65e57d311c416121bdd5325bc8cae522cb55 | [
"MIT"
] | 3 | 2019-09-25T16:35:04.000Z | 2021-03-16T13:15:46.000Z | afm/reaction.py | lily90502/AutoFragmentModeling | c18b65e57d311c416121bdd5325bc8cae522cb55 | [
"MIT"
] | 1 | 2019-08-29T14:13:55.000Z | 2019-08-29T14:13:55.000Z | afm/reaction.py | lily90502/AutoFragmentModeling | c18b65e57d311c416121bdd5325bc8cae522cb55 | [
"MIT"
] | 2 | 2020-05-25T14:41:24.000Z | 2021-04-09T06:32:17.000Z |
class FragmentReaction(object):
def __init__(self,
index=-1,
reactants=None,
products=None,
kinetics=None,
reversible=False,
pairs=None,
family=None,
reaction_repr=None
):
self.index = index
self.reactants = reactants
self.products = products
self.kinetics = kinetics
self.reversible = reversible
self.pairs = pairs
self.family = family
self.reaction_repr = reaction_repr
def __str__(self):
"""
Return a string representation of the reaction, in the form 'A + B <=> C + D'.
"""
arrow = ' <=> '
if not self.reversible: arrow = ' => '
return arrow.join([' + '.join([str(s) for s in self.reactants]), ' + '.join([str(s) for s in self.products])])
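# Usage sketch (hypothetical values; assumes each reactant/product has a usable
# __str__, e.g. plain strings here):
#
#     rxn = FragmentReaction(reactants=['CH3', 'H'], products=['CH4'],
#                            reversible=True)
#     str(rxn)  # -> 'CH3 + H <=> CH4'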
| 22.4375 | 112 | 0.640669 |
79420d6f30388d7e7c26edf7699284fec6ee827f | 48,020 | py | Python | tests/components/samsungtv/test_config_flow.py | rklomp/home-assistant-core | e7b05ef452dc574a209bbbb79d008e1c85a807a7 | [
"Apache-2.0"
] | null | null | null | tests/components/samsungtv/test_config_flow.py | rklomp/home-assistant-core | e7b05ef452dc574a209bbbb79d008e1c85a807a7 | [
"Apache-2.0"
] | 15 | 2021-11-23T16:10:30.000Z | 2022-03-31T06:25:27.000Z | tests/components/samsungtv/test_config_flow.py | rklomp/home-assistant-core | e7b05ef452dc574a209bbbb79d008e1c85a807a7 | [
"Apache-2.0"
] | null | null | null |
"""Tests for Samsung TV config flow."""
import socket
from unittest.mock import Mock, PropertyMock, call, patch
from samsungctl.exceptions import AccessDenied, UnhandledResponse
from samsungtvws.exceptions import ConnectionFailure, HttpApiError
from websocket import WebSocketException, WebSocketProtocolException
from homeassistant import config_entries
from homeassistant.components import dhcp, zeroconf
from homeassistant.components.samsungtv.const import (
CONF_MANUFACTURER,
CONF_MODEL,
DEFAULT_MANUFACTURER,
DOMAIN,
LEGACY_PORT,
METHOD_LEGACY,
METHOD_WEBSOCKET,
RESULT_AUTH_MISSING,
RESULT_CANNOT_CONNECT,
RESULT_NOT_SUPPORTED,
RESULT_UNKNOWN_HOST,
TIMEOUT_REQUEST,
TIMEOUT_WEBSOCKET,
)
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_MANUFACTURER,
ATTR_UPNP_MODEL_NAME,
ATTR_UPNP_UDN,
)
from homeassistant.const import (
CONF_HOST,
CONF_ID,
CONF_IP_ADDRESS,
CONF_MAC,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_TOKEN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
from tests.components.samsungtv.conftest import (
RESULT_ALREADY_CONFIGURED,
RESULT_ALREADY_IN_PROGRESS,
)
MOCK_IMPORT_DATA = {
CONF_HOST: "fake_host",
CONF_NAME: "fake",
CONF_PORT: 55000,
}
MOCK_IMPORT_DATA_WITHOUT_NAME = {
CONF_HOST: "fake_host",
}
MOCK_IMPORT_WSDATA = {
CONF_HOST: "fake_host",
CONF_NAME: "fake",
CONF_PORT: 8002,
}
MOCK_USER_DATA = {CONF_HOST: "fake_host", CONF_NAME: "fake_name"}
MOCK_SSDP_DATA = {
ATTR_SSDP_LOCATION: "https://fake_host:12345/test",
ATTR_UPNP_FRIENDLY_NAME: "[TV] fake_name",
ATTR_UPNP_MANUFACTURER: "Samsung fake_manufacturer",
ATTR_UPNP_MODEL_NAME: "fake_model",
ATTR_UPNP_UDN: "uuid:0d1cef00-00dc-1000-9c80-4844f7b172de",
}
MOCK_SSDP_DATA_NOPREFIX = {
ATTR_SSDP_LOCATION: "http://fake2_host:12345/test",
ATTR_UPNP_FRIENDLY_NAME: "fake2_name",
ATTR_UPNP_MANUFACTURER: "Samsung fake2_manufacturer",
ATTR_UPNP_MODEL_NAME: "fake2_model",
ATTR_UPNP_UDN: "uuid:0d1cef00-00dc-1000-9c80-4844f7b172df",
}
MOCK_SSDP_DATA_WRONGMODEL = {
ATTR_SSDP_LOCATION: "http://fake2_host:12345/test",
ATTR_UPNP_FRIENDLY_NAME: "fake2_name",
ATTR_UPNP_MANUFACTURER: "fake2_manufacturer",
ATTR_UPNP_MODEL_NAME: "HW-Qfake",
ATTR_UPNP_UDN: "uuid:0d1cef00-00dc-1000-9c80-4844f7b172df",
}
MOCK_DHCP_DATA = dhcp.DhcpServiceInfo(
ip="fake_host", macaddress="aa:bb:cc:dd:ee:ff", hostname="fake_hostname"
)
EXISTING_IP = "192.168.40.221"
MOCK_ZEROCONF_DATA = zeroconf.ZeroconfServiceInfo(
host="fake_host",
port=1234,
properties={
"deviceid": "aa:bb:cc:dd:ee:ff",
"manufacturer": "fake_manufacturer",
"model": "fake_model",
"serialNumber": "fake_serial",
},
)
MOCK_OLD_ENTRY = {
CONF_HOST: "fake_host",
CONF_ID: "0d1cef00-00dc-1000-9c80-4844f7b172de_old",
CONF_IP_ADDRESS: EXISTING_IP,
CONF_METHOD: "legacy",
CONF_PORT: None,
}
MOCK_LEGACY_ENTRY = {
CONF_HOST: EXISTING_IP,
CONF_ID: "0d1cef00-00dc-1000-9c80-4844f7b172de_old",
CONF_METHOD: "legacy",
CONF_PORT: None,
}
MOCK_WS_ENTRY = {
CONF_HOST: "fake_host",
CONF_METHOD: METHOD_WEBSOCKET,
CONF_PORT: 8002,
CONF_MODEL: "any",
CONF_NAME: "any",
}
MOCK_DEVICE_INFO = {
"device": {
"type": "Samsung SmartTV",
"name": "fake_name",
"modelName": "fake_model",
},
"id": "123",
}
MOCK_DEVICE_INFO_2 = {
"device": {
"type": "Samsung SmartTV",
"name": "fake2_name",
"modelName": "fake2_model",
},
"id": "345",
}
AUTODETECT_LEGACY = {
"name": "HomeAssistant",
"description": "HomeAssistant",
"id": "ha.component.samsung",
"method": "legacy",
"port": None,
"host": "fake_host",
"timeout": TIMEOUT_REQUEST,
}
AUTODETECT_WEBSOCKET_PLAIN = {
"host": "fake_host",
"name": "HomeAssistant",
"port": 8001,
"timeout": TIMEOUT_REQUEST,
"token": None,
}
AUTODETECT_WEBSOCKET_SSL = {
"host": "fake_host",
"name": "HomeAssistant",
"port": 8002,
"timeout": TIMEOUT_REQUEST,
"token": None,
}
DEVICEINFO_WEBSOCKET_SSL = {
"host": "fake_host",
"name": "HomeAssistant",
"port": 8002,
"timeout": TIMEOUT_WEBSOCKET,
"token": "123456789",
}
async def test_user_legacy(hass: HomeAssistant, remote: Mock):
"""Test starting a flow by user."""
# show form
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
# legacy tv entry created
assert result["type"] == "create_entry"
assert result["title"] == "fake_name"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "fake_name"
assert result["data"][CONF_METHOD] == "legacy"
assert result["data"][CONF_MANUFACTURER] == DEFAULT_MANUFACTURER
assert result["data"][CONF_MODEL] is None
assert result["result"].unique_id is None
async def test_user_websocket(hass: HomeAssistant, remotews: Mock):
"""Test starting a flow by user."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote", side_effect=OSError("Boom")
):
# show form
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
# websocket tv entry created
assert result["type"] == "create_entry"
assert result["title"] == "Living Room (82GXARRS)"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "Living Room"
assert result["data"][CONF_METHOD] == "websocket"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["data"][CONF_MODEL] == "82GXARRS"
assert result["result"].unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4"
async def test_user_legacy_missing_auth(
hass: HomeAssistant, remote: Mock, remotews: Mock
):
"""Test starting a flow by user with authentication."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=AccessDenied("Boom"),
):
# legacy device missing authentication
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_AUTH_MISSING
async def test_user_legacy_not_supported(hass: HomeAssistant, remote: Mock):
"""Test starting a flow by user for not supported device."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=UnhandledResponse("Boom"),
):
# legacy device not supported
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
async def test_user_websocket_not_supported(hass: HomeAssistant, remotews: Mock):
"""Test starting a flow by user for not supported device."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
side_effect=WebSocketProtocolException("Boom"),
):
# websocket device not supported
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
async def test_user_not_successful(hass: HomeAssistant, remotews: Mock):
"""Test starting a flow by user but no connection found."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
side_effect=OSError("Boom"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_CANNOT_CONNECT
async def test_user_not_successful_2(hass: HomeAssistant, remotews: Mock):
"""Test starting a flow by user but no connection found."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
side_effect=ConnectionFailure("Boom"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_CANNOT_CONNECT
async def test_ssdp(hass: HomeAssistant, remote: Mock):
"""Test starting a flow from discovery."""
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.device_info",
return_value=MOCK_DEVICE_INFO,
), patch("getmac.get_mac_address", return_value="aa:bb:cc:dd:ee:ff"):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "create_entry"
assert result["title"] == "fake_name (fake_model)"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "fake_name"
assert result["data"][CONF_MANUFACTURER] == "Samsung fake_manufacturer"
assert result["data"][CONF_MODEL] == "fake_model"
assert result["result"].unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
async def test_ssdp_noprefix(hass: HomeAssistant, remote: Mock):
"""Test starting a flow from discovery without prefixes."""
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.device_info",
return_value=MOCK_DEVICE_INFO_2,
), patch("getmac.get_mac_address", return_value="aa:bb:cc:dd:ee:ff"):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=MOCK_SSDP_DATA_NOPREFIX,
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
with patch(
"homeassistant.components.samsungtv.bridge.Remote.__enter__",
return_value=True,
):
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "create_entry"
assert result["title"] == "fake2_name (fake2_model)"
assert result["data"][CONF_HOST] == "fake2_host"
assert result["data"][CONF_NAME] == "fake2_name"
assert result["data"][CONF_MANUFACTURER] == "Samsung fake2_manufacturer"
assert result["data"][CONF_MODEL] == "fake2_model"
assert result["result"].unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172df"
async def test_ssdp_legacy_missing_auth(
hass: HomeAssistant, remote: Mock, remotews: Mock
):
"""Test starting a flow from discovery with authentication."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=AccessDenied("Boom"),
):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# missing authentication
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVLegacyBridge.try_connect",
return_value=RESULT_AUTH_MISSING,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_AUTH_MISSING
async def test_ssdp_legacy_not_supported(
hass: HomeAssistant, remote: Mock, remotews: Mock
):
"""Test starting a flow from discovery for not supported device."""
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVLegacyBridge.try_connect",
return_value=RESULT_NOT_SUPPORTED,
):
# device not supported
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
async def test_ssdp_websocket_success_populates_mac_address(
hass: HomeAssistant,
remote: Mock,
remotews: Mock,
):
"""Test starting a flow from ssdp for a supported device populates the mac."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "create_entry"
assert result["title"] == "Living Room (82GXARRS)"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "Living Room"
assert result["data"][CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert result["data"][CONF_MANUFACTURER] == "Samsung fake_manufacturer"
assert result["data"][CONF_MODEL] == "82GXARRS"
assert result["result"].unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
async def test_ssdp_websocket_not_supported(hass: HomeAssistant, remote: Mock):
"""Test starting a flow from discovery for not supported device."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
side_effect=WebSocketProtocolException("Boom"),
):
# device not supported
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
async def test_ssdp_model_not_supported(hass: HomeAssistant, remote: Mock):
"""Test starting a flow from discovery."""
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=MOCK_SSDP_DATA_WRONGMODEL,
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
async def test_ssdp_not_successful(
hass: HomeAssistant, remote: Mock, no_mac_address: Mock
):
"""Test starting a flow from discovery but no device found."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.device_info",
return_value=MOCK_DEVICE_INFO,
):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# device not found
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_CANNOT_CONNECT
async def test_ssdp_not_successful_2(
hass: HomeAssistant, remote: Mock, no_mac_address: Mock
):
"""Test starting a flow from discovery but no device found."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
side_effect=ConnectionFailure("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.device_info",
return_value=MOCK_DEVICE_INFO,
):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# device not found
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_CANNOT_CONNECT
async def test_ssdp_already_in_progress(hass: HomeAssistant, remote: Mock):
"""Test starting a flow from discovery twice."""
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.device_info",
return_value=MOCK_DEVICE_INFO,
), patch("getmac.get_mac_address", return_value="aa:bb:cc:dd:ee:ff"):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# failed as already in progress
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_ALREADY_IN_PROGRESS
async def test_ssdp_already_configured(hass: HomeAssistant, remote: Mock):
"""Test starting a flow from discovery when already configured."""
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.device_info",
return_value=MOCK_DEVICE_INFO,
), patch("getmac.get_mac_address", return_value="aa:bb:cc:dd:ee:ff"):
# entry was added
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
entry = result["result"]
assert entry.data[CONF_MANUFACTURER] == DEFAULT_MANUFACTURER
assert entry.data[CONF_MODEL] is None
assert entry.unique_id is None
# failed as already configured
result2 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result2["type"] == "abort"
assert result2["reason"] == RESULT_ALREADY_CONFIGURED
# check updated device info
assert entry.unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
async def test_import_legacy(hass: HomeAssistant, remote: Mock):
"""Test importing from yaml with hostname."""
with patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
), patch("getmac.get_mac_address", return_value="aa:bb:cc:dd:ee:ff"):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "fake"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "fake"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["result"].unique_id is None
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].data[CONF_METHOD] == METHOD_LEGACY
assert entries[0].data[CONF_PORT] == LEGACY_PORT
async def test_import_legacy_without_name(
hass: HomeAssistant,
remote: Mock,
remotews_no_device_info: Mock,
no_mac_address: Mock,
):
"""Test importing from yaml without a name."""
with patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT_DATA_WITHOUT_NAME,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["result"].unique_id is None
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].data[CONF_METHOD] == METHOD_LEGACY
assert entries[0].data[CONF_PORT] == LEGACY_PORT
async def test_import_websocket(hass: HomeAssistant, remotews: Mock):
"""Test importing from yaml with hostname."""
with patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT_WSDATA,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "fake"
assert result["data"][CONF_METHOD] == METHOD_WEBSOCKET
assert result["data"][CONF_PORT] == 8002
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "fake"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["result"].unique_id is None
async def test_import_websocket_without_port(hass: HomeAssistant, remotews: Mock):
"""Test importing from yaml with hostname by no port."""
with patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT_WSDATA,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "fake"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "fake"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["result"].unique_id is None
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].data[CONF_METHOD] == METHOD_WEBSOCKET
assert entries[0].data[CONF_PORT] == 8002
async def test_import_unknown_host(hass: HomeAssistant, remotews: Mock):
"""Test importing from yaml with hostname that does not resolve."""
with patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
side_effect=socket.gaierror,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == RESULT_UNKNOWN_HOST
async def test_dhcp(hass: HomeAssistant, remote: Mock, remotews: Mock):
"""Test starting a flow from dhcp."""
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=MOCK_DHCP_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "create_entry"
assert result["title"] == "Living Room (82GXARRS)"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "Living Room"
assert result["data"][CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["data"][CONF_MODEL] == "82GXARRS"
assert result["result"].unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4"
async def test_zeroconf(hass: HomeAssistant, remote: Mock, remotews: Mock):
"""Test starting a flow from zeroconf."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "create_entry"
assert result["title"] == "Living Room (82GXARRS)"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "Living Room"
assert result["data"][CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["data"][CONF_MODEL] == "82GXARRS"
assert result["result"].unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4"
async def test_zeroconf_ignores_soundbar(hass: HomeAssistant, remotews_soundbar: Mock):
"""Test starting a flow from zeroconf where the device is actually a soundbar."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
async def test_zeroconf_no_device_info(
hass: HomeAssistant, remote: Mock, remotews_no_device_info: Mock
):
"""Test starting a flow from zeroconf where device_info returns None."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
async def test_zeroconf_and_dhcp_same_time(hass: HomeAssistant, remotews: Mock):
"""Test starting a flow from zeroconf and dhcp."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=MOCK_DHCP_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result2 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "already_in_progress"
async def test_autodetect_websocket(hass: HomeAssistant, remote: Mock, remotews: Mock):
"""Test for send key with autodetection of protocol."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS"
) as remotews:
enter = Mock()
type(enter).token = PropertyMock(return_value="123456789")
remote = Mock()
remote.__enter__ = Mock(return_value=enter)
remote.__exit__ = Mock(return_value=False)
remote.rest_device_info.return_value = {
"id": "uuid:be9554b9-c9fb-41f4-8920-22da015376a4",
"device": {
"modelName": "82GXARRS",
"networkType": "wireless",
"wifiMac": "aa:bb:cc:dd:ee:ff",
"udn": "uuid:be9554b9-c9fb-41f4-8920-22da015376a4",
"mac": "aa:bb:cc:dd:ee:ff",
"name": "[TV] Living Room",
"type": "Samsung SmartTV",
},
}
remotews.return_value = remote
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert result["data"][CONF_METHOD] == "websocket"
assert result["data"][CONF_TOKEN] == "123456789"
assert remotews.call_count == 2
assert remotews.call_args_list == [
call(**AUTODETECT_WEBSOCKET_SSL),
call(**DEVICEINFO_WEBSOCKET_SSL),
]
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
async def test_websocket_no_mac(hass: HomeAssistant, remote: Mock, remotews: Mock):
"""Test for send key with autodetection of protocol."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS"
) as remotews, patch(
"getmac.get_mac_address", return_value="gg:hh:ii:ll:mm:nn"
):
enter = Mock()
type(enter).token = PropertyMock(return_value="123456789")
remote = Mock()
remote.__enter__ = Mock(return_value=enter)
remote.__exit__ = Mock(return_value=False)
remote.rest_device_info.return_value = {
"id": "uuid:be9554b9-c9fb-41f4-8920-22da015376a4",
"device": {
"modelName": "82GXARRS",
"networkType": "lan",
"udn": "uuid:be9554b9-c9fb-41f4-8920-22da015376a4",
"name": "[TV] Living Room",
"type": "Samsung SmartTV",
},
}
remotews.return_value = remote
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert result["data"][CONF_METHOD] == "websocket"
assert result["data"][CONF_TOKEN] == "123456789"
assert result["data"][CONF_MAC] == "gg:hh:ii:ll:mm:nn"
assert remotews.call_count == 2
assert remotews.call_args_list == [
call(**AUTODETECT_WEBSOCKET_SSL),
call(**DEVICEINFO_WEBSOCKET_SSL),
]
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].data[CONF_MAC] == "gg:hh:ii:ll:mm:nn"
async def test_autodetect_auth_missing(hass: HomeAssistant, remote: Mock):
"""Test for send key with autodetection of protocol."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=[AccessDenied("Boom")],
) as remote, patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_AUTH_MISSING
assert remote.call_count == 1
assert remote.call_args_list == [call(AUTODETECT_LEGACY)]
async def test_autodetect_not_supported(hass: HomeAssistant, remote: Mock):
"""Test for send key with autodetection of protocol."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=[UnhandledResponse("Boom")],
) as remote, patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
assert remote.call_count == 1
assert remote.call_args_list == [call(AUTODETECT_LEGACY)]
async def test_autodetect_legacy(hass: HomeAssistant, remote: Mock):
"""Test for send key with autodetection of protocol."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert result["data"][CONF_METHOD] == "legacy"
assert result["data"][CONF_NAME] == "fake_name"
assert result["data"][CONF_MAC] is None
assert result["data"][CONF_PORT] == LEGACY_PORT
async def test_autodetect_none(hass: HomeAssistant, remote: Mock, remotews: Mock):
"""Test for send key with autodetection of protocol."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
) as remote, patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
side_effect=OSError("Boom"),
) as remotews, patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_CANNOT_CONNECT
assert remote.call_count == 1
assert remote.call_args_list == [
call(AUTODETECT_LEGACY),
]
assert remotews.call_count == 2
assert remotews.call_args_list == [
call(**AUTODETECT_WEBSOCKET_SSL),
call(**AUTODETECT_WEBSOCKET_PLAIN),
]
async def test_update_old_entry(hass: HomeAssistant, remote: Mock, remotews: Mock):
"""Test update of old entry."""
with patch("homeassistant.components.samsungtv.bridge.Remote") as remote:
remote().rest_device_info.return_value = {
"device": {
"modelName": "fake_model2",
"name": "[TV] Fake Name",
"udn": "uuid:fake_serial",
}
}
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY)
entry.add_to_hass(hass)
config_entries_domain = hass.config_entries.async_entries(DOMAIN)
assert len(config_entries_domain) == 1
assert entry is config_entries_domain[0]
assert entry.data[CONF_ID] == "0d1cef00-00dc-1000-9c80-4844f7b172de_old"
assert entry.data[CONF_IP_ADDRESS] == EXISTING_IP
assert not entry.unique_id
assert await async_setup_component(hass, DOMAIN, {}) is True
await hass.async_block_till_done()
    # fails because the device is already configured
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_ALREADY_CONFIGURED
config_entries_domain = hass.config_entries.async_entries(DOMAIN)
assert len(config_entries_domain) == 1
entry2 = config_entries_domain[0]
# check updated device info
assert entry2.data.get(CONF_ID) is not None
assert entry2.data.get(CONF_IP_ADDRESS) is not None
assert entry2.unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
async def test_update_missing_mac_unique_id_added_from_dhcp(hass, remotews: Mock):
"""Test missing mac and unique id added."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY, unique_id=None)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=MOCK_DHCP_DATA,
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4"
async def test_update_missing_mac_unique_id_added_from_zeroconf(hass, remotews: Mock):
"""Test missing mac and unique id added."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY, unique_id=None)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4"
async def test_update_missing_mac_unique_id_added_from_ssdp(hass, remotews: Mock):
"""Test missing mac and unique id added via ssdp."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY, unique_id=None)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=MOCK_SSDP_DATA,
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
async def test_update_missing_mac_added_unique_id_preserved_from_zeroconf(
hass, remotews: Mock
):
"""Test missing mac and unique id added."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_OLD_ENTRY,
unique_id="0d1cef00-00dc-1000-9c80-4844f7b172de",
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
async def test_update_legacy_missing_mac_from_dhcp(hass, remote: Mock):
"""Test missing mac added."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_LEGACY_ENTRY,
unique_id="0d1cef00-00dc-1000-9c80-4844f7b172de",
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
ip=EXISTING_IP, macaddress="aa:bb:cc:dd:ee:ff", hostname="fake_hostname"
),
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
async def test_update_legacy_missing_mac_from_dhcp_no_unique_id(hass, remote: Mock):
"""Test missing mac added when there is no unique id."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_LEGACY_ENTRY,
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS.rest_device_info",
side_effect=HttpApiError,
), patch(
"homeassistant.components.samsungtv.bridge.Remote.__enter__",
return_value=True,
), patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
ip=EXISTING_IP, macaddress="aa:bb:cc:dd:ee:ff", hostname="fake_hostname"
),
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id is None
async def test_form_reauth_legacy(hass, remote: Mock):
"""Test reauthenticate legacy."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"entry_id": entry.entry_id, "source": config_entries.SOURCE_REAUTH},
data=entry.data,
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "reauth_successful"
async def test_form_reauth_websocket(hass, remotews: Mock):
"""Test reauthenticate websocket."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_WS_ENTRY)
entry.add_to_hass(hass)
assert entry.state == config_entries.ConfigEntryState.NOT_LOADED
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"entry_id": entry.entry_id, "source": config_entries.SOURCE_REAUTH},
data=entry.data,
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "reauth_successful"
assert entry.state == config_entries.ConfigEntryState.LOADED
async def test_form_reauth_websocket_cannot_connect(hass, remotews: Mock):
"""Test reauthenticate websocket when we cannot connect on the first attempt."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_WS_ENTRY)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"entry_id": entry.entry_id, "source": config_entries.SOURCE_REAUTH},
data=entry.data,
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
side_effect=ConnectionFailure,
), patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "form"
assert result2["errors"] == {"base": RESULT_AUTH_MISSING}
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result3["type"] == "abort"
assert result3["reason"] == "reauth_successful"
async def test_form_reauth_websocket_not_supported(hass, remotews: Mock):
"""Test reauthenticate websocket when the device is not supported."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_WS_ENTRY)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"entry_id": entry.entry_id, "source": config_entries.SOURCE_REAUTH},
data=entry.data,
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
side_effect=WebSocketException,
), patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
return_value="fake_host",
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "not_supported"
| 37.081081 | 90 | 0.66414 |
79420e8e6de3c6320ac6bbcfa2354afd7b8779fd | 21 | py | Python | tests/__init__.py | radiasoft/opal | 28fe320c0b9f9d65e78a95df59daa5126304b184 | [
"Apache-2.0"
] | 1 | 2017-04-27T12:59:52.000Z | 2017-04-27T12:59:52.000Z | tests/interpolaters_depositers/__init__.py | radiasoft/opal | 28fe320c0b9f9d65e78a95df59daa5126304b184 | [
"Apache-2.0"
] | null | null | null | tests/interpolaters_depositers/__init__.py | radiasoft/opal | 28fe320c0b9f9d65e78a95df59daa5126304b184 | [
"Apache-2.0"
] | null | null | null | __author__ = 'swebb'
| 10.5 | 20 | 0.714286 |
79420f9fb31348502c2e70ea073a43f4881c43e5 | 1,221 | py | Python | build/mbf_simple_nav/catkin_generated/pkg.develspace.context.pc.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | build/mbf_simple_nav/catkin_generated/pkg.develspace.context.pc.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | build/mbf_simple_nav/catkin_generated/pkg.develspace.context.pc.py | 6RiverSystems/darknet_ros | 03c72b96afa99f7cc75f7792b51deb4a7f4ed379 | [
"BSD-3-Clause"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kalyco/mfp_workspace/src/move_base_flex/mbf_simple_nav/include;/usr/include".split(';') if "/home/kalyco/mfp_workspace/src/move_base_flex/mbf_simple_nav/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "actionlib;actionlib_msgs;dynamic_reconfigure;geometry_msgs;mbf_abstract_nav;mbf_msgs;mbf_abstract_core;nav_msgs;pluginlib;roscpp;std_msgs;std_srvs;tf;tf2;tf2_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmbf_simple_server;/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so".split(';') if "-lmbf_simple_server;/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so" != "" else []
PROJECT_NAME = "mbf_simple_nav"
PROJECT_SPACE_DIR = "/home/kalyco/mfp_workspace/devel/.private/mbf_simple_nav"
PROJECT_VERSION = "0.2.4"
| 135.666667 | 562 | 0.812449 |
7942105450bf1ddce7294bffb107b17bebb5056a | 19,229 | py | Python | tf_slim/data/tfexample_decoder.py | englertbruno/tf-slim | d0eb4075a8e18b8c29004dd5eb3035c36a8d9784 | [
"Apache-2.0"
] | 9 | 2020-04-21T12:31:23.000Z | 2020-11-20T15:07:29.000Z | tf_slim/data/tfexample_decoder.py | englertbruno/tf-slim | d0eb4075a8e18b8c29004dd5eb3035c36a8d9784 | [
"Apache-2.0"
] | 1 | 2020-11-23T07:38:07.000Z | 2020-11-23T07:38:07.000Z | tf_slim/data/tfexample_decoder.py | englertbruno/tf-slim | d0eb4075a8e18b8c29004dd5eb3035c36a8d9784 | [
"Apache-2.0"
] | 3 | 2020-05-08T02:55:11.000Z | 2021-12-10T07:16:28.000Z | # coding=utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the TFExampleDecoder its associated helper classes.
The TFExampleDecode is a DataDecoder used to decode TensorFlow Example protos.
In order to do so each requested item must be paired with one or more Example
features that are parsed to produce the Tensor-based manifestation of the item.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tf_slim.data import data_decoder
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
# pylint:enable=g-direct-tensorflow-import
@six.add_metaclass(abc.ABCMeta)
class ItemHandler(object):
"""Specifies the item-to-Features mapping for tf.parse_example.
An ItemHandler both specifies a list of Features used for parsing an Example
proto as well as a function that post-processes the results of Example
parsing.
"""
def __init__(self, keys):
"""Constructs the handler with the name of the tf.train.Feature keys to use.
Args:
keys: the name of the TensorFlow Example Feature.
"""
if not isinstance(keys, (tuple, list)):
keys = [keys]
self._keys = keys
@property
def keys(self):
return self._keys
@abc.abstractmethod
def tensors_to_item(self, keys_to_tensors):
"""Maps the given dictionary of tensors to the requested item.
Args:
keys_to_tensors: a mapping of TF-Example keys to parsed tensors.
Returns:
the final tensor representing the item being handled.
"""
pass
class ItemHandlerCallback(ItemHandler):
"""An ItemHandler that converts the parsed tensors via a given function.
Unlike other ItemHandlers, the ItemHandlerCallback resolves its item via
a callback function rather than using prespecified behavior.
"""
def __init__(self, keys, func):
"""Initializes the ItemHandler.
Args:
keys: a list of TF-Example keys.
func: a function that takes as an argument a dictionary from `keys` to
parsed Tensors.
"""
super(ItemHandlerCallback, self).__init__(keys)
self._func = func
def tensors_to_item(self, keys_to_tensors):
return self._func(keys_to_tensors)
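# Illustrative sketch (not part of the original module): a callback handler
# that packs two parsed keys into a single tensor.  The key names 'height'
# and 'width' are assumptions chosen purely for this example.
def _example_size_handler():
  """Returns a handler combining 'height' and 'width'; documentation only."""
  def _combine(keys_to_tensors):
    # Both keys are parsed in stage one; this callback runs in stage two.
    return array_ops.stack(
        [keys_to_tensors['height'], keys_to_tensors['width']])
  return ItemHandlerCallback(keys=['height', 'width'], func=_combine)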
class BoundingBox(ItemHandler):
"""An ItemHandler that concatenates a set of parsed Tensors to Bounding Boxes.
"""
def __init__(self, keys=None, prefix=''):
"""Initialize the bounding box handler.
Args:
      keys: A list of four key names representing the ymin, xmin, ymax, xmax
prefix: An optional prefix for each of the bounding box keys.
If provided, `prefix` is appended to each key in `keys`.
Raises:
ValueError: if keys is not `None` and also not a list of exactly 4 keys
"""
if keys is None:
keys = ['ymin', 'xmin', 'ymax', 'xmax']
elif len(keys) != 4:
raise ValueError('BoundingBox expects 4 keys but got {}'.format(
len(keys)))
self._prefix = prefix
self._keys = keys
self._full_keys = [prefix + k for k in keys]
super(BoundingBox, self).__init__(self._full_keys)
def tensors_to_item(self, keys_to_tensors):
"""Maps the given dictionary of tensors to a concatenated list of bboxes.
Args:
keys_to_tensors: a mapping of TF-Example keys to parsed tensors.
Returns:
[num_boxes, 4] tensor of bounding box coordinates,
i.e. 1 bounding box per row, in order [y_min, x_min, y_max, x_max].
"""
sides = []
for key in self._full_keys:
side = keys_to_tensors[key]
if isinstance(side, sparse_tensor.SparseTensor):
side = side.values
side = array_ops.expand_dims(side, 0)
sides.append(side)
bounding_box = array_ops.concat(sides, 0)
return array_ops.transpose(bounding_box)
class Tensor(ItemHandler):
"""An ItemHandler that returns a parsed Tensor."""
def __init__(self, tensor_key, shape_keys=None, shape=None, default_value=0):
"""Initializes the Tensor handler.
Tensors are, by default, returned without any reshaping. However, there are
two mechanisms which allow reshaping to occur at load time. If `shape_keys`
is provided, both the `Tensor` corresponding to `tensor_key` and
`shape_keys` is loaded and the former `Tensor` is reshaped with the values
of the latter. Alternatively, if a fixed `shape` is provided, the `Tensor`
    corresponding to `tensor_key` is loaded and reshaped appropriately.
If neither `shape_keys` nor `shape` are provided, the `Tensor` will be
returned without any reshaping.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
if shape_keys and shape is not None:
raise ValueError('Cannot specify both shape_keys and shape parameters.')
if shape_keys and not isinstance(shape_keys, list):
shape_keys = [shape_keys]
self._tensor_key = tensor_key
self._shape_keys = shape_keys
self._shape = shape
self._default_value = default_value
keys = [tensor_key]
if shape_keys:
keys.extend(shape_keys)
super(Tensor, self).__init__(keys)
def tensors_to_item(self, keys_to_tensors):
tensor = keys_to_tensors[self._tensor_key]
shape = self._shape
if self._shape_keys:
shape_dims = []
for k in self._shape_keys:
shape_dim = keys_to_tensors[k]
if isinstance(shape_dim, sparse_tensor.SparseTensor):
shape_dim = sparse_ops.sparse_tensor_to_dense(shape_dim)
shape_dims.append(shape_dim)
shape = array_ops.reshape(array_ops.stack(shape_dims), [-1])
if isinstance(tensor, sparse_tensor.SparseTensor):
if shape is not None:
tensor = sparse_ops.sparse_reshape(tensor, shape)
tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
else:
if shape is not None:
tensor = array_ops.reshape(tensor, shape)
return tensor
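# Illustrative sketch (not part of the original module): the two reshaping
# mechanisms described above, requested explicitly.  The key names are
# assumptions chosen purely for this example.
def _example_tensor_handlers():
  """Returns a statically and a dynamically reshaped handler; docs only."""
  fixed = Tensor('matrix/values', shape=[2, 3])
  dynamic = Tensor('matrix/values', shape_keys=['matrix/rows', 'matrix/cols'])
  return fixed, dynamic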
class LookupTensor(Tensor):
"""An ItemHandler that returns a parsed Tensor, the result of a lookup."""
def __init__(self,
tensor_key,
table,
shape_keys=None,
shape=None,
default_value=''):
"""Initializes the LookupTensor handler.
See Tensor. Simply calls a vocabulary (most often, a label mapping) lookup.
Args:
tensor_key: the name of the `TFExample` feature to read the tensor from.
table: A tf.lookup table.
shape_keys: Optional name or list of names of the TF-Example feature in
which the tensor shape is stored. If a list, then each corresponds to
one dimension of the shape.
shape: Optional output shape of the `Tensor`. If provided, the `Tensor` is
reshaped accordingly.
default_value: The value used when the `tensor_key` is not found in a
particular `TFExample`.
Raises:
ValueError: if both `shape_keys` and `shape` are specified.
"""
self._table = table
super(LookupTensor, self).__init__(tensor_key, shape_keys, shape,
default_value)
def tensors_to_item(self, keys_to_tensors):
unmapped_tensor = super(LookupTensor, self).tensors_to_item(keys_to_tensors)
return self._table.lookup(unmapped_tensor)
class BackupHandler(ItemHandler):
"""An ItemHandler that tries two ItemHandlers in order."""
def __init__(self, handler, backup):
"""Initializes the BackupHandler handler.
If the first Handler's tensors_to_item returns a Tensor with no elements,
the second Handler is used.
Args:
handler: The primary ItemHandler.
backup: The backup ItemHandler.
Raises:
ValueError: if either is not an ItemHandler.
"""
if not isinstance(handler, ItemHandler):
raise ValueError('Primary handler is of type %s instead of ItemHandler'
% type(handler))
if not isinstance(backup, ItemHandler):
raise ValueError('Backup handler is of type %s instead of ItemHandler'
% type(backup))
self._handler = handler
self._backup = backup
super(BackupHandler, self).__init__(handler.keys + backup.keys)
def tensors_to_item(self, keys_to_tensors):
item = self._handler.tensors_to_item(keys_to_tensors)
return control_flow_ops.cond(
pred=math_ops.equal(math_ops.reduce_prod(array_ops.shape(item)), 0),
true_fn=lambda: self._backup.tensors_to_item(keys_to_tensors),
false_fn=lambda: item)
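# Illustrative sketch (not part of the original module): a handler that falls
# back to a second feature when the first one parses to an empty tensor.  The
# key names are assumptions chosen purely for this example.
def _example_backup_handler():
  """Prefers 'image/class/label', else 'image/class/label_v1'; docs only."""
  return BackupHandler(
      handler=Tensor('image/class/label'),
      backup=Tensor('image/class/label_v1'))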
class SparseTensor(ItemHandler):
"""An ItemHandler for SparseTensors."""
def __init__(self,
indices_key=None,
values_key=None,
shape_key=None,
shape=None,
densify=False,
default_value=0):
"""Initializes the Tensor handler.
Args:
indices_key: the name of the TF-Example feature that contains the ids.
Defaults to 'indices'.
values_key: the name of the TF-Example feature that contains the values.
Defaults to 'values'.
shape_key: the name of the TF-Example feature that contains the shape.
If provided it would be used.
shape: the output shape of the SparseTensor. If `shape_key` is not
provided this `shape` would be used.
densify: whether to convert the SparseTensor into a dense Tensor.
default_value: Scalar value to set when making dense for indices not
specified in the `SparseTensor`.
"""
indices_key = indices_key or 'indices'
values_key = values_key or 'values'
self._indices_key = indices_key
self._values_key = values_key
self._shape_key = shape_key
self._shape = shape
self._densify = densify
self._default_value = default_value
keys = [indices_key, values_key]
if shape_key:
keys.append(shape_key)
super(SparseTensor, self).__init__(keys)
def tensors_to_item(self, keys_to_tensors):
indices = keys_to_tensors[self._indices_key]
values = keys_to_tensors[self._values_key]
if self._shape_key:
shape = keys_to_tensors[self._shape_key]
if isinstance(shape, sparse_tensor.SparseTensor):
shape = sparse_ops.sparse_tensor_to_dense(shape)
elif self._shape:
shape = self._shape
else:
shape = indices.dense_shape
indices_shape = array_ops.shape(indices.indices)
rank = indices_shape[1]
ids = math_ops.cast(indices.values, dtypes.int64)
indices_columns_to_preserve = array_ops.slice(
indices.indices, [0, 0], array_ops.stack([-1, rank - 1]))
new_indices = array_ops.concat(
[indices_columns_to_preserve, array_ops.reshape(ids, [-1, 1])], 1)
tensor = sparse_tensor.SparseTensor(new_indices, values.values, shape)
if self._densify:
tensor = sparse_ops.sparse_tensor_to_dense(tensor, self._default_value)
return tensor
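# Illustrative sketch (not part of the original module): a sparse feature
# densified into a fixed-length vector.  The key names and the length 100 are
# assumptions chosen purely for this example.
def _example_sparse_handler():
  """Returns a handler for an indices/values encoded vector; docs only."""
  return SparseTensor(
      indices_key='scores/index',
      values_key='scores/value',
      shape=[100],
      densify=True)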
class Image(ItemHandler):
"""An ItemHandler that decodes a parsed Tensor as an image."""
def __init__(self,
image_key=None,
format_key=None,
shape=None,
channels=3,
dtype=dtypes.uint8,
repeated=False,
dct_method=''):
"""Initializes the image.
Args:
image_key: the name of the TF-Example feature in which the encoded image
is stored.
format_key: the name of the TF-Example feature in which the image format
is stored.
shape: the output shape of the image as 1-D `Tensor`
[height, width, channels]. If provided, the image is reshaped
accordingly. If left as None, no reshaping is done. A shape should
be supplied only if all the stored images have the same shape.
channels: the number of channels in the image.
dtype: images will be decoded at this bit depth. Different formats
support different bit depths.
        See tf.image.decode_image and tf.io.decode_raw.
repeated: if False, decodes a single image. If True, decodes a
variable number of image strings from a 1D tensor of strings.
dct_method: An optional string. Defaults to empty string. It only takes
effect when image format is jpeg, used to specify a hint about the
algorithm used for jpeg decompression. Currently valid values
        are ['INTEGER_FAST', 'INTEGER_ACCURATE']. The hint may be ignored if,
        for example, the jpeg library does not have that specific option.
"""
if not image_key:
image_key = 'image/encoded'
if not format_key:
format_key = 'image/format'
super(Image, self).__init__([image_key, format_key])
self._image_key = image_key
self._format_key = format_key
self._shape = shape
self._channels = channels
self._dtype = dtype
self._repeated = repeated
self._dct_method = dct_method
def tensors_to_item(self, keys_to_tensors):
"""See base class."""
image_buffer = keys_to_tensors[self._image_key]
image_format = keys_to_tensors[self._format_key]
if self._repeated:
return map_fn.map_fn(lambda x: self._decode(x, image_format),
image_buffer, dtype=self._dtype)
else:
return self._decode(image_buffer, image_format)
def _decode(self, image_buffer, image_format):
"""Decodes the image buffer.
Args:
image_buffer: The tensor representing the encoded image tensor.
image_format: The image format for the image in `image_buffer`. If image
format is `raw`, all images are expected to be in this format, otherwise
this op can decode a mix of `jpg` and `png` formats.
Returns:
A tensor that represents decoded image of self._shape, or
(?, ?, self._channels) if self._shape is not specified.
"""
def decode_image():
"""Decodes a image based on the headers."""
return math_ops.cast(
image_ops.decode_image(image_buffer, channels=self._channels),
self._dtype)
def decode_jpeg():
"""Decodes a jpeg image with specified '_dct_method'."""
return math_ops.cast(
image_ops.decode_jpeg(
image_buffer,
channels=self._channels,
dct_method=self._dct_method), self._dtype)
def check_jpeg():
"""Checks if an image is jpeg."""
# For jpeg, we directly use image_ops.decode_jpeg rather than decode_image
# in order to feed the jpeg specify parameter 'dct_method'.
return control_flow_ops.cond(
image_ops.is_jpeg(image_buffer),
decode_jpeg,
decode_image,
name='cond_jpeg')
def decode_raw():
"""Decodes a raw image."""
return parsing_ops.decode_raw(image_buffer, out_type=self._dtype)
pred_fn_pairs = [(math_ops.logical_or(
math_ops.equal(image_format, 'raw'),
math_ops.equal(image_format, 'RAW')), decode_raw)]
image = control_flow_ops.case(
pred_fn_pairs, default=check_jpeg, exclusive=True)
image.set_shape([None, None, self._channels])
if self._shape is not None:
image = array_ops.reshape(image, self._shape)
return image
class TFExampleDecoder(data_decoder.DataDecoder):
"""A decoder for TensorFlow Examples.
  Decoding Example proto buffers comprises two stages: (1) Example parsing
and (2) tensor manipulation.
In the first stage, the tf.io.parse_example function is called with a list of
  FixedLenFeatures and VarLenFeatures. These instances tell TF how to parse
the example. The output of this stage is a set of tensors.
In the second stage, the resulting tensors are manipulated to provide the
requested 'item' tensors.
To perform this decoding operation, an ExampleDecoder is given a list of
ItemHandlers. Each ItemHandler indicates the set of features for stage 1 and
  contains the instructions for post-processing its tensors for stage 2.
"""
def __init__(self, keys_to_features, items_to_handlers):
"""Constructs the decoder.
Args:
keys_to_features: a dictionary from TF-Example keys to either
tf.io.VarLenFeature or tf.io.FixedLenFeature instances. See tensorflow's
parsing_ops.py.
items_to_handlers: a dictionary from items (strings) to ItemHandler
instances. Note that the ItemHandler's are provided the keys that they
use to return the final item Tensors.
"""
self._keys_to_features = keys_to_features
self._items_to_handlers = items_to_handlers
def list_items(self):
"""See base class."""
return list(self._items_to_handlers.keys())
def decode(self, serialized_example, items=None):
"""Decodes the given serialized TF-example.
Args:
serialized_example: a serialized TF-example tensor.
items: the list of items to decode. These must be a subset of the item
keys in self._items_to_handlers. If `items` is left as None, then all
of the items in self._items_to_handlers are decoded.
Returns:
the decoded items, a list of tensor.
"""
example = parsing_ops.parse_single_example(serialized_example,
self._keys_to_features)
# Reshape non-sparse elements just once, adding the reshape ops in
# deterministic order.
for k in sorted(self._keys_to_features):
v = self._keys_to_features[k]
if isinstance(v, parsing_ops.FixedLenFeature):
example[k] = array_ops.reshape(example[k], v.shape)
if not items:
items = self._items_to_handlers.keys()
outputs = []
for item in items:
handler = self._items_to_handlers[item]
keys_to_tensors = {key: example[key] for key in handler.keys}
outputs.append(handler.tensors_to_item(keys_to_tensors))
return outputs
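# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way the pieces
# above could be wired together.  The feature keys, shapes and default values
# are assumptions chosen purely for this example.
# ---------------------------------------------------------------------------
def _example_build_decoder():
  """Builds a toy TFExampleDecoder; documentation only, never called here."""
  keys_to_features = {
      'image/encoded':
          parsing_ops.FixedLenFeature((), dtypes.string, default_value=''),
      'image/format':
          parsing_ops.FixedLenFeature((), dtypes.string, default_value='jpeg'),
      'image/class/label':
          parsing_ops.FixedLenFeature([], dtypes.int64, default_value=0),
  }
  items_to_handlers = {
      'image': Image(image_key='image/encoded', format_key='image/format'),
      'label': Tensor('image/class/label'),
  }
  decoder = TFExampleDecoder(keys_to_features, items_to_handlers)
  # A caller would then run, for example:
  #   image, label = decoder.decode(serialized_example, items=['image', 'label'])
  return decoder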
| 36.487666 | 80 | 0.690884 |
7942116c96121ccd6101eb2f9100e95301f497a4 | 4,883 | py | Python | python/scannerpy/protobufs.py | agirbau/scanner | 04a0c4b4196341995985acd729c0788aab823e1c | [
"Apache-2.0"
] | 618 | 2017-02-15T02:49:25.000Z | 2022-03-26T13:22:08.000Z | python/scannerpy/protobufs.py | agirbau/scanner | 04a0c4b4196341995985acd729c0788aab823e1c | [
"Apache-2.0"
] | 258 | 2017-02-14T23:37:35.000Z | 2020-01-17T07:50:30.000Z | python/scannerpy/protobufs.py | agirbau/scanner | 04a0c4b4196341995985acd729c0788aab823e1c | [
"Apache-2.0"
] | 108 | 2017-02-23T01:43:33.000Z | 2022-03-23T02:44:10.000Z | import os.path
import imp
import sys
from scannerpy.common import *
import scanner.metadata_pb2 as metadata_types
import scanner.source_args_pb2 as source_types
import scanner.sink_args_pb2 as sink_types
import scanner.sampler_args_pb2 as sampler_types
import scanner.engine.rpc_pb2 as rpc_types
import scanner.engine.rpc_pb2_grpc as grpc_types
import scanner.types_pb2 as misc_types
from google.protobuf.descriptor import FieldDescriptor
class ProtobufGenerator:
def __init__(self):
self._mods = []
self._paths = []
for mod in [
misc_types, rpc_types, grpc_types, metadata_types,
source_types, sink_types, sampler_types
]:
self.add_module(mod)
def add_module(self, path):
if path in self._paths:
return
if isinstance(path, str):
if not os.path.isfile(path):
raise ScannerException('Protobuf path does not exist: {}'
.format(path))
imp.acquire_lock()
mod = imp.load_source('_ignore', path)
imp.release_lock()
else:
mod = path
self._paths.append(path)
self._mods.append(mod)
# By default, ProtobufGenerator does not work correctly with pickle.
# If you pickle something that closes over a ProtobufGenerator instance,
# then pickle will save the dynamically imported modules (imp.load_source)
    # just as the name of the module and not the path, i.e. just "_ignore". It will
# then try to re-import "_ignore" upon unpickling, and that module will not exist.
#
# The solution is to override the default mechanism for pickling the object:
# https://docs.python.org/3/library/pickle.html#object.__reduce__
#
# We capture the information necessary to recreate the ProtobufGenerator (its paths)
# and provide a function that creates the new instance.
def __reduce__(self):
def make_gen(paths):
p = ProtobufGenerator()
for path in paths:
p.add_module(path)
return (make_gen, (self._paths,))
def __getattr__(self, name):
for mod in self._mods:
if hasattr(mod, name):
return getattr(mod, name)
# This has to be an AttributeError (as opposed to an Exception) or else
# APIs that use reflection like pickle will break
raise AttributeError('No protobuf with name {}'.format(name))
protobufs = ProtobufGenerator()
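# Illustrative sketch (not part of the original module): the __reduce__
# override above is what lets the generator survive a serialization round
# trip, assuming the pickler in use can serialize the nested `make_gen`
# function (cloudpickle can; the standard `pickle` module cannot pickle
# local functions).  For example:
#
#     import cloudpickle
#     restored = cloudpickle.loads(cloudpickle.dumps(protobufs))
#     assert isinstance(restored, ProtobufGenerator)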
def analyze_proto(proto):
def analyze(p):
fields = {}
for f in p.fields:
child_fields = None
if f.type == FieldDescriptor.TYPE_MESSAGE:
child_fields = analyze(f.message_type)
fields[f.name] = {
'type':
f.type,
'message':
getattr(protobufs, f.message_type.name)
if f.message_type is not None else None,
'repeated':
f.label == FieldDescriptor.LABEL_REPEATED,
'fields':
child_fields,
}
return fields
return analyze(proto.DESCRIPTOR)
def python_to_proto(proto_name, obj):
args_proto = getattr(protobufs, proto_name)
p = analyze_proto(args_proto)
def create_obj(proto, p, obj):
if isinstance(obj, proto):
return obj
elif not isinstance(obj, dict):
raise ScannerException('Attempted to bind a non-dict type to a '
'protobuf')
proto_obj = proto()
for k, v in obj.items():
if k not in p:
raise ScannerException(
'Protobuf {} does not have field {:s}'.format(
proto_name, k))
desc = p[k]
# If a message field
def make_field(val):
if desc['type'] == FieldDescriptor.TYPE_MESSAGE:
# If a message field, we need to recursively invoke
# serialization
return create_obj(desc['message'], desc['fields'], val)
else:
return val
if p[k]['repeated']:
# If a repeated field, we need to set using slicing
data = []
for vi in v:
data.append(make_field(vi))
getattr(proto_obj, k)[:] = data
elif p[k]['message'] is not None:
# If a message field, have to CopyFrom, can't use direct assignment
getattr(proto_obj, k).CopyFrom(make_field(v))
elif make_field(v) is not None:
# Just set the regular field
setattr(proto_obj, k, make_field(v))
return proto_obj
return create_obj(args_proto, p, obj).SerializeToString()
| 34.387324 | 88 | 0.581814 |
79421248e97a6c2799e686dc111cb49bdac5f21f | 41,181 | py | Python | python/graphscope/tests/unittest/test_udf_app.py | pwrliang/GraphScope | 56979ed5109ee0344fee8877460927e47147a62f | [
"Apache-2.0"
] | null | null | null | python/graphscope/tests/unittest/test_udf_app.py | pwrliang/GraphScope | 56979ed5109ee0344fee8877460927e47147a62f | [
"Apache-2.0"
] | null | null | null | python/graphscope/tests/unittest/test_udf_app.py | pwrliang/GraphScope | 56979ed5109ee0344fee8877460927e47147a62f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import io
import logging
import os
import tempfile
import zipfile
from io import BytesIO
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
import yaml
import graphscope
from graphscope.analytical.udf.decorators import pie
from graphscope.analytical.udf.decorators import pregel
from graphscope.analytical.udf.utils import PregelAggregatorType
from graphscope.framework.app import AppAssets
from graphscope.framework.app import load_app
from graphscope.framework.errors import InvalidArgumentError
DEFAULT_GS_CONFIG_FILE = ".gs_conf.yaml"
logger = logging.getLogger("graphscope")
@pytest.fixture(scope="function")
def random_gar():
path = os.path.join(
"/",
tempfile.gettempprefix(),
"{}.gar".format(str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f"))),
)
yield path
os.remove(path)
@pytest.fixture(scope="module")
def not_exist_gar():
path = os.path.join("not_exist_dir", "not_exist.gar")
return path
@pytest.fixture(scope="module")
def non_zipfile_gar():
path = os.path.join("/", tempfile.gettempprefix(), "test.txt")
Path(path).touch()
yield path
os.remove(path)
@pytest.fixture(scope="module")
def empty_gar():
path = os.path.join(
"/",
tempfile.gettempprefix(),
"{}.gar".format(str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f"))),
)
empty_zip_data = b"PK\x05\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
with open(path, "wb") as f:
f.write(empty_zip_data)
yield path
os.remove(path)
def invalid_configfile_gar():
path = os.path.join(
"/",
tempfile.gettempprefix(),
"{}.gar".format(str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f"))),
)
config = {"a": 10}
in_memory_buffer = BytesIO()
zip_file = zipfile.ZipFile(in_memory_buffer, "a", zipfile.ZIP_DEFLATED, False)
zip_file.writestr(DEFAULT_GS_CONFIG_FILE, yaml.dump(config))
zip_file.close()
with open(path, "wb") as f:
f.write(in_memory_buffer.getvalue())
yield path
os.remove(path)
# Example of pregel sssp
@pregel(vd_type="double", md_type="double")
class SSSP_Pregel(AppAssets):
@staticmethod
def Init(v, context):
v.set_value(1000000000.0)
@staticmethod
def Compute(messages, v, context):
src_id = context.get_config(b"src")
cur_dist = v.value()
new_dist = 1000000000.0
if v.id() == src_id:
new_dist = 0
for message in messages:
new_dist = min(message, new_dist)
if new_dist < cur_dist:
v.set_value(new_dist)
for e_label_id in range(context.edge_label_num()):
edges = v.outgoing_edges(e_label_id)
for e in edges:
v.send(e.vertex(), new_dist + e.get_int(2))
v.vote_to_halt()
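# Illustrative sketch (not part of the original tests): a decorated app such
# as SSSP_Pregel is instantiated and run against a loaded property graph much
# like the tests below do.  The graph object `g`, the source id and the
# selector are assumptions for this example:
#
#     sssp = SSSP_Pregel()
#     ctx = sssp(g, src=6)
#     r = ctx.to_numpy("r:v0")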
# Example of pie sssp
@pie(vd_type="string", md_type="string")
class PIE_API_Test(AppAssets):
"""PIE API Test on ldbc sample graph."""
@staticmethod
def Init(frag, context):
pass
@staticmethod
def PEval(frag, context):
graphscope.declare(graphscope.Vertex, node)
fid = frag.fid()
if fid == 0:
            # This is not stable, as it depends on the number of workers
# assert frag.fnum() == 4
assert frag.vertex_label_num() == 8
assert frag.edge_label_num() == 15
assert frag.get_total_nodes_num() == 190376
v_label_num = frag.vertex_label_num()
for v_label_id in range(v_label_num):
assert frag.get_nodes_num(v_label_id) == (
frag.get_inner_nodes_num(v_label_id)
+ frag.get_outer_nodes_num(v_label_id)
)
nodes = frag.nodes(v_label_id)
assert nodes.size() == frag.get_nodes_num(v_label_id)
inner_nodes = frag.inner_nodes(v_label_id)
assert inner_nodes.size() == frag.get_inner_nodes_num(v_label_id)
outer_nodes = frag.outer_nodes(v_label_id)
assert outer_nodes.size() == frag.get_outer_nodes_num(v_label_id)
for iv in inner_nodes:
assert frag.get_node_fid(iv) == 0
assert frag.is_inner_node(iv) == True
vid = frag.get_node_id(iv)
assert frag.get_node(v_label_id, vid, node) == True
assert frag.get_inner_node(v_label_id, vid, node) == True
e_label_num = frag.edge_label_num()
for e_label_id in range(e_label_num):
edges = frag.get_outgoing_edges(iv, e_label_id)
if edges.size() > 0:
assert frag.has_child(iv, e_label_id) == True
assert frag.get_outdegree(iv, e_label_id) == int(edges.size())
edges = frag.get_incoming_edges(iv, e_label_id)
if edges.size() > 0:
assert frag.has_parent(iv, e_label_id) == True
assert frag.get_indegree(iv, e_label_id) == int(edges.size())
for ov in outer_nodes:
assert frag.is_outer_node(ov) == True
vid = frag.get_node_id(ov)
assert frag.get_node(v_label_id, vid, node) == True
assert frag.get_outer_node(v_label_id, vid, node) == True
for v_label in frag.vertex_labels():
label_id = frag.get_vertex_label_id_by_name(v_label)
assert frag.get_vertex_label_by_id(label_id) == v_label
for prop in frag.vertex_properties(v_label):
prop_id = frag.get_vertex_property_id_by_name(v_label, prop.first)
assert (
frag.get_vertex_property_by_id(v_label, prop_id) == prop.first
)
prop_id = frag.get_vertex_property_id_by_name(label_id, prop.first)
assert (
frag.get_vertex_property_by_id(label_id, prop_id) == prop.first
)
for prop in frag.vertex_properties(label_id):
pass
for e_label in frag.edge_labels():
e_label_id = frag.get_edge_label_id_by_name(e_label)
assert frag.get_edge_label_by_id(e_label_id) == e_label
for prop in frag.edge_properties(e_label):
prop_id = frag.get_edge_property_id_by_name(e_label, prop.first)
assert frag.get_edge_property_by_id(e_label, prop_id) == prop.first
prop_id = frag.get_edge_property_id_by_name(e_label_id, prop.first)
assert (
frag.get_edge_property_by_id(e_label_id, prop_id) == prop.first
)
for prop in frag.edge_properties(e_label_id):
pass
@staticmethod
def IncEval(frag, context):
pass
# Pregel API Test
@pregel(vd_type="string", md_type="string")
class Pregel_API_Test(AppAssets):
"""Pregel API Test on ldbc_sample graph"""
@staticmethod
def Init(v, context):
v.set_value(b"")
@staticmethod
def Compute(messages, v, context):
"""
Test on vertex with id = 933, label = person
"""
v.vote_to_halt()
# v.id()
vid = v.id()
# v.label()
label = v.label()
if vid == b"933" and label == b"person":
# v.label_id()
label_id = v.label_id()
assert context.get_vertex_label_by_id(label_id) == label
assert context.get_vertex_label_id_by_name(label) == label_id
v.set_value(b"graphscope")
assert v.value() == b"graphscope"
for prop in v.properties():
prop_id = context.get_vertex_property_id_by_name(v.label(), prop.first)
assert (
context.get_vertex_property_by_id(v.label(), prop_id) == prop.first
)
prop_id = context.get_vertex_property_id_by_name(
v.label_id(), prop.first
)
assert (
context.get_vertex_property_by_id(v.label_id(), prop_id)
== prop.first
)
if prop.second == b"DOUBLE":
# test v.get_double(v_property_name) / v.get_double(v_property_id)
assert v.get_double(prop.first) == v.get_double(prop_id)
elif prop.second == b"LONG":
# test v.get_int(v_property_name) / test v.get_int(v_property_id)
assert v.get_int(prop.first) == v.get_int(prop_id)
elif prop.second == b"STRING":
# test v.get_str(v_property_name) / test v.get_str(v_property_id)
assert v.get_str(prop.first) == v.get_str(prop_id)
assert context.superstep() == 0
assert context.get_config(b"param1") == b"graphscope"
assert context.get_config(b"param2") == b"graphscope2"
assert context.get_total_vertices_num() == 190376
assert context.vertex_label_num() == 8
assert context.edge_label_num() == 15
assert context.vertex_property_num(v.label()) == 8
assert context.vertex_property_num(v.label_id()) == 8
for e_label in context.edge_labels():
e_label_id = context.get_edge_label_id_by_name(e_label)
assert context.get_edge_label_by_id(e_label_id) == e_label
edges = v.incoming_edges(e_label_id)
for edge in edges:
edge.vertex().id()
if e_label == b"knows":
assert context.edge_property_num(e_label_id) == 2
assert context.edge_property_num(e_label) == 2
for prop in context.edge_properties(e_label):
prop_id = context.get_edge_property_id_by_name(e_label, prop.first)
assert (
context.get_edge_property_by_id(e_label, prop_id) == prop.first
)
prop_id = context.get_edge_property_id_by_name(
e_label_id, prop.first
)
assert (
context.get_edge_property_by_id(e_label_id, prop_id)
== prop.first
)
for prop in context.edge_properties(e_label_id):
pass
for v_label in context.vertex_labels():
v_label_id = context.get_vertex_label_id_by_name(v_label)
assert context.get_vertex_label_by_id(v_label_id) == v_label
if v_label == b"person":
assert context.vertex_property_num(v_label_id) == 8
assert context.vertex_property_num(v_label) == 8
for prop in context.vertex_properties(v_label):
prop_id = context.get_vertex_property_id_by_name(
v_label, prop.first
)
assert (
context.get_vertex_property_by_id(v_label, prop_id)
== prop.first
)
for prop in context.vertex_properties(v_label_id):
pass
# Example of pregel sssp (with combine)
@pregel(vd_type="double", md_type="double")
class SSSP_Pregel_Combine(AppAssets):
@staticmethod
def Init(v, context):
v.set_value(1000000000.0)
@staticmethod
def Compute(messages, v, context):
src_id = context.get_config(b"src")
cur_dist = v.value()
new_dist = 1000000000.0
if v.id() == src_id:
new_dist = 0
for message in messages:
new_dist = min(message, new_dist)
if new_dist < cur_dist:
v.set_value(new_dist)
for e_label_id in range(context.edge_label_num()):
edges = v.outgoing_edges(e_label_id)
for e in edges:
v.send(e.vertex(), new_dist + e.get_int(2))
v.vote_to_halt()
@staticmethod
def Combine(messages):
ret = 1000000000.0
for m in messages:
ret = min(ret, m)
return ret
# Example of pregel aggregator test
@pregel(vd_type="double", md_type="double")
class Aggregators_Pregel_Test(AppAssets):
@staticmethod
def Init(v, context):
# int
context.register_aggregator(
b"int_sum_aggregator", PregelAggregatorType.kInt64SumAggregator
)
context.register_aggregator(
b"int_max_aggregator", PregelAggregatorType.kInt64MaxAggregator
)
context.register_aggregator(
b"int_min_aggregator", PregelAggregatorType.kInt64MinAggregator
)
context.register_aggregator(
b"int_product_aggregator", PregelAggregatorType.kInt64ProductAggregator
)
context.register_aggregator(
b"int_overwrite_aggregator", PregelAggregatorType.kInt64OverwriteAggregator
)
# double
context.register_aggregator(
b"double_sum_aggregator", PregelAggregatorType.kDoubleSumAggregator
)
context.register_aggregator(
b"double_max_aggregator", PregelAggregatorType.kDoubleMaxAggregator
)
context.register_aggregator(
b"double_min_aggregator", PregelAggregatorType.kDoubleMinAggregator
)
context.register_aggregator(
b"double_product_aggregator", PregelAggregatorType.kDoubleProductAggregator
)
context.register_aggregator(
b"double_overwrite_aggregator",
PregelAggregatorType.kDoubleOverwriteAggregator,
)
# bool
context.register_aggregator(
b"bool_and_aggregator", PregelAggregatorType.kBoolAndAggregator
)
context.register_aggregator(
b"bool_or_aggregator", PregelAggregatorType.kBoolOrAggregator
)
context.register_aggregator(
b"bool_overwrite_aggregator", PregelAggregatorType.kBoolOverwriteAggregator
)
# text
context.register_aggregator(
b"text_append_aggregator", PregelAggregatorType.kTextAppendAggregator
)
@staticmethod
def Compute(messages, v, context):
if context.superstep() == 0:
context.aggregate(b"int_sum_aggregator", 1)
context.aggregate(b"int_max_aggregator", int(v.id()))
context.aggregate(b"int_min_aggregator", int(v.id()))
context.aggregate(b"int_product_aggregator", 1)
context.aggregate(b"int_overwrite_aggregator", 1)
context.aggregate(b"double_sum_aggregator", 1.0)
context.aggregate(b"double_max_aggregator", float(v.id()))
context.aggregate(b"double_min_aggregator", float(v.id()))
context.aggregate(b"double_product_aggregator", 1.0)
context.aggregate(b"double_overwrite_aggregator", 1.0)
context.aggregate(b"bool_and_aggregator", True)
context.aggregate(b"bool_or_aggregator", False)
context.aggregate(b"bool_overwrite_aggregator", True)
context.aggregate(b"text_append_aggregator", v.id() + b",")
else:
if v.id() == b"1":
assert context.get_aggregated_value(b"int_sum_aggregator") == 62586
assert context.get_aggregated_value(b"int_max_aggregator") == 62586
assert context.get_aggregated_value(b"int_min_aggregator") == 1
assert context.get_aggregated_value(b"int_product_aggregator") == 1
assert context.get_aggregated_value(b"int_overwrite_aggregator") == 1
assert context.get_aggregated_value(b"double_sum_aggregator") == 62586.0
assert context.get_aggregated_value(b"double_max_aggregator") == 62586.0
assert context.get_aggregated_value(b"double_min_aggregator") == 1.0
assert context.get_aggregated_value(b"double_product_aggregator") == 1.0
assert (
context.get_aggregated_value(b"double_overwrite_aggregator") == 1.0
)
assert context.get_aggregated_value(b"bool_and_aggregator") == True
assert context.get_aggregated_value(b"bool_or_aggregator") == False
assert (
context.get_aggregated_value(b"bool_overwrite_aggregator") == True
)
context.get_aggregated_value(b"text_append_aggregator")
v.vote_to_halt()
@pregel(vd_type="string", md_type="string")
class PregelVertexTraversal(AppAssets):
"""Write Vertex properties.
Formats: prop1,prop2,...,propN,id
"""
@staticmethod
def Init(v, context):
v.set_value(b"")
@staticmethod
def Compute(messages, v, context):
rlt = string(b"")
first = True
for prop in v.properties():
if not first:
rlt.append(b",")
first = False
if prop.second == b"DOUBLE":
rlt.append(to_string(v.get_double(prop.first)))
elif prop.second == b"LONG":
rlt.append(to_string(v.get_int(prop.first)))
elif prop.second == b"STRING":
rlt.append(v.get_str(prop.first))
v.set_value(rlt)
v.vote_to_halt()
@pregel(vd_type="string", md_type="string")
class PregelEdgeTraversal(AppAssets):
"""Write Edge properties, together with src/dst id.
Formats: e_label,src_id,dst_id,prop1,...,propN
"""
@staticmethod
def Init(v, context):
v.set_value(b"")
@staticmethod
def Compute(messages, v, context):
rlt = string(b"")
e_labels = context.edge_labels()
first = True
for e_label in e_labels:
edges = v.outgoing_edges(e_label)
for e in edges:
if not first:
rlt.append(b"|")
first = False
rlt.append(e_label)
rlt.append(b",")
rlt.append(v.id())
rlt.append(b",")
rlt.append(e.vertex().id())
for prop in context.edge_properties(e_label):
rlt.append(b",")
e_prop_id = context.get_edge_property_id_by_name(
e_label, prop.first
)
if prop.second == b"DOUBLE":
rlt.append(to_string(e.get_double(e_prop_id)))
elif prop.second == b"LONG":
rlt.append(to_string(e.get_int(e_prop_id)))
elif prop.second == b"STRING":
rlt.append(e.get_str(e_prop_id))
v.set_value(rlt)
v.vote_to_halt()
# Example of get schema in pregel model
@pregel(vd_type="string", md_type="string")
class Pregel_GetSchema(AppAssets):
@staticmethod
def Init(v, context):
v.set_value(string(b""))
@staticmethod
def Compute(messages, v, context):
rlt = v.value()
for v_label in context.vertex_labels():
rlt.append(v_label)
rlt.append(b",")
for v_property in context.vertex_properties(v.label()):
rlt.append(v_property.first)
rlt.append(b",")
rlt.append(v_property.second)
rlt.append(b",")
e_labels = context.edge_labels()
for e_label in e_labels:
rlt.append(e_label)
rlt.append(b",")
for e_property in context.edge_properties(e_label):
rlt.append(e_property.first)
rlt.append(b",")
rlt.append(e_property.second)
rlt.append(b",")
v.set_value(rlt)
v.vote_to_halt()
# Example of pie sssp
@pie(vd_type="double", md_type="double")
class SSSP_PIE(AppAssets):
@staticmethod
def Init(frag, context):
v_label_num = frag.vertex_label_num()
for v_label_id in range(v_label_num):
nodes = frag.nodes(v_label_id)
context.init_value(
nodes, v_label_id, 1000000000.0, PIEAggregateType.kMinAggregate
)
context.register_sync_buffer(v_label_id, MessageStrategy.kSyncOnOuterVertex)
@staticmethod
def PEval(frag, context):
src = int(context.get_config(b"src"))
graphscope.declare(graphscope.Vertex, source)
native_source = False
v_label_num = frag.vertex_label_num()
for v_label_id in range(v_label_num):
if frag.get_inner_node(v_label_id, src, source):
native_source = True
break
if native_source:
context.set_node_value(source, 0)
else:
return
e_label_num = frag.edge_label_num()
for e_label_id in range(e_label_num):
edges = frag.get_outgoing_edges(source, e_label_id)
for e in edges:
dst = e.neighbor()
distv = e.get_int(2)
if context.get_node_value(dst) > distv:
context.set_node_value(dst, distv)
@staticmethod
def IncEval(frag, context):
v_label_num = frag.vertex_label_num()
e_label_num = frag.edge_label_num()
for v_label_id in range(v_label_num):
iv = frag.inner_nodes(v_label_id)
for v in iv:
v_dist = context.get_node_value(v)
for e_label_id in range(e_label_num):
es = frag.get_outgoing_edges(v, e_label_id)
for e in es:
u = e.neighbor()
u_dist = v_dist + e.get_int(2)
if context.get_node_value(u) > u_dist:
context.set_node_value(u, u_dist)
# Example of get schema in pie model
@pie(vd_type="string", md_type="string")
class PIE_GetSchema(AppAssets):
@staticmethod
def Init(frag, context):
v_label_num = frag.vertex_label_num()
for i in range(0, v_label_num):
nodes = frag.nodes(i)
context.init_value(
nodes, i, string(b""), PIEAggregateType.kTextAppendAggregate
)
@staticmethod
def PEval(frag, context):
v_label_num = frag.vertex_label_num()
for v_label_id in range(0, v_label_num):
iv = frag.inner_nodes(v_label_id)
for v in iv:
rlt = context.get_node_value(v)
for v_label in frag.vertex_labels():
rlt.append(v_label)
rlt.append(b",")
for v_property in frag.vertex_properties(v_label_id):
rlt.append(v_property.first)
rlt.append(b",")
rlt.append(v_property.second)
rlt.append(b",")
e_labels = frag.edge_labels()
for e_label in e_labels:
rlt.append(e_label)
rlt.append(b",")
for e_property in frag.edge_properties(e_label):
rlt.append(e_property.first)
rlt.append(b",")
rlt.append(e_property.second)
rlt.append(b",")
context.set_node_value(v, rlt)
@staticmethod
def IncEval(frag, context):
pass
# Example of pregel sssp
@pregel(vd_type="double", md_type="double")
class MathInAlgorithm(AppAssets):
@staticmethod
def Init(v, context):
v.set_value(context.math.log2(1000000000.0 * context.math.M_PI))
@staticmethod
def Compute(messages, v, context):
v.vote_to_halt()
def test_error_with_missing_necessary_method():
with pytest.raises(ValueError, match="Can't find method definition"):
@pregel(vd_type="double", md_type="double")
class Pregel_1:
@staticmethod
def Init(v, context):
pass
with pytest.raises(ValueError, match="Can't find method definition"):
@pregel(vd_type="double", md_type="double")
class Pregel_2:
@staticmethod
def Compute(message, v, context):
pass
with pytest.raises(ValueError, match="Can't find method definition"):
@pie(vd_type="double", md_type="double")
class PIE_1:
@staticmethod
def Init(frag, context):
pass
@staticmethod
def PEval(frag, context):
pass
with pytest.raises(ValueError, match="Can't find method definition"):
@pie(vd_type="double", md_type="double")
class PIE_2:
@staticmethod
def Init(v, context):
pass
@staticmethod
def IncEval(frag, context):
pass
with pytest.raises(ValueError, match="Can't find method definition"):
@pie(vd_type="double", md_type="double")
class PIE_3:
@staticmethod
def PEval(frag, context):
pass
@staticmethod
def IncEval(frag, context):
pass
def test_error_with_missing_staticmethod_keyword():
with pytest.raises(ValueError, match="Missing staticmethod decorator"):
@pregel(vd_type="double", md_type="double")
class Pregel_1:
@staticmethod
def Init(v, context):
pass
def Compute(message, v, context):
pass
with pytest.raises(ValueError, match="Missing staticmethod decorator"):
@pie(vd_type="double", md_type="double")
class PIE_1:
def Init(frag, context):
pass
def PEval(frag, context):
pass
def IncEval(frag, context):
pass
def test_error_with_method_signature():
with pytest.raises(AssertionError, match="The number of parameters does not match"):
@pregel(vd_type="double", md_type="double")
class Pregel_1:
@staticmethod
def Init(v): # missing context
pass
@staticmethod
def Compute(message, v, context):
pass
with pytest.raises(AssertionError, match="The number of parameters does not match"):
@pregel(vd_type="double", md_type="double")
class Pregel_2:
@staticmethod
def Init(v, context):
pass
@staticmethod
            def Compute(v, context): # missing message
pass
with pytest.raises(AssertionError, match="The number of parameters does not match"):
@pregel(vd_type="double", md_type="double")
class Pregel_3:
@staticmethod
def Init(v, context, other): # more args
pass
@staticmethod
def Compute(message, v, context):
pass
with pytest.raises(AssertionError, match="The number of parameters does not match"):
@pie(vd_type="double", md_type="double")
class PIE_1:
@staticmethod
def Init(frag): # missing context
pass
@staticmethod
def PEval(frag, context):
pass
@staticmethod
def IncEval(frag, context):
pass
with pytest.raises(AssertionError, match="The number of parameters does not match"):
@pie(vd_type="double", md_type="double")
class PIE_2:
@staticmethod
def Init(frag, context):
pass
@staticmethod
def PEval(frag): # missing context
pass
@staticmethod
def IncEval(frag, context):
pass
with pytest.raises(AssertionError, match="The number of parameters does not match"):
@pie(vd_type="double", md_type="double")
class PIE_3:
@staticmethod
def Init(frag, context):
pass
@staticmethod
def PEval(frag, context):
pass
@staticmethod
def IncEval(frag): # missing context
pass
with pytest.raises(AssertionError, match="The number of parameters does not match"):
@pie(vd_type="double", md_type="double")
class PIE_4:
@staticmethod
def Init(frag, context, message): # more args
pass
@staticmethod
def PEval(frag, context):
pass
@staticmethod
def IncEval(frag, context):
pass
def test_extra_method_definition():
with pytest.raises(RuntimeError, match="Not recognized method"):
@pregel(vd_type="double", md_type="double")
class Pregel_1:
@staticmethod
def Init(v, context):
pass
@staticmethod
def Compute(message, v, context):
pass
@staticmethod
def util(self): # extra staticmethod
pass
with pytest.raises(RuntimeError, match="Not recognized method"):
@pie(vd_type="double", md_type="double")
class PIE_1:
@staticmethod
def Init(frag, context):
pass
@staticmethod
def PEval(frag, context):
pass
@staticmethod
def IncEval(frag, context):
pass
@staticmethod # extra staticmethod
def util():
pass
def test_error_with_import_module():
with pytest.raises(RuntimeError, match="Import is not supported yet"):
@pregel(vd_type="double", md_type="double")
class Pregel_1:
@staticmethod
def Init(v, context):
import random
pass
@staticmethod
def Compute(message, v, context):
pass
with pytest.raises(RuntimeError, match="ImportFrom is not supported yet"):
@pregel(vd_type="double", md_type="double")
class Pregel_1:
@staticmethod
def Init(v, context):
from os import path
pass
@staticmethod
def Compute(message, v, context):
pass
def test_dump_gar(random_gar, not_exist_gar):
SSSP_Pregel.to_gar(random_gar)
    # gar file already exists
with pytest.raises(RuntimeError, match="Path exist"):
SSSP_Pregel.to_gar(random_gar)
    # directory does not exist (also applies when permission is denied)
with pytest.raises(FileNotFoundError, match="No such file or directory"):
SSSP_Pregel.to_gar(not_exist_gar)
def test_load_app_from_gar(random_gar, not_exist_gar, non_zipfile_gar):
    # file does not exist (also applies when permission is denied)
with pytest.raises(FileNotFoundError, match="No such file or directory"):
ast1 = load_app(not_exist_gar)
# not a zip file
with pytest.raises(ValueError, match="not a zip file"):
ast2 = load_app(non_zipfile_gar)
# type error
with pytest.raises(ValueError, match="Wrong type"):
ast3 = load_app([1, 2, 3, 4])
with pytest.raises(ValueError, match="Wrong type"):
ast4 = load_app(gar=None)
SSSP_Pregel.to_gar(random_gar)
ast1 = load_app(random_gar)
assert isinstance(ast1, AppAssets)
def test_error_on_create_cython_app(
graphscope_session, dynamic_property_graph, random_gar, empty_gar
):
SSSP_Pregel.to_gar(random_gar)
with pytest.raises(InvalidArgumentError, match="App is uncompatible with graph"):
a1 = load_app(random_gar)
a1(dynamic_property_graph, src=4)
# algo not found in gar resource
with pytest.raises(InvalidArgumentError, match="App not found in gar: sssp"):
a2 = load_app(algo="sssp", gar=random_gar)
a2(p2p_property_graph, src=6)
    # no `.gs_conf.yaml` in the empty gar, so a KeyError is raised
with pytest.raises(KeyError):
a3 = load_app(algo="SSSP_Pregel", gar=empty_gar)
a3(p2p_property_graph, src=6)
@pytest.mark.skipif("FULL-TEST-SUITE" not in os.environ, reason="Run in nightly CI")
def test_get_schema(graphscope_session, arrow_property_graph):
# pregel
a1 = Pregel_GetSchema()
ctx1 = a1(arrow_property_graph)
r1 = ctx1.to_numpy("r:v0", vertex_range={"begin": 0, "end": 7})
assert r1.tolist() == [
"v0,v1,dist,DOUBLE,id,LONG,e0,weight,LONG,e1,weight,LONG,",
"v0,v1,dist,DOUBLE,id,LONG,e0,weight,LONG,e1,weight,LONG,",
"v0,v1,dist,DOUBLE,id,LONG,e0,weight,LONG,e1,weight,LONG,",
]
# pie
a2 = PIE_GetSchema()
ctx2 = a2(arrow_property_graph)
r2 = ctx2.to_numpy("r:v0", vertex_range={"begin": 0, "end": 7})
assert r2.tolist() == [
"v0,v1,dist,DOUBLE,id,LONG,e0,weight,LONG,e1,weight,LONG,",
"v0,v1,dist,DOUBLE,id,LONG,e0,weight,LONG,e1,weight,LONG,",
"v0,v1,dist,DOUBLE,id,LONG,e0,weight,LONG,e1,weight,LONG,",
]
@pytest.mark.skipif("FULL-TEST-SUITE" not in os.environ, reason="Run in nightly CI")
def test_run_cython_pregel_app(
graphscope_session, p2p_property_graph, sssp_result, random_gar
):
SSSP_Pregel.to_gar(random_gar)
a1 = SSSP_Pregel()
ctx1 = a1(p2p_property_graph, src=6)
r1 = (
ctx1.to_dataframe({"node": "v:person.id", "r": "r:person"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r1[r1 == 1000000000.0] = float("inf")
assert np.allclose(r1, sssp_result["directed"])
    # redundant params are ok
ctx2 = a1(p2p_property_graph, src=6, other="a", yet_other=[1, 2, 3])
r2 = (
ctx2.to_dataframe({"node": "v:person.id", "r": "r:person"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r2[r2 == 1000000000.0] = float("inf")
assert np.allclose(r2, sssp_result["directed"])
# load from gar
a2 = load_app(random_gar)
ctx3 = a2(p2p_property_graph, src=6)
r3 = (
ctx3.to_dataframe({"node": "v:person.id", "r": "r:person"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r3[r3 == 1000000000.0] = float("inf")
assert np.allclose(r3, sssp_result["directed"])
    # positional args are not supported
with pytest.raises(
InvalidArgumentError, match="Only support using keyword arguments in cython app"
):
a3 = load_app(random_gar)
ctx4 = a3(p2p_property_graph, 6, src=6)
# combine
a5 = SSSP_Pregel_Combine()
ctx5 = a5(p2p_property_graph, src=6)
r5 = (
ctx5.to_dataframe({"node": "v:person.id", "r": "r:person"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r5[r5 == 1000000000.0] = float("inf")
assert np.allclose(r5, sssp_result["directed"])
# aggregator test
a6 = Aggregators_Pregel_Test()
a6(p2p_property_graph)
# math.h function test
a7 = MathInAlgorithm()
a7(p2p_property_graph)
@pytest.mark.skipif("FULL-TEST-SUITE" not in os.environ, reason="Run in nightly CI")
def test_run_cython_pie_app(
graphscope_session, p2p_property_graph, sssp_result, random_gar
):
SSSP_PIE.to_gar(random_gar)
a1 = SSSP_PIE()
ctx1 = a1(p2p_property_graph, src=6)
r1 = (
ctx1.to_dataframe({"node": "v:person.id", "r": "r:person"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r1[r1 == 1000000000.0] = float("inf")
assert np.allclose(r1, sssp_result["directed"])
ctx2 = a1(p2p_property_graph, src=6, other="a", yet_other=[1, 2, 3])
r2 = (
ctx2.to_dataframe({"node": "v:person.id", "r": "r:person"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r2[r2 == 1000000000.0] = float("inf")
assert np.allclose(r2, sssp_result["directed"])
# load from gar
a2 = load_app(random_gar)
ctx3 = a2(p2p_property_graph, src=6)
r3 = (
ctx3.to_dataframe({"node": "v:person.id", "r": "r:person"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r3[r3 == 1000000000.0] = float("inf")
assert np.allclose(r3, sssp_result["directed"])
    # positional args are not supported
with pytest.raises(
InvalidArgumentError, match="Only support using keyword arguments in cython app"
):
a3 = load_app(random_gar)
ctx4 = a3(p2p_property_graph, 6, src=6)
@pytest.mark.skipif("FULL-TEST-SUITE" not in os.environ, reason="Run in nightly CI")
def test_vertex_traversal(arrow_property_graph, twitter_v_0, twitter_v_1):
traversal = PregelVertexTraversal()
ctx = traversal(arrow_property_graph)
# to dataframe
r0 = ctx.to_dataframe({"node": "v:v0.id", "r": "r:v0"})
r1 = ctx.to_dataframe({"node": "v:v1.id", "r": "r:v1"})
def compare_result(df, result_file):
id_col = df["node"].astype("int64")
df = (
pd.DataFrame(df.r.str.split(",").tolist(), columns=["dist", "id"])
.reindex(columns=["id", "dist"])
.astype({"id": "int64", "dist": "float64"})
)
assert id_col.equals(df["id"])
df = df.sort_values(by=["id"]).reset_index(drop=True)
result_df = pd.read_csv(result_file, sep=",")
assert df.equals(result_df)
compare_result(r0, twitter_v_0)
compare_result(r1, twitter_v_1)
@pytest.mark.skipif("FULL-TEST-SUITE" not in os.environ, reason="Run in nightly CI")
def test_modern_graph_vertex_traversal(arrow_modern_graph):
traversal = PregelVertexTraversal()
ctx = traversal(arrow_modern_graph)
r0 = ctx.to_dataframe({"node": "v:person.id", "r": "r:person"})
r1 = ctx.to_dataframe({"node": "v:software.id", "r": "r:software"})
def compare_id(df, names):
id_col = df["node"].astype("int64")
df = (
pd.DataFrame(df.r.str.split(",").tolist(), columns=names + ["id"])
.reindex(columns=["id"] + names)
.astype({"id": "int64"})
)
assert id_col.equals(df["id"])
compare_id(r0, ["name", "age"])
compare_id(r1, ["name", "lang"])
@pytest.mark.skipif("FULL-TEST-SUITE" not in os.environ, reason="Run in nightly CI")
def test_edge_traversal(
arrow_property_graph,
twitter_e_0_0_0,
twitter_e_0_1_0,
twitter_e_1_0_0,
twitter_e_1_1_0,
twitter_e_0_0_1,
twitter_e_0_1_1,
twitter_e_1_0_1,
twitter_e_1_1_1,
):
traversal = PregelEdgeTraversal()
ctx = traversal(arrow_property_graph)
r0 = ctx.to_dataframe({"node": "v:v0.id", "r": "r:v0"})
r1 = ctx.to_dataframe({"node": "v:v1.id", "r": "r:v1"})
edges = []
edges.extend(r0.r.str.split("|").tolist())
edges.extend(r1.r.str.split("|").tolist())
df = pd.read_csv(
io.StringIO("\n".join([item for sublist in edges for item in sublist])),
sep=",",
names=["label", "src", "dst", "weight"],
)
def compare_result(df, *args):
df = df[["src", "dst", "weight"]].astype(
{"src": "int64", "dst": "int64", "weight": "float64"}
)
df = df.sort_values(by=["src", "dst"]).reset_index(drop=True)
result_df = pd.concat(
[pd.read_csv(arg) for arg in args], ignore_index=True, copy=False
)
result_df = result_df.astype(
{"src": "int64", "dst": "int64", "weight": "float64"}
)
result_df = result_df.sort_values(by=["src", "dst"]).reset_index(drop=True)
assert df.equals(result_df)
compare_result(
df[df["label"] == "e0"],
twitter_e_0_0_0,
twitter_e_0_1_0,
twitter_e_1_0_0,
twitter_e_1_1_0,
)
compare_result(
df[df["label"] == "e1"],
twitter_e_0_0_1,
twitter_e_0_1_1,
twitter_e_1_0_1,
twitter_e_1_1_1,
)
def test_pregel_api(graphscope_session, ldbc_graph):
a1 = Pregel_API_Test()
a1(ldbc_graph, param1="graphscope", param2="graphscope2")
def test_pie_api(graphscope_session, ldbc_graph_undirected):
a1 = PIE_API_Test()
a1(ldbc_graph_undirected, param1="graphscope", param2="graphscope2")
| 34.3175 | 106 | 0.58629 |
794212daa58e3054b8c1671b0faac4412b1ab3cd | 2,427 | py | Python | 1000-1100q/1034.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 990 | 2018-06-05T11:49:22.000Z | 2022-03-31T08:59:17.000Z | 1000-1100q/1034.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 1 | 2021-11-01T01:29:38.000Z | 2021-11-01T01:29:38.000Z | 1000-1100q/1034.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 482 | 2018-06-12T22:16:53.000Z | 2022-03-29T00:23:29.000Z | '''
Given a 2-dimensional grid of integers, each value in the grid represents the color of the grid square at that location.
Two squares belong to the same connected component if and only if they have the same color and are next to each other in any of the 4 directions.
The border of a connected component is all the squares in the connected component that are either 4-directionally adjacent to a square not in the component, or on the boundary of the grid (the first or last row or column).
Given a square at location (r0, c0) in the grid and a color, color the border of the connected component of that square with the given color, and return the final grid.
Example 1:
Input: grid = [[1,1],[1,2]], r0 = 0, c0 = 0, color = 3
Output: [[3, 3], [3, 2]]
Example 2:
Input: grid = [[1,2,2],[2,3,2]], r0 = 0, c0 = 1, color = 3
Output: [[1, 3, 3], [2, 3, 3]]
Example 3:
Input: grid = [[1,1,1],[1,1,1],[1,1,1]], r0 = 1, c0 = 1, color = 2
Output: [[2, 2, 2], [2, 1, 2], [2, 2, 2]]
Note:
1 <= grid.length <= 50
1 <= grid[0].length <= 50
1 <= grid[i][j] <= 1000
0 <= r0 < grid.length
0 <= c0 < grid[0].length
1 <= color <= 1000
'''
class Solution(object):
def colorBorder(self, grid, r0, c0, color):
"""
:type grid: List[List[int]]
:type r0: int
:type c0: int
:type color: int
:rtype: List[List[int]]
"""
if not grid:
return grid
visited, border = [], []
m, n = len(grid), len(grid[0])
def dfs(r, c):
if r < 0 or c < 0 or r >= m or c >= n or grid[r][c] != grid[r0][c0] or (r,c) in visited:
return
visited.append((r,c))
            # check whether the current (row, col) lies on the edge of the matrix;
            # if not, check whether any adjacent cell differs from grid[r0][c0] --
            # in either case the current cell belongs to the border
if (r == 0 or c == 0 or r == m-1 or c == n-1 or
(r+1 < m and grid[r+1][c] != grid[r0][c0]) or
(r-1 >= 0 and grid[r-1][c] != grid[r0][c0]) or
(c+1 < n and grid[r][c+1] != grid[r0][c0]) or
(c-1 >= 0 and grid[r][c-1] != grid[r0][c0])):
border.append((r,c))
dfs(r-1, c)
dfs(r+1, c)
dfs(r, c-1)
dfs(r, c+1)
dfs(r0, c0)
for (x, y) in border:
grid[x][y] = color
return grid
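# A minimal usage sketch (added for illustration, not part of the original
# solution): it replays the three examples from the docstring above to show the
# expected input/output of colorBorder when the module is run directly.
if __name__ == '__main__':
    s = Solution()
    # Example 1: every square of the component containing (0, 0) is on the border.
    print(s.colorBorder([[1, 1], [1, 2]], 0, 0, 3))        # [[3, 3], [3, 2]]
    # Example 2
    print(s.colorBorder([[1, 2, 2], [2, 3, 2]], 0, 1, 3))  # [[1, 3, 3], [2, 3, 3]]
    # Example 3: only the interior cell keeps its original color.
    print(s.colorBorder([[1, 1, 1], [1, 1, 1], [1, 1, 1]], 1, 1, 2))  # [[2, 2, 2], [2, 1, 2], [2, 2, 2]]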
| 34.671429 | 222 | 0.530284 |
79421477f139cbe753580f6ba988d5552b4d2f23 | 10,666 | py | Python | models/official/unet3d/tpu_executor.py | stefsietz/tpu | 1f8029b7849ccaafd121529a37a7c24613db7453 | [
"Apache-2.0"
] | null | null | null | models/official/unet3d/tpu_executor.py | stefsietz/tpu | 1f8029b7849ccaafd121529a37a7c24613db7453 | [
"Apache-2.0"
] | null | null | null | models/official/unet3d/tpu_executor.py | stefsietz/tpu | 1f8029b7849ccaafd121529a37a7c24613db7453 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Interface to run unet model."""
from __future__ import absolute_import
from __future__ import division
#Standard imports
from __future__ import print_function
import os
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v1 as tf
from hyperparameters import params_dict
FLAGS = flags.FLAGS
def define_tpu_flags():
"""Define common flags for TPU."""
flags.DEFINE_string(
'tpu',
default=None,
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 '
'url.')
flags.DEFINE_string(
'gcp_project',
default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone',
default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_integer(
'num_cores', default=8, help='Number of TPU cores for training')
flags.DEFINE_string(
'eval_master',
default='',
      help='GRPC URL of the eval master. Set to an appropriate value when running '
'on CPU/GPU')
flags.DEFINE_bool('use_tpu', True, 'Use TPUs rather than CPUs')
flags.DEFINE_multi_integer(
'input_partition_dims', [1],
'A list that describes the partition dims for all the tensors.')
flags.DEFINE_integer('iterations_per_loop', 8,
'Number of iterations per TPU training loop')
def get_tpu_flags():
"""Get TPU config related FLAGS as dictionary."""
return {
'tpu': FLAGS.tpu,
'gcp_project': FLAGS.gcp_project,
'tpu_zone': FLAGS.tpu_zone,
'num_cores': FLAGS.num_cores,
'eval_master': FLAGS.eval_master,
'use_tpu': FLAGS.use_tpu,
'input_partition_dims': FLAGS.input_partition_dims,
'iterations_per_loop': FLAGS.iterations_per_loop,
}
def write_summary(logs, summary_writer, current_step):
"""Write out summaries of current training step for the checkpoint."""
with tf.Graph().as_default():
summaries = [
tf.Summary.Value(tag=tag, simple_value=value)
for tag, value in logs.items()
]
tf_summary = tf.Summary(value=summaries)
summary_writer.add_summary(tf_summary, current_step)
class TPUEstimatorExecuter(object):
"""An executor class for running jobs on TPUs."""
def __init__(self, model_fn, params, train_input_shapes, eval_input_shapes):
self._model_dir = params.model_dir
self._params = params
self._train_input_shapes = train_input_shapes
self._eval_input_shapes = eval_input_shapes
if train_input_shapes:
self._train_estimator = self._build_estimator(
params.tpu_config, model_fn, params, train_input_shapes)
if eval_input_shapes:
self._eval_estimator = self._build_estimator(
params.tpu_config, model_fn, params, eval_input_shapes)
def _save_params(self):
"""Save parameters to config files if model_dir is defined."""
model_dir = self._model_dir
if model_dir is not None:
if not tf.gfile.Exists(model_dir):
tf.gfile.MakeDirs(model_dir)
params_dict.save_params_dict_to_yaml(self._params,
model_dir + '/params.yaml')
def _build_estimator(self, tpu_flags, model_fn, params, input_shapes):
"""Creates TPUEstimator/Estimator instance.
Args:
tpu_flags: FLAGS of TPU configs for constructing the TPUEstimator.
model_fn: model function that returns (TPU)EstimatorSpec.
params: A ParamsDict of TPU configs and dictionary to pass to Estimator
`model_fn`.
input_shapes: A nested tuple or list indicating the shape of each input.
For example, ([128, 128, 128, 1], [128, 128, 128, 3]).
Returns:
TFEstimator or TPUEstimator instance.
"""
eval_master = tpu_flags.eval_master
logging.info('debug tpu_flags %s', tpu_flags.as_dict())
if tpu_flags.use_tpu:
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu_flags.tpu, zone=tpu_flags.tpu_zone, project=tpu_flags.gcp_project)
tpu_grpc_url = tpu_cluster_resolver.get_master()
if not eval_master:
eval_master = tpu_grpc_url
tf.Session.reset(tpu_grpc_url)
else:
tpu_cluster_resolver = None
dims_overridden = params.input_partition_dims
if tpu_flags.input_partition_dims != [1]:
dims_overridden = tpu_flags.input_partition_dims
if dims_overridden and dims_overridden != [1]:
feature_shape, label_shape = input_shapes
# The input function may drop the last channel dimension. We need to do
# the same for spatial partition dims as well.
# Do not forget the batch dimension.
feature_partition = dims_overridden[:1 + len(feature_shape)]
label_partition = dims_overridden[:1 + len(label_shape)]
input_partition_dims = [
feature_partition,
label_partition,
]
num_cores_per_replica = np.prod(dims_overridden)
num_shards = tpu_flags.num_cores // num_cores_per_replica
else:
num_cores_per_replica = None
input_partition_dims = None
num_shards = tpu_flags.num_cores
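    # Illustrative trace (added for clarity; the numbers are hypothetical): with
    # num_cores=8, input_partition_dims=[1, 2, 2, 2] and input_shapes of
    # ([128, 128, 128, 1], [128, 128, 128, 3]), both partitions become
    # [1, 2, 2, 2], num_cores_per_replica = 1*2*2*2 = 8 and num_shards = 1,
    # i.e. one spatially partitioned replica spread across all eight cores.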
# Sets up config for TPUEstimator.
tpu_config = tf.estimator.tpu.TPUConfig(
tpu_flags.iterations_per_loop,
num_shards=num_shards,
num_cores_per_replica=num_cores_per_replica,
input_partition_dims=input_partition_dims,
per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig.PER_HOST_V2 # pylint: disable=line-too-long
)
run_config = tf.estimator.tpu.RunConfig(
cluster=tpu_cluster_resolver,
evaluation_master=eval_master,
model_dir=self._model_dir,
log_step_count_steps=tpu_flags.iterations_per_loop,
tpu_config=tpu_config,
)
model_params = dict(
params.as_dict(),
use_tpu=tpu_flags.use_tpu,
)
return tf.estimator.tpu.TPUEstimator(
model_fn=model_fn,
use_tpu=tpu_flags.use_tpu,
train_batch_size=params.train_batch_size,
eval_batch_size=params.eval_batch_size,
predict_batch_size=params.predict_batch_size,
config=run_config,
params=model_params)
def train(self, input_fn):
"""Training the model with training data and labels in input_fn."""
self._save_params()
self._train_estimator.train(input_fn=input_fn,
max_steps=self._params.train_steps)
def evaluate(self, input_fn):
"""Evaluating the model with data and labels in input_fn."""
output_dir = os.path.join(self._model_dir, 'eval')
tf.gfile.MakeDirs(output_dir)
# Summary writer writes out eval metrics.
summary_writer = tf.summary.FileWriter(output_dir)
def _terminate_eval():
logging.info('Terminating eval after %d seconds of '
'no checkpoints', self._params.eval_timeout)
return True
eval_results = None
# Run evaluation when there's a new checkpoint
for ckpt in tf.train.checkpoints_iterator(
self._model_dir,
min_interval_secs=self._params.min_eval_interval,
timeout=self._params.eval_timeout,
timeout_fn=_terminate_eval):
# Terminate eval job when final checkpoint is reached
current_step = int(os.path.basename(ckpt).split('-')[1])
logging.info('Starting to evaluate.')
try:
eval_results = self._eval_estimator.evaluate(
input_fn=input_fn, steps=self._params.eval_steps)
# Evaluation task could start before checkpoint is written,
        # get preempted, or fail to write the checkpoint correctly.
if eval_results is not None:
write_summary(eval_results, summary_writer, current_step)
if current_step >= self._params.train_steps:
logging.info('Evaluation finished after training step %d',
current_step)
break
except tf.errors.NotFoundError:
# Since the coordinator is on a different job than the TPU worker,
# sometimes the TPU worker does not finish initializing until long after
# the CPU job tells it to start evaluating. In this case, the checkpoint
# file could have been deleted already.
logging.info('Checkpoint %s no longer exists, skipping checkpoint',
ckpt)
summary_writer.close()
logging.info('Evaluation results %s.', eval_results)
return eval_results
def train_and_eval(self, train_input_fn, eval_input_fn):
"""Run distributed train and eval on UNet model."""
self._save_params()
output_dir = os.path.join(self._model_dir, 'eval')
tf.gfile.MakeDirs(output_dir)
summary_writer = tf.summary.FileWriter(output_dir)
num_cycles = int(self._params.train_steps / self._params.num_steps_per_eval)
for cycle in range(num_cycles):
logging.info('Start training cycle %d.', cycle)
self._train_estimator.train(
input_fn=train_input_fn, steps=self._params.num_steps_per_eval)
logging.info('Start evaluation cycle %d.', cycle)
eval_results = self._eval_estimator.evaluate(
input_fn=eval_input_fn, steps=self._params.eval_steps)
current_step = int(cycle * self._params.num_steps_per_eval)
write_summary(eval_results, summary_writer, current_step)
logging.info('Starting training cycle %d.', num_cycles)
self._train_estimator.train(
input_fn=train_input_fn, steps=self._params.train_steps)
eval_results = self._eval_estimator.evaluate(
input_fn=eval_input_fn, steps=self._params.eval_steps)
write_summary(eval_results, summary_writer, self._params.train_steps)
summary_writer.close()
logging.info('Evaluation results %s.', eval_results)
return eval_results
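# A hedged usage sketch (added for illustration, not part of the original
# module): `unet_model_fn`, `params` and the input functions are assumed to be
# supplied by the surrounding unet3d training scripts; the shapes are examples.
#
#   executor = TPUEstimatorExecuter(
#       unet_model_fn, params,
#       train_input_shapes=([128, 128, 128, 1], [128, 128, 128, 3]),
#       eval_input_shapes=([128, 128, 128, 1], [128, 128, 128, 3]))
#   executor.train_and_eval(train_input_fn, eval_input_fn)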
| 38.229391 | 117 | 0.698387 |
7942148814f66eadf0bc58b4b738d4e7672e3946 | 1,694 | py | Python | networks/cis_depth4.py | rfww/EfficientChangeDetection | 42d466c56ed262980c27fd6cde6ffe65314e638f | [
"BSD-Source-Code"
] | null | null | null | networks/cis_depth4.py | rfww/EfficientChangeDetection | 42d466c56ed262980c27fd6cde6ffe65314e638f | [
"BSD-Source-Code"
] | null | null | null | networks/cis_depth4.py | rfww/EfficientChangeDetection | 42d466c56ed262980c27fd6cde6ffe65314e638f | [
"BSD-Source-Code"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.utils import model_zoo
from torchvision import models
from utils.spp_layer import spatial_pyramid_pool
class CIS_VGGBN(nn.Module):
def __init__(self, backbone='vgg16_bn', pretrained=True, freeze_backbone=False):
super(CIS_VGGBN, self).__init__()
self.output_num = [4,2,1]
vgg = models.vgg16_bn(pretrained)
features = list(vgg.features.children())
self.dec1 = nn.Sequential(*features[:7]) # 160
# self.dec2 = nn.Sequential(*features[5:10]) # 80
# self.dec3 = nn.Sequential(*features[10:17]) # 40
# self.dec4 = nn.Sequential(*features[17:24]) # 20
# self.dec5 = nn.Sequential(*features[24:]) # 10
self.cis1 = nn.Sequential(
nn.Conv2d(128, 64, 3, padding=1,stride=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
self.cis2 = nn.Sequential(
nn.Linear(1344, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, 128),
nn.ReLU(),
nn.Linear(128, 2)
)
        self.dp = nn.Softmax(dim=1)  # explicit dim; the classifier output is (batch, 2)
    def forward(self, x, y, bt):
        # First VGG-16 (BN) block features for the two input images.
        x_f1 = self.dec1(x)
        y_f1 = self.dec1(y)
        # Concatenate the two feature maps along the channel dimension.
        enc = torch.cat((x_f1, y_f1), 1)
        clc1 = self.cis1(enc)
        # Spatial pyramid pooling yields a fixed-length descriptor (bt is the batch size).
        spp = spatial_pyramid_pool(clc1, bt, [clc1.size(2), clc1.size(3)], self.output_num)
        clc2 = self.cis2(spp)
        # Softmax over the two output classes.
        dp = self.dp(clc2)
        return x_f1, y_f1, dp
# def initialize(self):
# self.load_state_dict(torch.load('../res/resnet50-19c8e357.pth'), strict=False)
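# A minimal usage sketch (added for illustration, not part of the original
# file): assumes two aligned RGB inputs of equal spatial size and
# pretrained=False to avoid downloading VGG weights.
#
#   model = CIS_VGGBN(pretrained=False)
#   x = torch.randn(2, 3, 160, 160)
#   y = torch.randn(2, 3, 160, 160)
#   x_feat, y_feat, probs = model(x, y, bt=2)  # probs: (2, 2) class scores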
| 33.215686 | 89 | 0.578512 |
794215fe8e739599f3a1be55fd7682d28ffc6021 | 4,450 | py | Python | src/moveit2/moveit_demo_nodes/run_moveit_cpp/launch/run_moveit_cpp2.launch.py | iamrajee/ros2eloquent_moveit_ws | 210d7d6d90c19e68a3ea17aee3a3dc9ee13c8a89 | [
"MIT"
] | 17 | 2020-03-03T14:38:20.000Z | 2021-08-13T08:28:16.000Z | src/moveit2/moveit_demo_nodes/run_moveit_cpp/launch/run_moveit_cpp2.launch.py | iamrajee/ros2eloquent_moveit_ws | 210d7d6d90c19e68a3ea17aee3a3dc9ee13c8a89 | [
"MIT"
] | 1 | 2020-03-12T08:10:13.000Z | 2020-03-30T16:20:33.000Z | src/moveit2/moveit_demo_nodes/run_moveit_cpp/launch/run_moveit_cpp2.launch.py | iamrajee/ros2eloquent_moveit_ws | 210d7d6d90c19e68a3ea17aee3a3dc9ee13c8a89 | [
"MIT"
] | 2 | 2020-04-03T06:56:01.000Z | 2021-08-13T08:28:18.000Z | import os
import yaml
from launch import LaunchDescription
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
# Helper function to load file
def load_file(package_name, file_path):
package_path = get_package_share_directory(package_name)
absolute_file_path = os.path.join(package_path, file_path)
try:
with open(absolute_file_path, 'r') as file:
return file.read()
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
return None
# Helper function to load yaml
def load_yaml(package_name, file_path):
package_path = get_package_share_directory(package_name)
absolute_file_path = os.path.join(package_path, file_path)
try:
with open(absolute_file_path, 'r') as file:
            return yaml.safe_load(file)  # safe_load avoids constructing arbitrary objects
except EnvironmentError: # parent of IOError, OSError *and* WindowsError where available
return None
def generate_launch_description(): #main funtion
# ========= load files(yaml,urdf,srdf,rviz) ========= #
rviz_config_file = get_package_share_directory('run_moveit_cpp') + "/launch/run_moveit_cpp2.rviz"
moveit_cpp_yaml_file_name = get_package_share_directory('run_moveit_cpp') + "/config/moveit_cpp2.yaml" ### ---- moveit_cpp.yaml is passed by filename for now since it's node specific
panda_controllers_yaml = os.path.join(get_package_share_directory("run_moveit_cpp"), "config", "panda_controllers2.yaml")
start_positions_yaml = os.path.join(get_package_share_directory("run_moveit_cpp"), "config", "start_positions2.yaml")
controllers_yaml = load_yaml('run_moveit_cpp', 'config/controllers2.yaml')
robot_description_config = load_file('moveit_resources', 'panda_description/urdf/panda.urdf') # Component yaml files are grouped in separate namespaces
robot_description_semantic_config = load_file('moveit_resources', 'panda_moveit_config/config/panda.srdf')
kinematics_yaml = load_yaml('moveit_resources', 'panda_moveit_config/config/kinematics.yaml')
ompl_planning_yaml = load_yaml('moveit_resources', 'panda_moveit_config/config/ompl_planning.yaml')
# ======== params ======== #
robot_description = {'robot_description' : robot_description_config}
robot_description_semantic = {'robot_description_semantic' : robot_description_semantic_config}
robot_description_kinematics = { 'robot_description_kinematics' : kinematics_yaml }
moveit_controllers = { 'moveit_simple_controller_manager' : controllers_yaml }
    ompl_planning_pipeline_config = {
        'ompl': {
            'planning_plugin': 'ompl_interface/OMPLPlanner',
            'request_adapters': """default_planner_request_adapters/AddTimeOptimalParameterization default_planner_request_adapters/FixWorkspaceBounds default_planner_request_adapters/FixStartStateBounds default_planner_request_adapters/FixStartStateCollision default_planner_request_adapters/FixStartStatePathConstraints""",
            'start_state_max_bounds_error': 0.1,
        }
    }
ompl_planning_pipeline_config['ompl'].update(ompl_planning_yaml)
# ========= Nodes ========= #
# --------- run_moveit_cpp node ---------
run_moveit_cpp_node = Node(node_name='run_moveit_cpp', package='run_moveit_cpp', node_executable='run_moveit_cpp2', output='screen', parameters=[moveit_cpp_yaml_file_name, robot_description, robot_description_semantic, kinematics_yaml, ompl_planning_pipeline_config, moveit_controllers]) #TODO(henningkayser): add debug argument #prefix='xterm -e gdb --args',
# --------- RViz ---------
rviz_node = Node(package='rviz2', node_executable='rviz2', node_name='rviz2', output='log', arguments=['-d', rviz_config_file], parameters=[robot_description])
# --------- static TF ---------
static_tf = Node(package='tf2_ros', node_executable='static_transform_publisher', node_name='static_transform_publisher', output='log', arguments=['0.0', '0.0', '0.0', '0.0', '0.0', '0.0', 'world', 'panda_link0'])
# --------- Fake joint driver ---------
fake_joint_driver_node = Node(package='fake_joint_driver', node_executable='fake_joint_driver_node', parameters=[panda_controllers_yaml, start_positions_yaml, robot_description] ) ###TODO(JafarAbdi): Why this launch the two nodes (controller manager and the fake joint driver) with the same name! # node_name='fake_joint_driver_node',
return LaunchDescription( [static_tf, rviz_node, run_moveit_cpp_node, fake_joint_driver_node] ) | 75.423729 | 455 | 0.760225 |