max_stars_repo_path (string, 4–245 chars) | max_stars_repo_name (string, 7–115 chars) | max_stars_count (int64, 101–368k) | id (string, 2–8 chars) | content (string, 6–1.03M chars)
---|---|---|---|---
deps/pybind11/tests/test_smart_ptr.py | Mistobaan/coremltools | 11,356 | 129720 |
<filename>deps/pybind11/tests/test_smart_ptr.py
import pytest
from pybind11_tests import ConstructorStats
def test_smart_ptr(capture):
# Object1
from pybind11_tests import (MyObject1, make_object_1, make_object_2,
print_object_1, print_object_2, print_object_3, print_object_4)
for i, o in enumerate([make_object_1(), make_object_2(), MyObject1(3)], start=1):
assert o.getRefCount() == 1
with capture:
print_object_1(o)
print_object_2(o)
print_object_3(o)
print_object_4(o)
assert capture == "MyObject1[{i}]\n".format(i=i) * 4
from pybind11_tests import (make_myobject1_1, make_myobject1_2,
print_myobject1_1, print_myobject1_2,
print_myobject1_3, print_myobject1_4)
for i, o in enumerate([make_myobject1_1(), make_myobject1_2(), MyObject1(6), 7], start=4):
print(o)
with capture:
if not isinstance(o, int):
print_object_1(o)
print_object_2(o)
print_object_3(o)
print_object_4(o)
print_myobject1_1(o)
print_myobject1_2(o)
print_myobject1_3(o)
print_myobject1_4(o)
assert capture == "MyObject1[{i}]\n".format(i=i) * (4 if isinstance(o, int) else 8)
cstats = ConstructorStats.get(MyObject1)
assert cstats.alive() == 0
expected_values = ['MyObject1[{}]'.format(i) for i in range(1, 7)] + ['MyObject1[7]'] * 4
assert cstats.values() == expected_values
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# Object2
from pybind11_tests import (MyObject2, make_myobject2_1, make_myobject2_2,
make_myobject3_1, make_myobject3_2,
print_myobject2_1, print_myobject2_2,
print_myobject2_3, print_myobject2_4)
for i, o in zip([8, 6, 7], [MyObject2(8), make_myobject2_1(), make_myobject2_2()]):
print(o)
with capture:
print_myobject2_1(o)
print_myobject2_2(o)
print_myobject2_3(o)
print_myobject2_4(o)
assert capture == "MyObject2[{i}]\n".format(i=i) * 4
cstats = ConstructorStats.get(MyObject2)
assert cstats.alive() == 1
o = None
assert cstats.alive() == 0
assert cstats.values() == ['MyObject2[8]', 'MyObject2[6]', 'MyObject2[7]']
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# Object3
from pybind11_tests import (MyObject3, print_myobject3_1, print_myobject3_2,
print_myobject3_3, print_myobject3_4)
for i, o in zip([9, 8, 9], [MyObject3(9), make_myobject3_1(), make_myobject3_2()]):
print(o)
with capture:
print_myobject3_1(o)
print_myobject3_2(o)
print_myobject3_3(o)
print_myobject3_4(o)
assert capture == "MyObject3[{i}]\n".format(i=i) * 4
cstats = ConstructorStats.get(MyObject3)
assert cstats.alive() == 1
o = None
assert cstats.alive() == 0
assert cstats.values() == ['MyObject3[9]', 'MyObject3[8]', 'MyObject3[9]']
assert cstats.default_constructions == 0
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# Object and ref
from pybind11_tests import Object, cstats_ref
cstats = ConstructorStats.get(Object)
assert cstats.alive() == 0
assert cstats.values() == []
assert cstats.default_constructions == 10
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
cstats = cstats_ref()
assert cstats.alive() == 0
assert cstats.values() == ['from pointer'] * 10
assert cstats.default_constructions == 30
assert cstats.copy_constructions == 12
# assert cstats.move_constructions >= 0 # Doesn't invoke any
assert cstats.copy_assignments == 30
assert cstats.move_assignments == 0
def test_smart_ptr_refcounting():
from pybind11_tests import test_object1_refcounting
assert test_object1_refcounting()
def test_unique_nodelete():
from pybind11_tests import MyObject4
o = MyObject4(23)
assert o.value == 23
cstats = ConstructorStats.get(MyObject4)
assert cstats.alive() == 1
del o
cstats = ConstructorStats.get(MyObject4)
assert cstats.alive() == 1 # Leak, but that's intentional
def test_shared_ptr_and_references():
from pybind11_tests.smart_ptr import SharedPtrRef, A
s = SharedPtrRef()
stats = ConstructorStats.get(A)
assert stats.alive() == 2
ref = s.ref # init_holder_helper(holder_ptr=false, owned=false)
assert stats.alive() == 2
assert s.set_ref(ref)
with pytest.raises(RuntimeError) as excinfo:
assert s.set_holder(ref)
assert "Unable to cast from non-held to held instance" in str(excinfo.value)
copy = s.copy # init_holder_helper(holder_ptr=false, owned=true)
assert stats.alive() == 3
assert s.set_ref(copy)
assert s.set_holder(copy)
holder_ref = s.holder_ref # init_holder_helper(holder_ptr=true, owned=false)
assert stats.alive() == 3
assert s.set_ref(holder_ref)
assert s.set_holder(holder_ref)
holder_copy = s.holder_copy # init_holder_helper(holder_ptr=true, owned=true)
assert stats.alive() == 3
assert s.set_ref(holder_copy)
assert s.set_holder(holder_copy)
del ref, copy, holder_ref, holder_copy, s
assert stats.alive() == 0
def test_shared_ptr_from_this_and_references():
from pybind11_tests.smart_ptr import SharedFromThisRef, B
s = SharedFromThisRef()
stats = ConstructorStats.get(B)
assert stats.alive() == 2
ref = s.ref # init_holder_helper(holder_ptr=false, owned=false, bad_wp=false)
assert stats.alive() == 2
assert s.set_ref(ref)
assert s.set_holder(ref) # std::enable_shared_from_this can create a holder from a reference
bad_wp = s.bad_wp # init_holder_helper(holder_ptr=false, owned=false, bad_wp=true)
assert stats.alive() == 2
assert s.set_ref(bad_wp)
with pytest.raises(RuntimeError) as excinfo:
assert s.set_holder(bad_wp)
assert "Unable to cast from non-held to held instance" in str(excinfo.value)
copy = s.copy # init_holder_helper(holder_ptr=false, owned=true, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(copy)
assert s.set_holder(copy)
holder_ref = s.holder_ref # init_holder_helper(holder_ptr=true, owned=false, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(holder_ref)
assert s.set_holder(holder_ref)
holder_copy = s.holder_copy # init_holder_helper(holder_ptr=true, owned=true, bad_wp=false)
assert stats.alive() == 3
assert s.set_ref(holder_copy)
assert s.set_holder(holder_copy)
del ref, bad_wp, copy, holder_ref, holder_copy, s
assert stats.alive() == 0
def test_move_only_holder():
from pybind11_tests.smart_ptr import TypeWithMoveOnlyHolder
a = TypeWithMoveOnlyHolder.make()
stats = ConstructorStats.get(TypeWithMoveOnlyHolder)
assert stats.alive() == 1
del a
assert stats.alive() == 0
|
django-rest-framework-fast/characters/migrations/0001_squashed_0003_auto_20190416_1625.py | mervatkheir/kite-python-blog-post-code | 238 | 129751 |
# Generated by Django 2.2 on 2019-04-16 19:59
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Line',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('line_text', models.TextField()),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='Character',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.TextField()),
('profession', models.CharField(max_length=50)),
('type', models.CharField(choices=[('hero', 'Hero'), ('villain', 'Villain'), ('sidekick', 'Sidekick'), ('comedic', 'Comedic'), ('extra', 'Extra'), ('solo', 'Solo Artist')], default='extra', max_length=20)),
('mentor', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='proteges', to='characters.Character')),
('team', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='characters.Team')),
],
),
migrations.CreateModel(
name='LineModifier',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('modifier', models.CharField(max_length=50)),
('character', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='line_modifiers', to='characters.Character')),
],
),
]
|
funboost/utils/dependency_packages/mongomq/utils.py | DJMIN/funboost | 333 | 129767 |
<filename>funboost/utils/dependency_packages/mongomq/utils.py
def enum(name, *sequential, **named):
values = dict(zip(sequential, range(len(sequential))), **named)
# NOTE: Yes, we *really* want to cast using str() here.
# On Python 2 type() requires a byte string (which is str() on Python 2).
# On Python 3 it does not matter, so we'll use str(), which acts as
# a no-op.
return type(str(name), (), values)
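# Editor's sketch, not part of the original funboost file: a minimal usage
# example of enum(). Positional names get ascending integers starting at 0,
# and keyword arguments override or extend the mapping.
if __name__ == "__main__":
    State = enum("State", "PENDING", "RUNNING", DONE=10)
    assert State.PENDING == 0
    assert State.RUNNING == 1
    assert State.DONE == 10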
|
tests/rest/test_rest.py | edmont/ot-br-posix | 174 | 129779 |
<reponame>edmont/ot-br-posix
#!/usr/bin/env python3
#
# Copyright (c) 2020, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import urllib.request
import urllib.error
import ipaddress
import json
import re
from threading import Thread
rest_api_addr = "http://0.0.0.0:8081"
def assert_is_ipv6_address(string):
assert (type(ipaddress.ip_address(string)) is ipaddress.IPv6Address)
def get_data_from_url(url, result, index):
response = urllib.request.urlopen(urllib.request.Request(url))
body = response.read()
data = json.loads(body)
result[index] = data
def get_error_from_url(url, result, index):
try:
urllib.request.urlopen(urllib.request.Request(url))
assert False
except urllib.error.HTTPError as e:
result[index] = e
def create_multi_thread(func, url, thread_num, response_data):
threads = [None] * thread_num
for i in range(thread_num):
threads[i] = Thread(target=func, args=(url, response_data, i))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
def error404_check(data):
assert data is not None
assert (data.code == 404)
return True
def diagnostics_check(data):
assert data is not None
if len(data) == 0:
return 1
for diag in data:
expected_keys = [
"ExtAddress", "Rloc16", "Mode", "Connectivity", "Route",
"LeaderData", "NetworkData", "IP6AddressList", "MACCounters",
"ChildTable", "ChannelPages"
]
expected_value_type = [
str, int, dict, dict, dict, dict, str, list, dict, list,
str
]
expected_check_dict = dict(zip(expected_keys, expected_value_type))
for key, value in expected_check_dict.items():
assert (key in diag)
assert (type(diag[key]) == value)
assert (re.match(r'^[A-F0-9]{16}$', diag["ExtAddress"]) is not None)
mode = diag["Mode"]
mode_expected_keys = [
"RxOnWhenIdle", "DeviceType", "NetworkData"
]
for key in mode_expected_keys:
assert (key in mode)
assert (type(mode[key]) == int)
connectivity = diag["Connectivity"]
connectivity_expected_keys = [
"ParentPriority", "LinkQuality3", "LinkQuality2", "LinkQuality1",
"LeaderCost", "IdSequence", "ActiveRouters", "SedBufferSize",
"SedDatagramCount"
]
for key in connectivity_expected_keys:
assert (key in connectivity)
assert (type(connectivity[key]) == int)
route = diag["Route"]
assert ("IdSequence" in route)
assert (type(route["IdSequence"]) == int)
assert ("RouteData" in route)
route_routedata = route["RouteData"]
assert (type(route["RouteData"]) == list)
routedata_expected_keys = [
"RouteId", "LinkQualityOut", "LinkQualityIn", "RouteCost"
]
for item in route_routedata:
for key in routedata_expected_keys:
assert (key in item)
assert (type(item[key]) == int)
leaderdata = diag["LeaderData"]
leaderdata_expected_keys = [
"PartitionId", "Weighting", "DataVersion", "StableDataVersion",
"LeaderRouterId"
]
for key in leaderdata_expected_keys:
assert (key in leaderdata)
assert (type(leaderdata[key]) == int)
assert (re.match(r'^[A-F0-9]{12}$', diag["NetworkData"]) is not None)
ip6_address_list = diag["IP6AddressList"]
assert (type(ip6_address_list) == list)
for ip6_address in ip6_address_list:
assert (type(ip6_address) == str)
assert_is_ipv6_address(ip6_address)
mac_counters = diag["MACCounters"]
assert (type(mac_counters) == dict)
mac_counters_expected_keys = [
"IfInUnknownProtos", "IfInErrors", "IfOutErrors", "IfInUcastPkts",
"IfInBroadcastPkts", "IfInDiscards", "IfOutUcastPkts",
"IfOutBroadcastPkts", "IfOutDiscards"
]
for key in mac_counters_expected_keys:
assert (key in mac_counters)
assert (type(mac_counters[key]) == int)
child_table = diag["ChildTable"]
assert (type(child_table) == list)
for child in child_table:
assert ("ChildId" in child)
assert (type(child["ChildId"]) == int)
assert ("Timeout" in child)
assert (type(child["Timeout"]) == int)
assert ("Mode" in child)
mode = child["Mode"]
assert (type(mode) == dict)
for key in mode_expected_keys:
assert (key in mode)
assert (type(mode[key]) == int)
assert (type(diag["ChannelPages"]) == str)
assert (re.match(r'^[A-F0-9]{2}$', diag["ChannelPages"]) is not None)
return 2
def node_check(data):
assert data is not None
expected_keys = [
"State", "NumOfRouter", "RlocAddress", "NetworkName", "ExtAddress",
"Rloc16", "LeaderData", "ExtPanId"
]
expected_value_type = [
int, int, str, str, str, int, dict, str
]
expected_check_dict = dict(zip(expected_keys, expected_value_type))
for key, value in expected_check_dict.items():
assert (key in data)
assert (type(data[key]) == value)
assert_is_ipv6_address(data["RlocAddress"])
assert (re.match(r'^[A-F0-9]{16}$', data["ExtAddress"]) is not None)
assert (re.match(r'[A-F0-9]{16}', data["ExtPanId"]) is not None)
leaderdata = data["LeaderData"]
leaderdata_expected_keys = [
"PartitionId", "Weighting", "DataVersion", "StableDataVersion",
"LeaderRouterId"
]
for key in leaderdata_expected_keys:
assert (key in leaderdata)
assert (type(leaderdata[key]) == int)
return True
def node_rloc_check(data):
assert data is not None
assert (type(data) == str)
assert_is_ipv6_address(data)
return True
def node_rloc16_check(data):
assert data is not None
assert (type(data) == int)
return True
def node_ext_address_check(data):
assert data is not None
assert (type(data) == str)
assert (re.match(r'^[A-F0-9]{16}$', data) is not None)
return True
def node_state_check(data):
assert data is not None
assert (type(data) == int)
return True
def node_network_name_check(data):
assert data is not None
assert (type(data) == str)
return True
def node_leader_data_check(data):
assert data is not None
assert (type(data) == dict)
leaderdata_expected_keys = [
"PartitionId", "Weighting", "DataVersion", "StableDataVersion",
"LeaderRouterId"
]
for key in leaderdata_expected_keys:
assert (key in data)
assert (type(data[key]) == int)
return True
def node_num_of_router_check(data):
assert data is not None
assert (type(data) == int)
return True
def node_ext_panid_check(data):
assert data is not None
assert (type(data) == str)
return True
def node_test(thread_num):
url = rest_api_addr + "/node"
response_data = [None] * thread_num
create_multi_thread(get_data_from_url, url, thread_num, response_data)
valid = [node_check(data) for data in response_data].count(True)
print(" /node : all {}, valid {} ".format(thread_num, valid))
def node_rloc_test(thread_num):
url = rest_api_addr + "/node/rloc"
response_data = [None] * thread_num
create_multi_thread(get_data_from_url, url, thread_num, response_data)
valid = [node_rloc_check(data) for data in response_data].count(True)
print(" /node/rloc : all {}, valid {} ".format(thread_num, valid))
def node_rloc16_test(thread_num):
url = rest_api_addr + "/node/rloc16"
response_data = [None] * thread_num
create_multi_thread(get_data_from_url, url, thread_num, response_data)
valid = [node_rloc16_check(data) for data in response_data].count(True)
print(" /node/rloc16 : all {}, valid {} ".format(thread_num, valid))
def node_ext_address_test(thread_num):
url = rest_api_addr + "/node/ext-address"
response_data = [None] * thread_num
create_multi_thread(get_data_from_url, url, thread_num, response_data)
valid = [node_ext_address_check(data) for data in response_data].count(True)
print(" /node/ext-address : all {}, valid {} ".format(thread_num, valid))
def node_state_test(thread_num):
url = rest_api_addr + "/node/state"
response_data = [None] * thread_num
create_multi_thread(get_data_from_url, url, thread_num, response_data)
valid = [node_state_check(data) for data in response_data].count(True)
print(" /node/state : all {}, valid {} ".format(thread_num, valid))
def node_network_name_test(thread_num):
url = rest_api_addr + "/node/network-name"
response_data = [None] * thread_num
create_multi_thread(get_data_from_url, url, thread_num, response_data)
valid = [node_network_name_check(data) for data in response_data
].count(True)
print(" /node/network-name : all {}, valid {} ".format(thread_num, valid))
def node_leader_data_test(thread_num):
url = rest_api_addr + "/node/leader-data"
response_data = [None] * thread_num
create_multi_thread(get_data_from_url, url, thread_num, response_data)
valid = [node_leader_data_check(data) for data in response_data].count(True)
print(" /node/leader-data : all {}, valid {} ".format(thread_num, valid))
def node_num_of_router_test(thread_num):
url = rest_api_addr + "/node/num-of-router"
response_data = [None] * thread_num
create_multi_thread(get_data_from_url, url, thread_num, response_data)
valid = [node_num_of_router_check(data) for data in response_data
].count(True)
print(" /node/num-of-router : all {}, valid {} ".format(thread_num, valid))
def node_ext_panid_test(thread_num):
url = rest_api_addr + "/node/ext-panid"
response_data = [None] * thread_num
create_multi_thread(get_data_from_url, url, thread_num, response_data)
valid = [node_ext_panid_check(data) for data in response_data].count(True)
print(" /node/ext-panid : all {}, valid {} ".format(thread_num, valid))
def diagnostics_test(thread_num):
url = rest_api_addr + "/diagnostics"
response_data = [None] * thread_num
create_multi_thread(get_data_from_url, url, thread_num, response_data)
valid = 0
has_content = 0
for data in response_data:
ret = diagnostics_check(data)
if ret == 1:
valid += 1
elif ret == 2:
valid += 1
has_content += 1
print(" /diagnostics : all {}, has content {}, valid {} ".format(
thread_num, has_content, valid))
def error_test(thread_num):
url = rest_api_addr + "/hello"
response_data = [None] * thread_num
create_multi_thread(get_error_from_url, url, thread_num, response_data)
valid = [error404_check(data) for data in response_data].count(True)
print(" /hello : all {}, valid {} ".format(thread_num, valid))
def main():
node_test(200)
node_rloc_test(200)
node_rloc16_test(200)
node_ext_address_test(200)
node_state_test(200)
node_network_name_test(200)
node_leader_data_test(200)
node_num_of_router_test(200)
node_ext_panid_test(200)
diagnostics_test(20)
error_test(10)
return 0
if __name__ == '__main__':
exit(main())
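# Editor's note, not part of the original test: the script expects an
# otbr-agent REST server already listening at rest_api_addr
# (http://0.0.0.0:8081); it issues 200 concurrent requests per /node* endpoint,
# 20 against /diagnostics, and 10 against an unknown path to verify 404 handling.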
|
gpytorch/functions/matern_covariance.py | llguo95/gpytorch | 2,673 | 129784 |
<gh_stars>1000+
import math
import torch
class MaternCovariance(torch.autograd.Function):
@staticmethod
def forward(ctx, x1, x2, lengthscale, nu, dist_func):
if any(ctx.needs_input_grad[:2]):
raise RuntimeError("MaternCovariance cannot compute gradients with " "respect to x1 and x2")
if lengthscale.size(-1) > 1:
raise ValueError("MaternCovariance cannot handle multiple lengthscales")
# Subtract mean for numerical stability. Won't affect computations
# because covariance matrix is stationary.
needs_grad = any(ctx.needs_input_grad)
mean = x1.reshape(-1, x1.size(-1)).mean(0)[(None,) * (x1.dim() - 1)]
x1_ = (x1 - mean).div(lengthscale)
x2_ = (x2 - mean).div(lengthscale)
scaled_unitless_dist = dist_func(x1_, x2_).mul_(math.sqrt(2 * nu))
if nu == 0.5:
# 1 kernel sized Tensor if no grad else 2
scaled_unitless_dist_ = scaled_unitless_dist.clone() if needs_grad else scaled_unitless_dist
exp_component = scaled_unitless_dist_.neg_().exp_()
covar_mat = exp_component
if needs_grad:
d_output_d_input = scaled_unitless_dist.div_(lengthscale).mul_(exp_component)
elif nu == 1.5:
# 2 kernel sized Tensors if no grad else 3
if needs_grad:
scaled_unitless_dist_ = scaled_unitless_dist.clone()
linear_term = scaled_unitless_dist.clone().add_(1)
exp_component = scaled_unitless_dist.neg_().exp_()
covar_mat = linear_term.mul_(exp_component)
if needs_grad:
d_output_d_input = scaled_unitless_dist_.pow_(2).div_(lengthscale).mul_(exp_component)
elif nu == 2.5:
# 3 kernel sized Tensors if no grad else 4
linear_term = scaled_unitless_dist.clone().add_(1)
quadratic_term = scaled_unitless_dist.clone().pow_(2).div_(3)
exp_component = scaled_unitless_dist.neg_().exp_()
if needs_grad:
covar_mat = (linear_term + quadratic_term).mul_(exp_component)
d_output_d_input = linear_term.mul_(quadratic_term).mul_(exp_component).div_(lengthscale)
else:
covar_mat = exp_component.mul_(linear_term.add_(quadratic_term))
if needs_grad:
ctx.save_for_backward(d_output_d_input)
return covar_mat
@staticmethod
def backward(ctx, grad_output):
d_output_d_input = ctx.saved_tensors[0]
lengthscale_grad = grad_output * d_output_d_input
return None, None, lengthscale_grad, None, None
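# Editor's sketch, not part of the original gpytorch file: one hedged way to
# invoke this autograd Function directly. dist_func is assumed to return the
# pairwise Euclidean distance matrix; torch.cdist stands in for gpytorch's own
# distance helper here, and nu must be 0.5, 1.5 or 2.5 for the branches above.
if __name__ == "__main__":
    x1 = torch.randn(5, 3)
    x2 = torch.randn(4, 3)
    lengthscale = torch.tensor([2.0])
    covar = MaternCovariance.apply(x1, x2, lengthscale, 2.5, torch.cdist)
    print(covar.shape)  # torch.Size([5, 4])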
|
geopyspark/geotrellis/protobuf/extentMessages_pb2.py | geotrellis/geotrellis-python | 182 | 129792 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: extentMessages.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='extentMessages.proto',
package='protos',
syntax='proto3',
serialized_pb=_b('\n\x14\x65xtentMessages.proto\x12\x06protos\"\'\n\x08ProtoCRS\x12\x0c\n\x04\x65psg\x18\x01 \x01(\x05\x12\r\n\x05proj4\x18\x02 \x01(\t\"E\n\x0bProtoExtent\x12\x0c\n\x04xmin\x18\x01 \x01(\x01\x12\x0c\n\x04ymin\x18\x02 \x01(\x01\x12\x0c\n\x04xmax\x18\x03 \x01(\x01\x12\x0c\n\x04ymax\x18\x04 \x01(\x01\"Z\n\x14ProtoProjectedExtent\x12#\n\x06\x65xtent\x18\x01 \x01(\x0b\x32\x13.protos.ProtoExtent\x12\x1d\n\x03\x63rs\x18\x02 \x01(\x0b\x32\x10.protos.ProtoCRS\"s\n\x1cProtoTemporalProjectedExtent\x12#\n\x06\x65xtent\x18\x01 \x01(\x0b\x32\x13.protos.ProtoExtent\x12\x1d\n\x03\x63rs\x18\x02 \x01(\x0b\x32\x10.protos.ProtoCRS\x12\x0f\n\x07instant\x18\x03 \x01(\x04\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_PROTOCRS = _descriptor.Descriptor(
name='ProtoCRS',
full_name='protos.ProtoCRS',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='epsg', full_name='protos.ProtoCRS.epsg', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='proj4', full_name='protos.ProtoCRS.proj4', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=71,
)
_PROTOEXTENT = _descriptor.Descriptor(
name='ProtoExtent',
full_name='protos.ProtoExtent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='xmin', full_name='protos.ProtoExtent.xmin', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ymin', full_name='protos.ProtoExtent.ymin', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='xmax', full_name='protos.ProtoExtent.xmax', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ymax', full_name='protos.ProtoExtent.ymax', index=3,
number=4, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=73,
serialized_end=142,
)
_PROTOPROJECTEDEXTENT = _descriptor.Descriptor(
name='ProtoProjectedExtent',
full_name='protos.ProtoProjectedExtent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='extent', full_name='protos.ProtoProjectedExtent.extent', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crs', full_name='protos.ProtoProjectedExtent.crs', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=144,
serialized_end=234,
)
_PROTOTEMPORALPROJECTEDEXTENT = _descriptor.Descriptor(
name='ProtoTemporalProjectedExtent',
full_name='protos.ProtoTemporalProjectedExtent',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='extent', full_name='protos.ProtoTemporalProjectedExtent.extent', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='crs', full_name='protos.ProtoTemporalProjectedExtent.crs', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='instant', full_name='protos.ProtoTemporalProjectedExtent.instant', index=2,
number=3, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=236,
serialized_end=351,
)
_PROTOPROJECTEDEXTENT.fields_by_name['extent'].message_type = _PROTOEXTENT
_PROTOPROJECTEDEXTENT.fields_by_name['crs'].message_type = _PROTOCRS
_PROTOTEMPORALPROJECTEDEXTENT.fields_by_name['extent'].message_type = _PROTOEXTENT
_PROTOTEMPORALPROJECTEDEXTENT.fields_by_name['crs'].message_type = _PROTOCRS
DESCRIPTOR.message_types_by_name['ProtoCRS'] = _PROTOCRS
DESCRIPTOR.message_types_by_name['ProtoExtent'] = _PROTOEXTENT
DESCRIPTOR.message_types_by_name['ProtoProjectedExtent'] = _PROTOPROJECTEDEXTENT
DESCRIPTOR.message_types_by_name['ProtoTemporalProjectedExtent'] = _PROTOTEMPORALPROJECTEDEXTENT
ProtoCRS = _reflection.GeneratedProtocolMessageType('ProtoCRS', (_message.Message,), dict(
DESCRIPTOR = _PROTOCRS,
__module__ = 'extentMessages_pb2'
# @@protoc_insertion_point(class_scope:protos.ProtoCRS)
))
_sym_db.RegisterMessage(ProtoCRS)
ProtoExtent = _reflection.GeneratedProtocolMessageType('ProtoExtent', (_message.Message,), dict(
DESCRIPTOR = _PROTOEXTENT,
__module__ = 'extentMessages_pb2'
# @@protoc_insertion_point(class_scope:protos.ProtoExtent)
))
_sym_db.RegisterMessage(ProtoExtent)
ProtoProjectedExtent = _reflection.GeneratedProtocolMessageType('ProtoProjectedExtent', (_message.Message,), dict(
DESCRIPTOR = _PROTOPROJECTEDEXTENT,
__module__ = 'extentMessages_pb2'
# @@protoc_insertion_point(class_scope:protos.ProtoProjectedExtent)
))
_sym_db.RegisterMessage(ProtoProjectedExtent)
ProtoTemporalProjectedExtent = _reflection.GeneratedProtocolMessageType('ProtoTemporalProjectedExtent', (_message.Message,), dict(
DESCRIPTOR = _PROTOTEMPORALPROJECTEDEXTENT,
__module__ = 'extentMessages_pb2'
# @@protoc_insertion_point(class_scope:protos.ProtoTemporalProjectedExtent)
))
_sym_db.RegisterMessage(ProtoTemporalProjectedExtent)
# @@protoc_insertion_point(module_scope)
|
scripts/variance_reduction.py | JannikWirtz/importance-sampling-diagnostics | 289 | 129802 |
#!/usr/bin/env python
#
# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
import argparse
import os
from os import path
from keras import backend as K
from keras.losses import get as get_loss
from keras.utils.generic_utils import Progbar
import numpy as np
from importance_sampling import models
from importance_sampling.datasets import CIFAR10, CIFAR100, MNIST, \
OntheflyAugmentedImages, ImageNetDownsampled, PennTreeBank, ZCAWhitening
from importance_sampling.model_wrappers import OracleWrapper
from importance_sampling.reweighting import BiasedReweightingPolicy
from importance_sampling.utils import tf_config
from importance_sampling.utils.functional import compose, partial, ___
def build_grad(network):
"""Return the gradient of the network."""
x = network.input
y = network.output
target_shape = (None, 1) if "sparse" in network.loss else K.int_shape(y)
y_true = K.placeholder(shape=target_shape)
sample_weights = K.placeholder(shape=(None,))
l = K.mean(sample_weights * get_loss(network.loss)(y_true, y))
grads = network.optimizer.get_gradients(l, network.trainable_weights)
grad = K.concatenate([
K.reshape(g, (-1,))
for g in grads
])
return K.function(
[x, y_true, sample_weights],
[grad]
)
def build_grad_batched(network, batch_size):
"""Compute the average gradient by splitting the inputs in batches of size
'batch_size' and averaging."""
grad = build_grad(network)
def inner(inputs):
X, y, w = inputs
N = len(X)
g = 0
for i in range(0, N, batch_size):
g = g + w[i:i+batch_size].sum() * grad([
X[i:i+batch_size],
y[i:i+batch_size],
w[i:i+batch_size]
])[0]
return [g / w.sum()]
return inner
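# Editor's note, not part of the original script: the callable returned above
# has the same [X, y, w] interface as build_grad(); it walks the inputs in
# chunks of `batch_size`, scales each chunk's gradient by that chunk's total
# sample weight, and normalizes by the overall weight sum, e.g.
#
#   grad_fn = build_grad_batched(network, 32)
#   full_grad, = grad_fn([X, y, np.ones(len(X))])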
def load_dataset(dataset):
datasets = {
"mnist": MNIST,
"cifar10": CIFAR10,
"cifar100": CIFAR100,
"cifar10-augmented": compose(
partial(OntheflyAugmentedImages, ___, dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=False
)),
CIFAR10
),
"cifar10-whitened-augmented": compose(
partial(OntheflyAugmentedImages, ___, dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=False
), N=15*10**5),
ZCAWhitening,
CIFAR10
),
"cifar100-augmented": compose(
partial(OntheflyAugmentedImages, ___, dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=False
)),
CIFAR100
),
"cifar100-whitened-augmented": compose(
partial(OntheflyAugmentedImages, ___, dict(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=0,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=False
), N=15*10**5),
ZCAWhitening,
CIFAR100
),
"imagenet-32x32": partial(
ImageNetDownsampled,
os.getenv("IMAGENET"),
size=32
),
"ptb": partial(PennTreeBank, 20),
}
return datasets[dataset]()
def uniform_score(x, y, batch_size=None):
return np.ones((len(x),))
def main(argv):
parser = argparse.ArgumentParser(
description=("Compute the variance reduction achieved by different "
"importance sampling methods")
)
parser.add_argument(
"model",
choices=[
"small_cnn", "cnn", "wide_resnet_28_2", "lstm_lm"
],
help="Choose the type of the model"
)
parser.add_argument(
"weights",
help="The file containing the model weights"
)
parser.add_argument(
"dataset",
choices=[
"mnist", "cifar10", "cifar100", "cifar10-augmented",
"cifar100-augmented", "imagenet-32x32", "ptb",
"cifar10-whitened-augmented", "cifar100-whitened-augmented"
],
help="Choose the dataset to compute the loss"
)
parser.add_argument(
"--samples",
type=int,
default=10,
help="How many samples to choose"
)
parser.add_argument(
"--score",
choices=["gnorm", "full_gnorm", "loss", "ones"],
nargs="+",
default="loss",
help="Choose a score to perform sampling with"
)
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="The batch size for computing the loss"
)
parser.add_argument(
"--inner_batch_size",
type=int,
default=32,
help=("The batch size to use for gradient computations "
"(to decrease memory usage)")
)
parser.add_argument(
"--sample_size",
type=int,
default=1024,
help="The sample size to compute the variance reduction"
)
parser.add_argument(
"--random_seed",
type=int,
default=0,
help="A seed for the PRNG (mainly used for dataset generation)"
)
parser.add_argument(
"--save_scores",
help="Directory to save the scores in"
)
args = parser.parse_args(argv)
np.random.seed(args.random_seed)
dataset = load_dataset(args.dataset)
network = models.get(args.model)(dataset.shape, dataset.output_size)
network.load_weights(args.weights)
grad = build_grad_batched(network, args.inner_batch_size)
reweighting = BiasedReweightingPolicy()
# Compute the full gradient
idxs = np.random.choice(len(dataset.train_data), args.sample_size)
x, y = dataset.train_data[idxs]
full_grad = grad([x, y, np.ones(len(x))])[0]
# Sample and approximate
for score_metric in args.score:
if score_metric != "ones":
model = OracleWrapper(network, reweighting, score=score_metric)
score = model.score
else:
score = uniform_score
gs = np.zeros(shape=(10,) + full_grad.shape, dtype=np.float32)
print("Calculating %s..." % (score_metric,))
scores = score(x, y, batch_size=1)
p = scores/scores.sum()
pb = Progbar(args.samples)
for i in range(args.samples):
pb.update(i)
idxs = np.random.choice(args.sample_size, args.batch_size, p=p)
w = reweighting.sample_weights(idxs, scores).ravel()
gs[i] = grad([x[idxs], y[idxs], w])[0]
pb.update(args.samples)
norms = np.sqrt(((full_grad - gs)**2).sum(axis=1))
alignment = gs.dot(full_grad[:, np.newaxis]) / np.sqrt(np.sum(full_grad**2))
alignment /= np.sqrt((gs**2).sum(axis=1, keepdims=True))
print("Mean of norms of diff", np.mean(norms))
print("Variance of norms of diff", np.var(norms))
print("Mean of alignment", np.mean(alignment))
print("Variance of alignment", np.var(alignment))
if args.save_scores:
np.savetxt(
path.join(args.save_scores, score_metric+".txt"),
scores
)
if __name__ == "__main__":
import sys
main(sys.argv[1:])
|
sdk/python/pulumi_aws/outposts/get_sites.py | alexbowers/pulumi-aws | 260 | 129803 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetSitesResult',
'AwaitableGetSitesResult',
'get_sites',
]
@pulumi.output_type
class GetSitesResult:
"""
A collection of values returned by getSites.
"""
def __init__(__self__, id=None, ids=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ids and not isinstance(ids, list):
raise TypeError("Expected argument 'ids' to be a list")
pulumi.set(__self__, "ids", ids)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def ids(self) -> Sequence[str]:
"""
Set of Outposts Site identifiers.
"""
return pulumi.get(self, "ids")
class AwaitableGetSitesResult(GetSitesResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSitesResult(
id=self.id,
ids=self.ids)
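# Editor's note, not part of the generated file: the unreachable `yield` above
# turns __await__ into a generator, so awaiting this object completes
# immediately and returns a plain GetSitesResult; the same class therefore
# works in both synchronous and coroutine invoke paths.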
def get_sites(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSitesResult:
"""
Provides details about multiple Outposts Sites.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
all = aws.outposts.get_sites()
```
"""
__args__ = dict()
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:outposts/getSites:getSites', __args__, opts=opts, typ=GetSitesResult).value
return AwaitableGetSitesResult(
id=__ret__.id,
ids=__ret__.ids)
|
openmdao/utils/file_wrap.py | friedenhe/OpenMDAO | 451 | 129809 |
<filename>openmdao/utils/file_wrap.py
"""
A collection of utilities for file wrapping.
Note: This is a work in progress.
"""
import re
from pyparsing import CaselessLiteral, Combine, OneOrMore, Optional, \
TokenConverter, Word, nums, oneOf, printables, ParserElement, alphanums
import numpy as np
def _getformat(val):
"""
Get the output format for a floating point number.
The general format is used with 16 places of accuracy, except for when
the floating point value is an integer, in which case a decimal point
followed by a single zero is used.
Parameters
----------
val : float or int
the number to be formatted.
Returns
-------
string
the format string.
"""
if int(val) == val:
return "%.1f"
else:
return "%.16g"
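# Editor's note, not part of the original module: _getformat() picks a fixed
# one-decimal format for whole numbers and a 16-significant-digit general
# format otherwise, e.g.
#
#   _getformat(3.0) % 3.0          -> '3.0'
#   _getformat(3.14159) % 3.14159  -> '3.14159'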
class _SubHelper(object):
"""
Replaces file text at the correct word location in a line.
This class contains the Helper Function that is passed to re.sub.
Attributes
----------
_newtext : str
text to insert.
_replace_location : int
location in the file where replacement is to occur.
_current_location : int
current location in the file.
_counter : int
counter
_start_location : int
initial location where replacement is to occur.
_end_location : int
final location where replacement is to occur.
"""
def __init__(self):
"""
Initialize attributes.
"""
self._newtext = ""
self._replace_location = 0
self._current_location = 0
self._counter = 0
self._start_location = 0
self._end_location = 0
def set(self, newtext, location):
"""
Set a new word location and value for replacement.
Parameters
----------
newtext : str
text to insert.
location : int
location in the file where replacement is to occur.
"""
self._newtext = newtext
self._replace_location = location
self._current_location = 0
def set_array(self, newtext, start_location, end_location):
"""
Set a new starting location, ending location, and value for replacement.
Parameters
----------
newtext : str
text to insert.
start_location : int
    location in the file where replacement begins.
end_location : int
    location in the file where replacement ends.
"""
self._newtext = newtext
self._start_location = start_location
self._end_location = end_location
self._current_location = 0
def replace(self, text):
"""
Replace text in file.
This function should be passed to re.sub.
Parameters
----------
text : str
text to insert.
Returns
-------
string
newtext if current location is replace location else the input text.
"""
self._current_location += 1
if self._current_location == self._replace_location:
if isinstance(self._newtext, float):
return _getformat(self._newtext) % self._newtext
else:
return str(self._newtext)
else:
return text.group()
def replace_array(self, text):
"""
Replace array of text values in file.
This function should be passed to re.sub.
Parameters
----------
text : str
text to insert.
Returns
-------
string
newtext if current location is replace location else the input text.
"""
self._current_location += 1
end = len(self._newtext)
if self._current_location >= self._start_location and \
self._current_location <= self._end_location and \
self._counter < end:
if isinstance(self._newtext[self._counter], float):
val = self._newtext[self._counter]
newval = _getformat(val) % val
else:
newval = str(self._newtext[self._counter])
self._counter += 1
return newval
else:
return text.group()
class _ToInteger(TokenConverter):
"""
Converter for PyParsing that is used to turn a token into an int.
"""
def postParse(self, instring, loc, tokenlist):
"""
Convert token into an integer.
Parameters
----------
instring : str
the input string
loc : int
the location of the matching string
tokenlist : list
list of matched tokens
Returns
-------
int
integer value for token.
"""
return int(tokenlist[0])
class _ToFloat(TokenConverter):
"""
Converter for PyParsing that is used to turn a token into a float.
"""
def postParse(self, instring, loc, tokenlist):
"""
Convert token into a float.
Parameters
----------
instring : str
the input string
loc : int
the location of the matching string
tokenlist : list
list of matched tokens
Returns
-------
float
float value for token.
"""
return float(tokenlist[0].replace('D', 'E'))
class _ToNan(TokenConverter):
"""
Converter for PyParsing that is used to turn a token into Python nan.
"""
def postParse(self, instring, loc, tokenlist):
"""
Convert token into Python nan.
Parameters
----------
instring : str
the input string
loc : int
the location of the matching string
tokenlist : list
list of matched tokens
Returns
-------
float
the float value for NaN.
"""
return float('nan')
class _ToInf(TokenConverter):
"""
Converter for PyParsing that is used to turn a token into Python inf.
"""
def postParse(self, instring, loc, tokenlist):
"""
Convert token into Python inf.
Parameters
----------
instring : str
the input string
loc : int
the location of the matching string
tokenlist : list
list of matched tokens
Returns
-------
float
the float value for infinity.
"""
return float('inf')
class InputFileGenerator(object):
"""
Utility to generate an input file from a template.
Substitution of values is supported. Data is located with a simple API.
Attributes
----------
_template_filename : str or None
the name of the template file.
_output_filename : str or None
the name of the output file.
_delimiter : str
    delimiter characters used to separate fields.
_reg : re.Pattern
    compiled regular expression used to find fields.
_data : list of string
the contents of the file, by line
_current_row : int
the current row of the file
_anchored : bool
indicator that position is relative to a landmark location.
"""
def __init__(self):
"""
Initialize attributes.
"""
self._template_filename = None
self._output_filename = None
self._delimiter = " "
self._reg = re.compile('[^ \n]+')
self._data = []
self._current_row = 0
self._anchored = False
def set_template_file(self, filename):
"""
Set the name of the template file to be used.
The template file is also read into memory when this method is called.
Parameters
----------
filename : str
Name of the template file to be used.
"""
self._template_filename = filename
templatefile = open(filename, 'r')
self._data = templatefile.readlines()
templatefile.close()
def set_generated_file(self, filename):
"""
Set the name of the file that will be generated.
Parameters
----------
filename : str
Name of the input file to be generated.
"""
self._output_filename = filename
def set_delimiters(self, delimiter):
"""
Set the delimiters that are used to identify field boundaries.
Parameters
----------
delimiter : str
A string containing characters to be used as delimiters.
"""
self._delimiter = delimiter
self._reg = re.compile('[^' + delimiter + '\n]+')
def mark_anchor(self, anchor, occurrence=1):
"""
Mark the location of a landmark.
This lets you describe data by relative position. Note that a forward
search begins at the old anchor location. If you want to restart the
search for the anchor at the file beginning, then call ``reset_anchor()``
before ``mark_anchor``.
Parameters
----------
anchor : str
The text you want to search for.
occurrence : int, optional
Find nth instance of text; default is 1 (first). Use -1 to
find last occurrence. Reverse searches always start at the end
of the file no matter the state of any previous anchor.
"""
if not isinstance(occurrence, int):
raise ValueError("The value for occurrence must be an integer")
instance = 0
if occurrence > 0:
count = 0
max_lines = len(self._data)
for index in range(self._current_row, max_lines):
line = self._data[index]
# If we are marking a new anchor from an existing anchor, and
# the anchor is mid-line, then we still search the line, but
# only after the anchor.
if count == 0 and self._anchored:
line = line.split(anchor)[-1]
if line.find(anchor) > -1:
instance += 1
if instance == occurrence:
self._current_row += count
self._anchored = True
return
count += 1
elif occurrence < 0:
max_lines = len(self._data) - 1
count = max_lines
for index in range(max_lines, -1, -1):
line = self._data[index]
# If we are marking a new anchor from an existing anchor, and
# the anchor is mid-line, then we still search the line, but
# only before the anchor.
if count == max_lines and self._anchored:
line = line.split(anchor)[0]
if line.find(anchor) > -1:
instance += -1
if instance == occurrence:
self._current_row = count
self._anchored = True
return
count -= 1
else:
raise ValueError("0 is not valid for an anchor occurrence.")
raise RuntimeError("Could not find pattern %s in template file %s" %
(anchor, self._template_filename))
def reset_anchor(self):
"""
Reset anchor to the beginning of the file.
"""
self._current_row = 0
self._anchored = False
def transfer_var(self, value, row, field):
"""
Change a single variable in the template relative to the current anchor.
Parameters
----------
value : float, int, bool, str
New value to set at the location.
row : int
Number of lines offset from anchor line (0 is anchor line).
This can be negative.
field : int
Which word in line to replace, as denoted by delimiter(s).
"""
j = self._current_row + row
line = self._data[j]
sub = _SubHelper()
sub.set(value, field)
newline = re.sub(self._reg, sub.replace, line)
self._data[j] = newline
def transfer_array(self, value, row_start, field_start, field_end,
row_end=None, sep=", "):
"""
Change the values of an array in the template relative to the current anchor.
This should generally be used for one-dimensional or free form arrays.
Parameters
----------
value : float, int, bool, str
Array of values to insert.
row_start : int
Starting row for inserting the array. This is relative
to the anchor, and can be negative.
field_start : int
Starting field in the given row_start as denoted by
delimiter(s).
field_end : int
The final field the array uses in row_end.
We need this to figure out if the template is too small or large.
row_end : int, optional
Use if the array wraps to cover additional lines.
sep : str, optional
Separator to use if we go beyond the template.
"""
# Simplified input for single-line arrays
if row_end is None:
row_end = row_start
sub = _SubHelper()
for row in range(row_start, row_end + 1):
j = self._current_row + row
line = self._data[j]
if row == row_end:
f_end = field_end
else:
f_end = 99999
sub.set_array(value, field_start, f_end)
field_start = 0
newline = re.sub(self._reg, sub.replace_array, line)
self._data[j] = newline
# Sometimes an array is too large for the example in the template
# This is resolved by adding more fields at the end
if sub._counter < len(value):
for val in value[sub._counter:]:
newline = newline.rstrip() + sep + str(val)
self._data[j] = newline
# Sometimes an array is too small for the template
# This is resolved by removing fields
elif sub._counter > len(value):
# TODO - Figure out how to handle this.
# Ideally, we'd remove the extra field placeholders
raise ValueError("Array is too small for the template.")
def transfer_2Darray(self, value, row_start, row_end, field_start, field_end):
"""
Change the values of a 2D array in the template relative to the current anchor.
This method is specialized for 2D arrays, where each row of the array is
on its own line.
Parameters
----------
value : ndarray
Array of values to insert.
row_start : int
Starting row for inserting the array. This is relative
to the anchor, and can be negative.
row_end : int
Final row for the array, relative to the anchor.
field_start : int
Starting field in the given row_start as denoted by
delimiter(s).
field_end : int
The final field the array uses in row_end.
We need this to figure out if the template is too small or large.
"""
sub = _SubHelper()
i = 0
for row in range(row_start, row_end + 1):
j = self._current_row + row
line = self._data[j]
sub.set_array(value[i, :], field_start, field_end)
newline = re.sub(self._reg, sub.replace_array, line)
self._data[j] = newline
sub._current_location = 0
sub._counter = 0
i += 1
# TODO - Note, we currently can't handle going beyond the end of
# the template line
def clearline(self, row):
"""
Replace the contents of a row with the newline character.
Parameters
----------
row : int
Row number to clear, relative to current anchor.
"""
self._data[self._current_row + row] = "\n"
def generate(self, return_data=False):
"""
Use the template file to generate the input file.
Parameters
----------
return_data : bool
If True, generated file data will be returned as a string.
Returns
-------
string
The generated file data if return_data is True or output filename
has not been provided, else None.
"""
if self._output_filename:
with open(self._output_filename, 'w') as f:
f.writelines(self._data)
else:
return_data = True
if return_data:
return '\n'.join(self._data)
else:
return None
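# Editor's sketch, not part of the original OpenMDAO module: a minimal,
# hypothetical round trip through InputFileGenerator; the file names and the
# "VISCOSITY 1.0" template line are made up for illustration.
#
#   gen = InputFileGenerator()
#   gen.set_template_file('input.template')  # contains a line "VISCOSITY 1.0"
#   gen.set_generated_file('input.dat')
#   gen.mark_anchor('VISCOSITY')
#   gen.transfer_var(1.78e-5, 0, 2)          # row 0 = anchor line, field 2
#   gen.generate()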
class FileParser(object):
"""
Utility to locate and read data from a file.
Parameters
----------
end_of_line_comment_char : str, optional
End-of-line comment character to be ignored
(e.g., Python supports in-line comments with "#").
full_line_comment_char : str, optional
Comment character that signifies a line should be skipped.
Attributes
----------
_filename : str
the name of the file.
_data : list of string
the contents of the file, by line
_delimiter : str
characters used as field delimiters.
_end_of_line_comment_char : str
end-of-line comment character to be ignored.
_full_line_comment_char : str
comment character that signifies a line should be skipped.
_current_row : int
the current row of the file.
_anchored : bool
indicator that position is relative to a landmark location.
"""
def __init__(self, end_of_line_comment_char=None, full_line_comment_char=None):
"""
Initialize attributes.
"""
self._filename = None
self._data = []
self._delimiter = " \t"
self._end_of_line_comment_char = end_of_line_comment_char
self._full_line_comment_char = full_line_comment_char
self._current_row = 0
self._anchored = False
self.set_delimiters(self._delimiter)
def set_file(self, filename):
"""
Set the name of the file that will be parsed.
Parameters
----------
filename : str
Name of the input file to be parsed.
"""
self._filename = filename
inputfile = open(filename, 'r')
if not self._end_of_line_comment_char and not self._full_line_comment_char:
self._data = inputfile.readlines()
else:
self._data = []
for line in inputfile:
if line[0] == self._full_line_comment_char:
continue
self._data.append(line.split(self._end_of_line_comment_char)[0])
inputfile.close()
def set_delimiters(self, delimiter):
r"""
Set the delimiters that are used to identify field boundaries.
Parameters
----------
delimiter : str
A string containing characters to be used as delimiters. The
default value is ' \t', which means that spaces and tabs are not
taken as data but instead mark the boundaries. Note that the
parser is smart enough to recognize characters within quotes as
non-delimiters.
"""
self._delimiter = delimiter
if delimiter != "columns":
ParserElement.setDefaultWhitespaceChars(str(delimiter))
self._reset_tokens()
def mark_anchor(self, anchor, occurrence=1):
"""
Mark the location of a landmark, which lets you describe data by relative position.
Note that a forward search begins at the old anchor location. If you want to restart
the search for the anchor at the file beginning, then call ``reset_anchor()`` before
``mark_anchor``.
Parameters
----------
anchor : str
The text you want to search for.
occurrence : int
Find nth instance of text; default is 1 (first). Use -1 to
find last occurrence. Reverse searches always start at the end
of the file no matter the state of any previous anchor.
"""
if not isinstance(occurrence, int):
raise ValueError("The value for occurrence must be an integer")
instance = 0
if occurrence > 0:
count = 0
max_lines = len(self._data)
for index in range(self._current_row, max_lines):
line = self._data[index]
# If we are marking a new anchor from an existing anchor, and
# the anchor is mid-line, then we still search the line, but
# only after the anchor.
if count == 0 and self._anchored:
line = line.split(anchor)[-1]
if anchor in line:
instance += 1
if instance == occurrence:
self._current_row += count
self._anchored = True
return
count += 1
elif occurrence < 0:
max_lines = len(self._data) - 1
count = max_lines
for index in range(max_lines, -1, -1):
line = self._data[index]
# If we are marking a new anchor from an existing anchor, and
# the anchor is mid-line, then we still search the line, but
# only before the anchor.
if count == max_lines and self._anchored:
line = line.split(anchor)[0]
if anchor in line:
instance += -1
if instance == occurrence:
self._current_row = count
self._anchored = True
return
count -= 1
else:
raise ValueError("0 is not valid for an anchor occurrence.")
raise RuntimeError("Could not find pattern %s in output file %s" %
(anchor, self._filename))
def reset_anchor(self):
"""
Reset anchor to the beginning of the file.
"""
self._current_row = 0
self._anchored = False
def transfer_line(self, row):
"""
Return an entire line, relative to current anchor.
Parameters
----------
row : int
Number of lines offset from anchor line (0 is anchor line).
This can be negative.
Returns
-------
string
Line at the location requested.
"""
return self._data[self._current_row + row].rstrip()
def transfer_var(self, row, field, fieldend=None):
"""
Get a single variable relative to the current anchor.
Parameters
----------
row : int
Number of lines offset from anchor line (0 is anchor line).
This can be negative.
field : int
If the delimiter is a set of chars: which word in line to retrieve.
If the delimiter is 'columns': character position to start.
fieldend : int (optional)
If the delimiter is a set of chars: IGNORED.
If the delimiter is 'columns': position of last character to return, or if
omitted, the end of the line is used.
Returns
-------
string
Data from the requested location in the file.
"""
j = self._current_row + row
line = self._data[j]
if self._delimiter == "columns":
if not fieldend:
line = line[(field - 1):]
else:
line = line[(field - 1):(fieldend)]
# Let pyparsing figure out if this is a number, and return it
# as a float or int as appropriate
data = self._parse_line().parseString(line)
# data might have been split if it contains whitespace. If so,
# just return the whole string
if len(data) > 1:
return line
else:
return data[0]
else:
data = self._parse_line().parseString(line)
return data[field - 1]
def transfer_keyvar(self, key, field, occurrence=1, rowoffset=0):
"""
Search for a key relative to the current anchor and get a field from that line.
You can do the same thing with a call to ``mark_anchor`` and ``transfer_var``.
This function just combines them for convenience.
Parameters
----------
key : str
The key to search for.
field : int
Which field to transfer. Field 0 is the key.
        occurrence : int
            Find the nth instance of the key text; default is 1 (first
            occurrence). Use -1 to find the last occurrence. Note that 0 is
            not a valid value for occurrence.
rowoffset : int (optional)
Optional row offset from the occurrence of key. This can
also be negative.
Returns
-------
string
Data from the requested location in the file.
"""
if not isinstance(occurrence, int) or occurrence == 0:
msg = "The value for occurrence must be a nonzero integer"
raise ValueError(msg)
instance = 0
if occurrence > 0:
row = 0
for line in self._data[self._current_row:]:
if line.find(key) > -1:
instance += 1
if instance == occurrence:
break
row += 1
elif occurrence < 0:
row = -1
for line in reversed(self._data[self._current_row:]):
if line.find(key) > -1:
instance += -1
if instance == occurrence:
break
row -= 1
j = self._current_row + row + rowoffset
line = self._data[j]
fields = self._parse_line().parseString(line.replace(key, "KeyField"))
return fields[field]
def transfer_array(self, rowstart, fieldstart, rowend=None, fieldend=None):
"""
Get an array of variables relative to the current anchor.
Setting the delimiter to 'columns' elicits some special behavior
from this method. Normally, the extraction process wraps around
at the end of a line and continues grabbing each field at the start of
        a new line. When the delimiter is set to 'columns', the parameters
        (rowstart, fieldstart, rowend, fieldend) demarcate a box, and all
        values in that box are retrieved. Note that standard whitespace
        is the secondary delimiter in this case.
Parameters
----------
rowstart : int
Row number to start, relative to the current anchor.
fieldstart : int
Field number to start.
rowend : int, optional
Row number to end. If not set, then only one row is grabbed.
fieldend : int
Field number to end.
Returns
-------
        ndarray
            Array of data from the requested location in the file.
"""
j1 = self._current_row + rowstart
if rowend is None:
j2 = j1 + 1
else:
j2 = self._current_row + rowend + 1
if not fieldend:
raise ValueError("fieldend is missing, currently required")
lines = self._data[j1:j2]
data = np.zeros(shape=(0, 0))
for i, line in enumerate(lines):
if self._delimiter == "columns":
line = line[(fieldstart - 1):fieldend]
# Stripping whitespace may be controversial.
line = line.strip()
# Let pyparsing figure out if this is a number, and return it
# as a float or int as appropriate
parsed = self._parse_line().parseString(line)
newdata = np.array(parsed[:])
# data might have been split if it contains whitespace. If the
# data is string, we probably didn't want this.
if newdata.dtype.type is np.str_:
newdata = np.array(line)
data = np.append(data, newdata)
else:
parsed = self._parse_line().parseString(line)
if i == j2 - j1 - 1:
data = np.append(data, np.array(parsed[(fieldstart - 1):fieldend]))
else:
data = np.append(data, np.array(parsed[(fieldstart - 1):]))
fieldstart = 1
return data
def transfer_2Darray(self, rowstart, fieldstart, rowend, fieldend=None):
"""
Get a 2D array of variables relative to the current anchor.
Each line of data is placed in a separate row.
If the delimiter is set to 'columns', then the values contained in
fieldstart and fieldend should be the column number instead of the
field number.
Parameters
----------
rowstart : int
Row number to start, relative to the current anchor.
fieldstart : int
Field number to start.
rowend : int
Row number to end relative to current anchor.
fieldend : int (optional)
Field number to end. If not specified, grabs all fields up to the
end of the line.
Returns
-------
        ndarray
            2D array of data from the requested location in the file.
"""
if fieldend and (fieldstart > fieldend):
msg = "fieldend must be greater than fieldstart"
raise ValueError(msg)
if rowstart > rowend:
msg = "rowend must be greater than rowstart"
raise ValueError(msg)
j1 = self._current_row + rowstart
j2 = self._current_row + rowend + 1
lines = list(self._data[j1:j2])
if self._delimiter == "columns":
if fieldend:
line = lines[0][(fieldstart - 1):fieldend]
else:
line = lines[0][(fieldstart - 1):]
parsed = self._parse_line().parseString(line)
row = np.array(parsed[:])
data = np.zeros(shape=(abs(j2 - j1), len(row)))
data[0, :] = row
for i, line in enumerate(list(lines[1:])):
if fieldend:
line = line[(fieldstart - 1):fieldend]
else:
line = line[(fieldstart - 1):]
parsed = self._parse_line().parseString(line)
data[i + 1, :] = np.array(parsed[:])
else:
parsed = self._parse_line().parseString(lines[0])
if fieldend:
row = np.array(parsed[(fieldstart - 1):fieldend])
else:
row = np.array(parsed[(fieldstart - 1):])
data = np.zeros(shape=(abs(j2 - j1), len(row)))
data[0, :] = row
for i, line in enumerate(list(lines[1:])):
parsed = self._parse_line().parseString(line)
if fieldend:
try:
data[i + 1, :] = np.array(parsed[(fieldstart - 1):fieldend])
                    except Exception:
                        # Print what has been parsed so far to aid debugging,
                        # then re-raise instead of silently dropping the row.
                        print(data)
                        raise
else:
data[i + 1, :] = np.array(parsed[(fieldstart - 1):])
return data
def _parse_line(self):
"""
Parse a single data line that may contain string or numerical data.
Float and Int 'words' are converted to their appropriate type.
Exponentiation is supported, as are NaN and Inf.
Returns
-------
        <ParserElement>
            The pyparsing token used to parse a data line.
"""
return self.line_parse_token
def _reset_tokens(self):
"""
Set up the tokens for pyparsing.
"""
# Somewhat of a hack, but we can only use printables if the delimiter is
        # just whitespace. Otherwise, some separators (like ',' or '=') potentially
        # get parsed into the general string text. So, if we have non-whitespace
# delimiters, we need to fall back to just alphanums, and then add in any
# missing but important symbols to parse.
if self._delimiter.isspace():
textchars = printables
else:
textchars = alphanums
symbols = ['.', '/', '+', '*', '^', '(', ')', '[', ']', '=',
':', ';', '?', '%', '&', '!', '#', '|', '<', '>',
'{', '}', '-', '_', '@', '$', '~']
for symbol in symbols:
if symbol not in self._delimiter:
textchars = textchars + symbol
digits = Word(nums)
dot = "."
sign = oneOf("+ -")
ee = CaselessLiteral('E') | CaselessLiteral('D')
num_int = _ToInteger(Combine(Optional(sign) + digits))
num_float = _ToFloat(Combine(
Optional(sign) +
((digits + dot + Optional(digits)) | (dot + digits)) +
Optional(ee + Optional(sign) + digits)
))
# special case for a float written like "3e5"
mixed_exp = _ToFloat(Combine(digits + ee + Optional(sign) + digits))
nan = (_ToInf(oneOf("Inf -Inf")) |
_ToNan(oneOf("NaN nan NaN% NaNQ NaNS qNaN sNaN 1.#SNAN 1.#QNAN -1.#IND")))
string_text = Word(textchars)
self.line_parse_token = (OneOrMore((nan | num_float | mixed_exp | num_int | string_text)))
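# Illustrative usage sketch (not part of the original module). It assumes the
# enclosing class is named ``FileParser`` and that it exposes a ``set_file``
# method for pointing the parser at a file on disk; both of those names are
# assumptions made only for this example.
if __name__ == "__main__":  # pragma: no cover
    parser = FileParser()
    parser.set_file("solver_output.txt")   # hypothetical solver output file
    parser.set_delimiters(" \t")           # whitespace-delimited fields
    # Anchor on a landmark line, then read values relative to it.
    parser.mark_anchor("Objective value:")
    objective = parser.transfer_var(0, 3)  # 3rd field of the anchor line
    residuals = parser.transfer_array(1, 1, rowend=3, fieldend=4)
    print(objective, residuals)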
|
rplugin/python3/ultest/models/result.py
|
jpserra/vim-ultest
| 313 |
129815
|
<reponame>jpserra/vim-ultest<filename>rplugin/python3/ultest/models/result.py
import json
from dataclasses import asdict, dataclass
@dataclass
class Result:
id: str
file: str
code: int
output: str
def __str__(self):
props = self.dict()
return json.dumps(props)
def dict(self):
return asdict(self)
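# Illustrative usage sketch (not part of the original module): a Result is a
# plain dataclass, and ``str(result)`` serialises it to JSON. The field values
# below are made up for the example.
if __name__ == "__main__":
    res = Result(id="test_foo", file="tests/test_foo.py", code=1, output="assertion failed")
    print(res)          # JSON produced by __str__ via dataclasses.asdict
    print(res.dict())   # the same data as a plain dict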
|
tests/gis_tests/geogapp/models.py
|
ni-ning/django
| 61,676 |
129819
|
<gh_stars>1000+
from django.contrib.gis.db import models
class NamedModel(models.Model):
name = models.CharField(max_length=30)
class Meta:
abstract = True
def __str__(self):
return self.name
class City(NamedModel):
point = models.PointField(geography=True)
class Meta:
app_label = 'geogapp'
class Zipcode(NamedModel):
code = models.CharField(max_length=10)
poly = models.PolygonField(geography=True)
class County(NamedModel):
state = models.CharField(max_length=20)
mpoly = models.MultiPolygonField(geography=True)
class Meta:
app_label = 'geogapp'
def __str__(self):
return ' County, '.join([self.name, self.state])
|
networkx/algorithms/centrality/voterank_alg.py
|
jebogaert/networkx
| 10,024 |
129832
|
"""Algorithm to select influential nodes in a graph using VoteRank."""
__all__ = ["voterank"]
def voterank(G, number_of_nodes=None):
"""Select a list of influential nodes in a graph using VoteRank algorithm
VoteRank [1]_ computes a ranking of the nodes in a graph G based on a
voting scheme. With VoteRank, all nodes vote for each of its in-neighbours
and the node with the highest votes is elected iteratively. The voting
ability of out-neighbors of elected nodes is decreased in subsequent turns.
Note: We treat each edge independently in case of multigraphs.
Parameters
----------
G : graph
A NetworkX graph.
number_of_nodes : integer, optional
Number of ranked nodes to extract (default all nodes).
Returns
-------
voterank : list
Ordered list of computed seeds.
        Only nodes with a positive number of votes are returned.
References
----------
.. [1] <NAME>. et al. (2016).
Identifying a set of influential spreaders in complex networks.
Sci. Rep. 6, 27823; doi: 10.1038/srep27823.
"""
influential_nodes = []
voterank = {}
if len(G) == 0:
return influential_nodes
if number_of_nodes is None or number_of_nodes > len(G):
number_of_nodes = len(G)
if G.is_directed():
# For directed graphs compute average out-degree
avgDegree = sum(deg for _, deg in G.out_degree()) / len(G)
else:
# For undirected graphs compute average degree
avgDegree = sum(deg for _, deg in G.degree()) / len(G)
    # step 1 - initialize all nodes to (0, 1) (score, voting ability)
for n in G.nodes():
voterank[n] = [0, 1]
# Repeat steps 1b to 4 until num_seeds are elected.
for _ in range(number_of_nodes):
# step 1b - reset rank
for n in G.nodes():
voterank[n][0] = 0
# step 2 - vote
for n, nbr in G.edges():
# In directed graphs nodes only vote for their in-neighbors
voterank[n][0] += voterank[nbr][1]
if not G.is_directed():
voterank[nbr][0] += voterank[n][1]
for n in influential_nodes:
voterank[n][0] = 0
# step 3 - select top node
n = max(G.nodes, key=lambda x: voterank[x][0])
if voterank[n][0] == 0:
return influential_nodes
influential_nodes.append(n)
# weaken the selected node
voterank[n] = [0, 0]
# step 4 - update voterank properties
for _, nbr in G.edges(n):
voterank[nbr][1] -= 1 / avgDegree
voterank[nbr][1] = max(voterank[nbr][1], 0)
return influential_nodes
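# Illustrative usage sketch (not part of the original module): ranking the two
# most influential spreaders of a small undirected toy graph with voterank().
if __name__ == "__main__":
    import networkx as nx
    G = nx.Graph([(0, 1), (0, 2), (0, 3), (1, 4), (2, 4), (3, 4), (4, 5), (5, 6)])
    print(voterank(G, number_of_nodes=2))  # -> [4, 0]: node 4 first, then node 0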
|
nlpaug/augmenter/audio/mask.py
|
techthiyanes/nlpaug
| 3,121 |
129848
|
"""
Augmenter that apply mask operation to audio.
"""
from nlpaug.augmenter.audio import AudioAugmenter
import nlpaug.model.audio as nma
from nlpaug.util import Action, WarningMessage
class MaskAug(AudioAugmenter):
"""
:param int sampling_rate: Sampling rate of input audio. Mandatory if duration is provided.
    :param tuple zone: Assign a zone for augmentation. Default value is (0.2, 0.8), which means that no
        augmentation will be applied to the first 20% and the last 20% of the whole audio.
    :param float coverage: Portion of augmentation. Value should be between 0 and 1. If `1` is assigned, the augment
        operation will be applied to the whole target audio segment. For example, if the audio duration is 60 seconds
        while zone and coverage are (0.2, 0.8) and 0.7 respectively, then 25.2 seconds ((0.8-0.2)*0.7*60) of audio
        will be augmented.
    :param int duration: Duration of augmentation (in seconds). Default value is None. If a value is provided, the
        `coverage` value will be ignored.
    :param bool mask_with_noise: If True, the target area will be replaced by noise. Otherwise, it will be
        replaced by 0.
:param str name: Name of this augmenter
>>> import nlpaug.augmenter.audio as naa
>>> aug = naa.MaskAug(sampling_rate=44010)
"""
def __init__(self, sampling_rate=None, zone=(0.2, 0.8), coverage=1., duration=None,
mask_with_noise=True, name='Mask_Aug', verbose=0, stateless=True):
super().__init__(
action=Action.SUBSTITUTE, zone=zone, coverage=coverage, duration=duration,
name=name, device='cpu', verbose=verbose, stateless=stateless)
self.mask_with_noise = mask_with_noise
self.model = nma.Mask()
def substitute(self, data):
start_pos, end_pos = self.get_augment_range_by_coverage(data)
if not self.stateless:
self.start_pos, self.end_pos = start_pos, end_pos
return self.model.manipulate(data, start_pos=start_pos, end_pos=end_pos,
mask_with_noise=self.mask_with_noise)
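# Illustrative usage sketch (not part of the original module): masking part of a
# synthetic one-second signal. numpy is used only to fabricate example audio, and
# depending on the installed nlpaug version ``augment`` may return an array or a
# list of arrays.
if __name__ == "__main__":
    import numpy as np
    sampling_rate = 44100
    data = np.random.uniform(-1, 1, sampling_rate)  # one second of fake audio
    aug = MaskAug(sampling_rate=sampling_rate, coverage=0.1, mask_with_noise=False)
    augmented = aug.augment(data)
    print(type(augmented))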
|
pymorphy2__examples/определение_комментариев_женщин/main.py
|
gil9red/SimplePyScripts
| 117 |
129878
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install nltk
import nltk
# pip install pymorphy2
import pymorphy2
# TODO: merge the is_ADJS_sing_* and is_VERB_sing_* functions,
# and move the gender check into a separate function
def is_ADJS_sing_femn(parsed: pymorphy2.analyzer.Parse) -> bool:
"""
    Short-form adjective + singular + feminine gender
"""
return {'ADJS', 'sing', 'femn'} in parsed.tag
def is_ADJS_sing_masc(parsed: pymorphy2.analyzer.Parse) -> bool:
"""
    Short-form adjective + singular + masculine gender
"""
return {'ADJS', 'sing', 'masc'} in parsed.tag
def is_VERB_sing_femn(parsed: pymorphy2.analyzer.Parse) -> bool:
"""
    Verb (finite form) + singular + feminine gender
"""
return {'VERB', 'sing', 'femn'} in parsed.tag
def is_VERB_sing_masc(parsed: pymorphy2.analyzer.Parse) -> bool:
"""
    Verb (finite form) + singular + masculine gender
"""
return {'VERB', 'sing', 'masc'} in parsed.tag
def is_NPRO_1per_sing(parsed: pymorphy2.analyzer.Parse) -> bool:
"""
    Noun-like pronoun + 1st person + singular
"""
return {'NPRO', '1per', 'sing'} in parsed.tag
LOG_DEBUG = False
# LOG_DEBUG = True
morph = pymorphy2.MorphAnalyzer()
def is_femn(text: str) -> bool:
for line in text.splitlines():
LOG_DEBUG and print(f'[#] Comment: {line!r}')
        # Iterate over the sentences
for sent in nltk.sent_tokenize(line, language='russian'):
LOG_DEBUG and print(f'[#] Comment part: {sent!r}')
words = nltk.word_tokenize(sent)
parsed_words = [morph.parse(word)[0] for word in words]
parsed = parsed_words[0]
if is_ADJS_sing_femn(parsed) or is_VERB_sing_femn(parsed):
return True
if is_ADJS_sing_masc(parsed) or is_VERB_sing_masc(parsed):
return False
has_NPRO_1per_sing = False
LOG_DEBUG and print(f'[#] ({len(words)}): {words}')
LOG_DEBUG and print(f'[#] ({len(parsed_words)}):')
for parsed in parsed_words:
LOG_DEBUG and print(f'[#] {parsed.word} - {str(parsed.tag)!r}')
if is_NPRO_1per_sing(parsed):
has_NPRO_1per_sing = True
LOG_DEBUG and print(f'[!]{" " * 12} FOUND #1!')
continue
if has_NPRO_1per_sing:
                    # If we hit a masculine form, bail out
if is_ADJS_sing_masc(parsed) or is_VERB_sing_masc(parsed):
return False
if is_ADJS_sing_femn(parsed) or is_VERB_sing_femn(parsed):
LOG_DEBUG and print(f'[!]{" " * 12} FOUND #2!')
return True
return False
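# Illustrative usage sketch (not part of the original script): the set-in-tag
# grammeme check used by the predicates above, plus direct calls to is_femn().
# The sample sentences are made up; call _demo() manually (it needs the NLTK
# 'punkt' tokenizer data to be installed).
def _demo():
    parsed = morph.parse('сделала')[0]
    print({'VERB', 'sing', 'femn'} in parsed.tag)  # True: feminine past-tense verb
    print(is_femn('Я сделала заказ вчера.'))       # True
    print(is_femn('Я сделал заказ вчера.'))        # False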
if __name__ == '__main__':
import json
with open('comments.json', encoding='utf-8') as f:
data = json.load(f)
comments = [(x['text'], x['expected']) for x in data]
matches = 0
total = len(comments)
for i, (text, expected) in enumerate(comments, 1):
has_female = is_femn(text)
match = has_female == (expected == 'female')
matches += match
print(f"{i}. {text!r}"
f"\n [{'+' if match else '-'}] "
f"Expected={expected}, "
f"actual={'female' if has_female else 'male'}")
print('-' * 100)
print(f'Total: {matches} / {total}')
|
sdk/python/pulumi_gcp/projects/iam_policy.py
|
la3mmchen/pulumi-gcp
| 121 |
129888
|
<filename>sdk/python/pulumi_gcp/projects/iam_policy.py<gh_stars>100-1000
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IAMPolicyArgs', 'IAMPolicy']
@pulumi.input_type
class IAMPolicyArgs:
def __init__(__self__, *,
policy_data: pulumi.Input[str],
project: pulumi.Input[str]):
"""
The set of arguments for constructing a IAMPolicy resource.
:param pulumi.Input[str] policy_data: The `organizations.get_iam_policy` data source that represents
the IAM policy that will be applied to the project. The policy will be
merged with any existing policy applied to the project.
:param pulumi.Input[str] project: The project id of the target project. This is not
inferred from the provider.
"""
pulumi.set(__self__, "policy_data", policy_data)
pulumi.set(__self__, "project", project)
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> pulumi.Input[str]:
"""
The `organizations.get_iam_policy` data source that represents
the IAM policy that will be applied to the project. The policy will be
merged with any existing policy applied to the project.
"""
return pulumi.get(self, "policy_data")
@policy_data.setter
def policy_data(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_data", value)
@property
@pulumi.getter
def project(self) -> pulumi.Input[str]:
"""
The project id of the target project. This is not
inferred from the provider.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: pulumi.Input[str]):
pulumi.set(self, "project", value)
@pulumi.input_type
class _IAMPolicyState:
def __init__(__self__, *,
etag: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering IAMPolicy resources.
:param pulumi.Input[str] etag: (Computed) The etag of the project's IAM policy.
:param pulumi.Input[str] policy_data: The `organizations.get_iam_policy` data source that represents
the IAM policy that will be applied to the project. The policy will be
merged with any existing policy applied to the project.
:param pulumi.Input[str] project: The project id of the target project. This is not
inferred from the provider.
"""
if etag is not None:
pulumi.set(__self__, "etag", etag)
if policy_data is not None:
pulumi.set(__self__, "policy_data", policy_data)
if project is not None:
pulumi.set(__self__, "project", project)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
(Computed) The etag of the project's IAM policy.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> Optional[pulumi.Input[str]]:
"""
The `organizations.get_iam_policy` data source that represents
the IAM policy that will be applied to the project. The policy will be
merged with any existing policy applied to the project.
"""
return pulumi.get(self, "policy_data")
@policy_data.setter
def policy_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_data", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The project id of the target project. This is not
inferred from the provider.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
class IAMPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_data: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Four different resources help you manage your IAM policy for a project. Each of these resources serves a different use case:
* `projects.IAMPolicy`: Authoritative. Sets the IAM policy for the project and replaces any existing policy already attached.
* `projects.IAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the project are preserved.
* `projects.IAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the project are preserved.
* `projects.IAMAuditConfig`: Authoritative for a given service. Updates the IAM policy to enable audit logging for the given service.
> **Note:** `projects.IAMPolicy` **cannot** be used in conjunction with `projects.IAMBinding`, `projects.IAMMember`, or `projects.IAMAuditConfig` or they will fight over what your policy should be.
> **Note:** `projects.IAMBinding` resources **can be** used in conjunction with `projects.IAMMember` resources **only if** they do not grant privilege to the same role.
> **Note:** The underlying API method `projects.setIamPolicy` has a lot of constraints which are documented [here](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy). In addition to these constraints,
IAM Conditions cannot be used with Basic Roles such as Owner. Violating these constraints will result in the API returning 400 error code so please review these if you encounter errors with this resource.
## google\_project\_iam\_policy
> **Be careful!** You can accidentally lock yourself out of your project
using this resource. Deleting a `projects.IAMPolicy` removes access
from anyone without organization-level access to the project. Proceed with caution.
It's not recommended to use `projects.IAMPolicy` with your provider project
to avoid locking yourself out, and it should generally only be used with projects
fully managed by this provider. If you do use this resource, it is recommended to **import** the policy before
applying the change.
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/editor",
members=["user:<EMAIL>"],
)])
project = gcp.projects.IAMPolicy("project",
project="your-project-id",
policy_data=admin.policy_data)
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
condition=gcp.organizations.GetIAMPolicyBindingConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
title="expires_after_2019_12_31",
),
members=["user:<EMAIL>"],
role="roles/compute.admin",
)])
project = gcp.projects.IAMPolicy("project",
policy_data=admin.policy_data,
project="your-project-id")
```
## google\_project\_iam\_binding
```python
import pulumi
import pulumi_gcp as gcp
project = gcp.projects.IAMBinding("project",
members=["user:<EMAIL>"],
project="your-project-id",
role="roles/editor")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
project = gcp.projects.IAMBinding("project",
condition=gcp.projects.IAMBindingConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
title="expires_after_2019_12_31",
),
members=["user:<EMAIL>"],
project="your-project-id",
role="roles/container.admin")
```
## google\_project\_iam\_member
```python
import pulumi
import pulumi_gcp as gcp
project = gcp.projects.IAMMember("project",
member="user:<EMAIL>",
project="your-project-id",
role="roles/editor")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
project = gcp.projects.IAMMember("project",
condition=gcp.projects.IAMMemberConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
title="expires_after_2019_12_31",
),
member="user:<EMAIL>",
project="your-project-id",
role="roles/firebase.admin")
```
## google\_project\_iam\_audit\_config
```python
import pulumi
import pulumi_gcp as gcp
project = gcp.projects.IAMAuditConfig("project",
audit_log_configs=[
gcp.projects.IAMAuditConfigAuditLogConfigArgs(
log_type="ADMIN_READ",
),
gcp.projects.IAMAuditConfigAuditLogConfigArgs(
exempted_members=["user:<EMAIL>"],
log_type="DATA_READ",
),
],
project="your-project-id",
service="allServices")
```
## Import
IAM member imports use space-delimited identifiers; the resource in question, the role, and the account.
This member resource can be imported using the `project_id`, role, and member e.g.
```sh
$ pulumi import gcp:projects/iAMPolicy:IAMPolicy my_project "your-project-id roles/viewer user:<EMAIL>"
```
IAM binding imports use space-delimited identifiers; the resource in question and the role.
This binding resource can be imported using the `project_id` and role, e.g.
```sh
$ pulumi import gcp:projects/iAMPolicy:IAMPolicy my_project "your-project-id roles/viewer"
```
IAM policy imports use the identifier of the resource in question.
This policy resource can be imported using the `project_id`.
```sh
$ pulumi import gcp:projects/iAMPolicy:IAMPolicy my_project your-project-id
```
IAM audit config imports use the identifier of the resource in question and the service, e.g.
```sh
$ pulumi import gcp:projects/iAMPolicy:IAMPolicy my_project "your-project-id foo.googleapis.com"
```
        -> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the
full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] policy_data: The `organizations.get_iam_policy` data source that represents
the IAM policy that will be applied to the project. The policy will be
merged with any existing policy applied to the project.
:param pulumi.Input[str] project: The project id of the target project. This is not
inferred from the provider.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IAMPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Four different resources help you manage your IAM policy for a project. Each of these resources serves a different use case:
* `projects.IAMPolicy`: Authoritative. Sets the IAM policy for the project and replaces any existing policy already attached.
* `projects.IAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the project are preserved.
* `projects.IAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the project are preserved.
* `projects.IAMAuditConfig`: Authoritative for a given service. Updates the IAM policy to enable audit logging for the given service.
> **Note:** `projects.IAMPolicy` **cannot** be used in conjunction with `projects.IAMBinding`, `projects.IAMMember`, or `projects.IAMAuditConfig` or they will fight over what your policy should be.
> **Note:** `projects.IAMBinding` resources **can be** used in conjunction with `projects.IAMMember` resources **only if** they do not grant privilege to the same role.
> **Note:** The underlying API method `projects.setIamPolicy` has a lot of constraints which are documented [here](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy). In addition to these constraints,
IAM Conditions cannot be used with Basic Roles such as Owner. Violating these constraints will result in the API returning 400 error code so please review these if you encounter errors with this resource.
## google\_project\_iam\_policy
> **Be careful!** You can accidentally lock yourself out of your project
using this resource. Deleting a `projects.IAMPolicy` removes access
from anyone without organization-level access to the project. Proceed with caution.
It's not recommended to use `projects.IAMPolicy` with your provider project
to avoid locking yourself out, and it should generally only be used with projects
fully managed by this provider. If you do use this resource, it is recommended to **import** the policy before
applying the change.
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/editor",
members=["user:<EMAIL>"],
)])
project = gcp.projects.IAMPolicy("project",
project="your-project-id",
policy_data=admin.policy_data)
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
condition=gcp.organizations.GetIAMPolicyBindingConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
title="expires_after_2019_12_31",
),
members=["user:<EMAIL>"],
role="roles/compute.admin",
)])
project = gcp.projects.IAMPolicy("project",
policy_data=admin.policy_data,
project="your-project-id")
```
## google\_project\_iam\_binding
```python
import pulumi
import pulumi_gcp as gcp
project = gcp.projects.IAMBinding("project",
members=["user:<EMAIL>"],
project="your-project-id",
role="roles/editor")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
project = gcp.projects.IAMBinding("project",
condition=gcp.projects.IAMBindingConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
title="expires_after_2019_12_31",
),
members=["user:<EMAIL>"],
project="your-project-id",
role="roles/container.admin")
```
## google\_project\_iam\_member
```python
import pulumi
import pulumi_gcp as gcp
project = gcp.projects.IAMMember("project",
member="user:<EMAIL>",
project="your-project-id",
role="roles/editor")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
project = gcp.projects.IAMMember("project",
condition=gcp.projects.IAMMemberConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\"2020-01-01T00:00:00Z\")",
title="expires_after_2019_12_31",
),
member="user:<EMAIL>",
project="your-project-id",
role="roles/firebase.admin")
```
## google\_project\_iam\_audit\_config
```python
import pulumi
import pulumi_gcp as gcp
project = gcp.projects.IAMAuditConfig("project",
audit_log_configs=[
gcp.projects.IAMAuditConfigAuditLogConfigArgs(
log_type="ADMIN_READ",
),
gcp.projects.IAMAuditConfigAuditLogConfigArgs(
exempted_members=["user:<EMAIL>"],
log_type="DATA_READ",
),
],
project="your-project-id",
service="allServices")
```
## Import
IAM member imports use space-delimited identifiers; the resource in question, the role, and the account.
This member resource can be imported using the `project_id`, role, and member e.g.
```sh
$ pulumi import gcp:projects/iAMPolicy:IAMPolicy my_project "your-project-id roles/viewer user:<EMAIL>"
```
IAM binding imports use space-delimited identifiers; the resource in question and the role.
This binding resource can be imported using the `project_id` and role, e.g.
```sh
$ pulumi import gcp:projects/iAMPolicy:IAMPolicy my_project "your-project-id roles/viewer"
```
IAM policy imports use the identifier of the resource in question.
This policy resource can be imported using the `project_id`.
```sh
$ pulumi import gcp:projects/iAMPolicy:IAMPolicy my_project your-project-id
```
IAM audit config imports use the identifier of the resource in question and the service, e.g.
```sh
$ pulumi import gcp:projects/iAMPolicy:IAMPolicy my_project "your-project-id foo.googleapis.com"
```
        -> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the
full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`.
:param str resource_name: The name of the resource.
:param IAMPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IAMPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
policy_data: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IAMPolicyArgs.__new__(IAMPolicyArgs)
if policy_data is None and not opts.urn:
raise TypeError("Missing required property 'policy_data'")
__props__.__dict__["policy_data"] = policy_data
if project is None and not opts.urn:
raise TypeError("Missing required property 'project'")
__props__.__dict__["project"] = project
__props__.__dict__["etag"] = None
super(IAMPolicy, __self__).__init__(
'gcp:projects/iAMPolicy:IAMPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
etag: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None) -> 'IAMPolicy':
"""
Get an existing IAMPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] etag: (Computed) The etag of the project's IAM policy.
:param pulumi.Input[str] policy_data: The `organizations.get_iam_policy` data source that represents
the IAM policy that will be applied to the project. The policy will be
merged with any existing policy applied to the project.
:param pulumi.Input[str] project: The project id of the target project. This is not
inferred from the provider.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IAMPolicyState.__new__(_IAMPolicyState)
__props__.__dict__["etag"] = etag
__props__.__dict__["policy_data"] = policy_data
__props__.__dict__["project"] = project
return IAMPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
(Computed) The etag of the project's IAM policy.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> pulumi.Output[str]:
"""
The `organizations.get_iam_policy` data source that represents
the IAM policy that will be applied to the project. The policy will be
merged with any existing policy applied to the project.
"""
return pulumi.get(self, "policy_data")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The project id of the target project. This is not
inferred from the provider.
"""
return pulumi.get(self, "project")
|
python/GafferImageTest/ImageReaderTest.py
|
ddesmond/gaffer
| 561 |
129899
|
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import shutil
import unittest
import six
import imath
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class ImageReaderTest( GafferImageTest.ImageTestCase ) :
fileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/circles.exr" )
colorSpaceFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/circles_as_cineon.exr" )
offsetDataWindowFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/rgb.100x100.exr" )
jpgFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/circles.jpg" )
largeFileName = os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/colorbars_max_clamp.exr" )
def setUp( self ) :
GafferImageTest.ImageTestCase.setUp( self )
self.__defaultColorSpaceFunction = GafferImage.ImageReader.getDefaultColorSpaceFunction()
def tearDown( self ) :
GafferImageTest.ImageTestCase.tearDown( self )
GafferImage.ImageReader.setDefaultColorSpaceFunction( self.__defaultColorSpaceFunction )
def test( self ) :
n = GafferImage.ImageReader()
n["fileName"].setValue( self.fileName )
oiio = GafferImage.OpenImageIOReader()
oiio["fileName"].setValue( self.fileName )
self.assertEqual( n["out"]["format"].getValue(), oiio["out"]["format"].getValue() )
self.assertEqual( n["out"]["dataWindow"].getValue(), oiio["out"]["dataWindow"].getValue() )
self.assertEqual( n["out"]["metadata"].getValue(), oiio["out"]["metadata"].getValue() )
self.assertEqual( n["out"]["deep"].getValue(), oiio["out"]["deep"].getValue() )
self.assertEqual( n["out"]["channelNames"].getValue(), oiio["out"]["channelNames"].getValue() )
self.assertEqual( n["out"].sampleOffsets( imath.V2i( 0 ) ), oiio["out"].sampleOffsets( imath.V2i( 0 ) ) )
self.assertEqual( n["out"].channelData( "R", imath.V2i( 0 ) ), oiio["out"].channelData( "R", imath.V2i( 0 ) ) )
self.assertImagesEqual( n["out"], oiio["out"] )
def testUnspecifiedFilename( self ) :
n = GafferImage.ImageReader()
n["out"]["channelNames"].getValue()
self.assertTrue( GafferImage.BufferAlgo.empty( n["out"]['dataWindow'].getValue() ) )
def testChannelDataHashes( self ) :
# Test that two tiles within the same image have different hashes.
n = GafferImage.ImageReader()
n["fileName"].setValue( self.largeFileName )
h1 = n["out"].channelData( "R", imath.V2i( 0 ) ).hash()
h2 = n["out"].channelData( "R", imath.V2i( GafferImage.ImagePlug().tileSize() ) ).hash()
self.assertNotEqual( h1, h2 )
def testColorSpaceOverride( self ) :
exrReader = GafferImage.ImageReader()
exrReader["fileName"].setValue( self.fileName )
exrReader["colorSpace"].setValue( "Cineon" )
colorSpaceOverrideReader = GafferImage.ImageReader()
colorSpaceOverrideReader["fileName"].setValue( self.colorSpaceFileName )
exrImage = exrReader["out"]
colorSpaceOverrideImage = colorSpaceOverrideReader["out"]
self.assertImagesEqual( colorSpaceOverrideImage, exrImage, ignoreMetadata = True, maxDifference = 0.005 )
def testJpgRead( self ) :
exrReader = GafferImage.ImageReader()
exrReader["fileName"].setValue( self.fileName )
jpgReader = GafferImage.ImageReader()
jpgReader["fileName"].setValue( self.jpgFileName )
self.assertImagesEqual( exrReader["out"], jpgReader["out"], ignoreMetadata = True, maxDifference = 0.001 )
def testSupportedExtensions( self ) :
self.assertEqual( GafferImage.ImageReader.supportedExtensions(), GafferImage.OpenImageIOReader.supportedExtensions() )
def testFileRefresh( self ) :
testFile = self.temporaryDirectory() + "/refresh.exr"
shutil.copyfile( self.fileName, testFile )
reader = GafferImage.ImageReader()
reader["fileName"].setValue( testFile )
image1 = GafferImage.ImageAlgo.image( reader["out"] )
		# even though we've changed the image on disk, gaffer will
# still have the old one in its cache.
shutil.copyfile( self.jpgFileName, testFile )
self.assertEqual( GafferImage.ImageAlgo.image( reader["out"] ), image1 )
# until we force a refresh
reader["refreshCount"].setValue( reader["refreshCount"].getValue() + 1 )
self.assertNotEqual( GafferImage.ImageAlgo.image( reader["out"] ), image1 )
def testNonexistentFiles( self ) :
reader = GafferImage.ImageReader()
reader["fileName"].setValue( "wellIDontExist.exr" )
six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", reader["out"]["format"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", reader["out"]["dataWindow"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", reader["out"]["metadata"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", reader["out"]["channelNames"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", reader["out"].channelData, "R", imath.V2i( 0 ) )
six.assertRaisesRegex( self, RuntimeError, ".*wellIDontExist.exr.*", GafferImage.ImageAlgo.image, reader["out"] )
def testMissingFrameMode( self ) :
testSequence = IECore.FileSequence( self.temporaryDirectory() + "/incompleteSequence.####.exr" )
shutil.copyfile( self.fileName, testSequence.fileNameForFrame( 1 ) )
shutil.copyfile( self.offsetDataWindowFileName, testSequence.fileNameForFrame( 3 ) )
reader = GafferImage.ImageReader()
reader["fileName"].setValue( testSequence.fileName )
oiio = GafferImage.OpenImageIOReader()
oiio["fileName"].setValue( testSequence.fileName )
def assertMatch() :
self.assertEqual( reader["out"]["format"].getValue(), oiio["out"]["format"].getValue() )
self.assertEqual( reader["out"]["dataWindow"].getValue(), oiio["out"]["dataWindow"].getValue() )
self.assertEqual( reader["out"]["metadata"].getValue(), oiio["out"]["metadata"].getValue() )
self.assertEqual( reader["out"]["channelNames"].getValue(), oiio["out"]["channelNames"].getValue() )
# It is only valid to query the data inside the data window
if not GafferImage.BufferAlgo.empty( reader["out"]["dataWindow"].getValue() ):
self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), oiio["out"].channelData( "R", imath.V2i( 0 ) ) )
self.assertImagesEqual( reader["out"], oiio["out"] )
context = Gaffer.Context()
# set to a missing frame
context.setFrame( 2 )
# everything throws
reader["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Error )
with context :
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["format"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["dataWindow"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["metadata"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["channelNames"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"].channelData, "R", imath.V2i( 0 ) )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", GafferImage.ImageAlgo.image, reader["out"] )
# Hold mode matches OpenImageIOReader
reader["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Hold )
oiio["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Hold )
with context :
assertMatch()
# Black mode matches OpenImageIOReader
reader["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Black )
oiio["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Black )
with context :
assertMatch()
# set to a different missing frame
context.setFrame( 4 )
# Hold mode matches OpenImageIOReader
reader["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Hold )
oiio["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Hold )
with context :
assertMatch()
# Black mode matches OpenImageIOReader
reader["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Black )
oiio["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Black )
with context :
assertMatch()
# set to a missing frame before the start of the sequence
context.setFrame( 0 )
# Hold mode matches OpenImageIOReader
reader["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Hold )
oiio["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Hold )
with context :
assertMatch()
# Black mode matches OpenImageIOReader
reader["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Black )
oiio["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Black )
with context :
assertMatch()
# explicit fileNames do not support MissingFrameMode
reader["fileName"].setValue( testSequence.fileNameForFrame( 0 ) )
reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Hold )
with context :
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", GafferImage.ImageAlgo.image, reader["out"] )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["format"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["dataWindow"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["metadata"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["channelNames"].getValue )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"].channelData, "R", imath.V2i( 0 ) )
reader["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Black )
oiio["missingFrameMode"].setValue( GafferImage.OpenImageIOReader.MissingFrameMode.Black )
with context :
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", GafferImage.ImageAlgo.image, reader["out"] )
six.assertRaisesRegex( self, RuntimeError, ".*incompleteSequence.*.exr.*", reader["out"]["format"].getValue )
self.assertEqual( reader["out"]["dataWindow"].getValue(), oiio["out"]["dataWindow"].getValue() )
self.assertEqual( reader["out"]["metadata"].getValue(), oiio["out"]["metadata"].getValue() )
self.assertEqual( reader["out"]["channelNames"].getValue(), oiio["out"]["channelNames"].getValue() )
self.assertTrue( GafferImage.BufferAlgo.empty( reader["out"]['dataWindow'].getValue() ) )
self.assertTrue( GafferImage.BufferAlgo.empty( oiio["out"]['dataWindow'].getValue() ) )
def testFrameRangeMask( self ) :
testSequence = IECore.FileSequence( self.temporaryDirectory() + "/incompleteSequence.####.exr" )
shutil.copyfile( self.fileName, testSequence.fileNameForFrame( 1 ) )
shutil.copyfile( self.fileName, testSequence.fileNameForFrame( 3 ) )
shutil.copyfile( self.offsetDataWindowFileName, testSequence.fileNameForFrame( 5 ) )
shutil.copyfile( self.offsetDataWindowFileName, testSequence.fileNameForFrame( 7 ) )
reader = GafferImage.ImageReader()
reader["fileName"].setValue( testSequence.fileName )
reader["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Hold )
oiio = GafferImage.OpenImageIOReader()
oiio["fileName"].setValue( testSequence.fileName )
oiio["missingFrameMode"].setValue( GafferImage.ImageReader.MissingFrameMode.Hold )
context = Gaffer.Context()
# make sure the tile we're comparing isn't black
# so we can tell if BlackOutside is working.
blackTile = IECore.FloatVectorData( [ 0 ] * GafferImage.ImagePlug.tileSize() * GafferImage.ImagePlug.tileSize() )
with context :
for i in range( 1, 11 ) :
context.setFrame( i )
self.assertNotEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), blackTile )
def assertBlack() :
# format and data window still match
self.assertEqual( reader["out"]["format"].getValue(), oiio["out"]["format"].getValue() )
self.assertEqual( reader["out"]["dataWindow"].getValue(), oiio["out"]["dataWindow"].getValue() )
self.assertNotEqual( GafferImage.ImageAlgo.image( reader["out"] ), GafferImage.ImageAlgo.image( oiio["out"] ) )
# the metadata and channel names are at the defaults
self.assertEqual( reader["out"]["metadata"].getValue(), reader["out"]["metadata"].defaultValue() )
self.assertEqual( reader["out"]["channelNames"].getValue(), reader["out"]["channelNames"].defaultValue() )
# channel data is black
self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), blackTile )
def assertMatch() :
self.assertEqual( reader["out"]["format"].getValue(), oiio["out"]["format"].getValue() )
self.assertEqual( reader["out"]["dataWindow"].getValue(), oiio["out"]["dataWindow"].getValue() )
self.assertEqual( reader["out"]["metadata"].getValue(), oiio["out"]["metadata"].getValue() )
self.assertEqual( reader["out"]["channelNames"].getValue(), oiio["out"]["channelNames"].getValue() )
self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), oiio["out"].channelData( "R", imath.V2i( 0 ) ) )
self.assertImagesEqual( reader["out"], oiio["out"] )
def assertHold( holdFrame ) :
context = Gaffer.Context()
context.setFrame( holdFrame )
with context :
holdImage = GafferImage.ImageAlgo.image( reader["out"] )
holdFormat = reader["out"]["format"].getValue()
holdDataWindow = reader["out"]["dataWindow"].getValue()
holdMetadata = reader["out"]["metadata"].getValue()
holdChannelNames = reader["out"]["channelNames"].getValue()
holdTile = reader["out"].channelData( "R", imath.V2i( 0 ) )
self.assertEqual( reader["out"]["format"].getValue(), holdFormat )
self.assertEqual( reader["out"]["dataWindow"].getValue(), holdDataWindow )
self.assertEqual( reader["out"]["metadata"].getValue(), holdMetadata )
self.assertEqual( reader["out"]["channelNames"].getValue(), holdChannelNames )
self.assertEqual( reader["out"].channelData( "R", imath.V2i( 0 ) ), holdTile )
self.assertEqual( GafferImage.ImageAlgo.image( reader["out"] ), holdImage )
reader["start"]["frame"].setValue( 4 )
reader["end"]["frame"].setValue( 7 )
# frame 0 errors, match from 1-10
reader["start"]["mode"].setValue( GafferImage.ImageReader.FrameMaskMode.None_ )
reader["end"]["mode"].setValue( GafferImage.ImageReader.FrameMaskMode.None_ )
with context :
for i in range( 0, 11 ) :
context.setFrame( i )
assertMatch()
# black from 0-3, match from 4-10
reader["start"]["mode"].setValue( GafferImage.ImageReader.FrameMaskMode.BlackOutside )
with context :
for i in range( 0, 4 ) :
context.setFrame( i )
assertBlack()
for i in range( 4, 11 ) :
context.setFrame( i )
assertMatch()
# black from 0-3, match from 4-7, black from 8-10
reader["end"]["mode"].setValue( GafferImage.ImageReader.FrameMaskMode.BlackOutside )
with context :
for i in range( 0, 4 ) :
context.setFrame( i )
assertBlack()
for i in range( 4, 8 ) :
context.setFrame( i )
assertMatch()
for i in range( 8, 11 ) :
context.setFrame( i )
assertBlack()
# hold frame 4 from 0-3, match from 4-7, black from 8-10
reader["start"]["mode"].setValue( GafferImage.ImageReader.FrameMaskMode.ClampToFrame )
with context :
for i in range( 0, 4 ) :
context.setFrame( i )
assertHold( 4 )
for i in range( 4, 8 ) :
context.setFrame( i )
assertMatch()
for i in range( 8, 11 ) :
context.setFrame( i )
assertBlack()
# hold frame 4 from 0-3, match from 4-7, hold frame 7 from 8-10
reader["end"]["mode"].setValue( GafferImage.ImageReader.FrameMaskMode.ClampToFrame )
with context :
for i in range( 0, 4 ) :
context.setFrame( i )
assertHold( 4 )
for i in range( 4, 8 ) :
context.setFrame( i )
assertMatch()
for i in range( 8, 11 ) :
context.setFrame( i )
assertHold( 7 )
def testDefaultColorSpaceFunctionArguments( self ) :
# Make a network to write and read an image
# in various formats.
c = GafferImage.Constant()
c["format"].setValue( GafferImage.Format( 64, 64 ) )
w = GafferImage.ImageWriter()
w["in"].setInput( c["out"] )
r = GafferImage.ImageReader()
r["fileName"].setInput( w["fileName"] )
# Register a custom colorspace function that
# just captures its arguments.
capturedArguments = {}
def f( fileName, fileFormat, dataType, metadata ) :
capturedArguments.update(
{
"fileName" : fileName,
"fileFormat" : fileFormat,
"dataType" : dataType,
"metadata" : metadata,
}
)
return "linear"
GafferImage.ImageReader.setDefaultColorSpaceFunction( f )
# Verify that the correct arguments are passed for
# a variety of fileNames and dataTypes.
for ext, fileFormat, dataType in [
( "exr", "openexr", "half" ),
( "dpx", "dpx", "uint12" ),
( "TIFF", "tiff", "float" ),
( "tif", "tiff", "uint32" ),
] :
w["fileName"].setValue( "{0}/{1}.{2}".format( self.temporaryDirectory(), dataType, ext ) )
w[fileFormat]["dataType"].setValue( dataType )
w.execute()
capturedArguments.clear()
r["out"].channelData( "R", imath.V2i( 0 ) ) # Triggers call to color space function
self.assertEqual( len( capturedArguments ), 4 )
self.assertEqual( capturedArguments["fileName"], w["fileName"].getValue() )
self.assertEqual( capturedArguments["fileFormat"], fileFormat )
self.assertEqual( capturedArguments["dataType"], dataType )
self.assertEqual( capturedArguments["metadata"], r["out"]["metadata"].getValue() )
def testDisabling( self ) :
reader = GafferImage.ImageReader()
reader["fileName"].setValue( self.fileName )
reader["enabled"].setValue( False )
constant = GafferImage.Constant()
constant["enabled"].setValue( False )
self.assertImagesEqual( reader["out"], constant["out"] )
if __name__ == "__main__":
unittest.main()
|
tensorflow/security/fuzzing/dataFormatVecPermute_fuzz.py
|
EricRemmerswaal/tensorflow
| 190,993 |
129941
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This is a Python API fuzzer for tf.raw_ops.DataFormatVecPermute."""
import atheris
with atheris.instrument_imports():
import sys
from python_fuzzing import FuzzingHelper
import tensorflow as tf
@atheris.instrument_func
def TestOneInput(input_bytes):
"""Test randomized integer fuzzing input for tf.raw_ops.DataFormatVecPermute."""
fh = FuzzingHelper(input_bytes)
dtype = fh.get_tf_dtype()
# Max shape can be 8 in length and randomized from 0-8 without running into
  # an OOM error.
shape = fh.get_int_list(min_length=0, max_length=8, min_int=0, max_int=8)
seed = fh.get_int()
try:
x = tf.random.uniform(shape=shape, dtype=dtype, seed=seed)
src_format_digits = str(fh.get_int(min_int=0, max_int=999999999))
dest_format_digits = str(fh.get_int(min_int=0, max_int=999999999))
_ = tf.raw_ops.DataFormatVecPermute(
x,
src_format=src_format_digits,
dst_format=dest_format_digits,
name=fh.get_string())
except (tf.errors.InvalidArgumentError, ValueError, TypeError):
pass
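# Illustrative sketch (not part of the original fuzzer): a single well-formed call
# to the op under test, permuting a length-4 vector from "NHWC" to "NCHW" order.
# The helper below is not invoked by the fuzzer; it exists only as documentation.
def _example_call():
  return tf.raw_ops.DataFormatVecPermute(
      x=[7, 4, 9, 3], src_format="NHWC", dst_format="NCHW")  # -> [7, 3, 4, 9]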
def main():
atheris.Setup(sys.argv, TestOneInput, enable_python_coverage=True)
atheris.Fuzz()
if __name__ == '__main__':
main()
|
docs/demos/multi_page_basics/pages/outlook.py
|
ruxi/dash-labs
| 110 |
129949
|
import dash
dash.register_page(
__name__,
title="Forward Outlook",
description="This is the forward outlook", # should accept callable too
path="/forward-outlook",
image="birds.jpeg",
)
def layout():
return "Forward outlook"
|
tests/general_spinless_fermion_opstr_test.py
|
anton-buyskikh/QuSpin
| 195 |
129994
|
from __future__ import print_function, division
import sys,os
qspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,qspin_path)
from quspin.basis import spinless_fermion_basis_1d
from quspin.basis import spinless_fermion_basis_general
import numpy as np
from itertools import product
def check_ME(b1,b2,opstr,indx,dtype,err_msg):
if b1.Ns != b2.Ns:
print(b1._basis)
print(b2._basis)
raise Exception("number of states do not match.")
ME1,row1,col1=b1.Op(opstr,indx,1.0,dtype)
ME2,row2,col2=b2.Op(opstr,indx,1.0,dtype)
if len(ME1) != len(ME2):
print(ME1)
print(row1)
print(col1)
print()
print(ME2)
print(row2)
print(col2)
raise Exception("number of matrix elements do not match.")
if len(ME1)>0 and len(ME2)>0:
try:
np.testing.assert_allclose(row1-row2,0,atol=1e-6,err_msg=err_msg)
np.testing.assert_allclose(col1-col2,0,atol=1e-6,err_msg=err_msg)
np.testing.assert_allclose(ME1-ME2,0,atol=1e-6,err_msg=err_msg)
except:
print(ME1)
print(row1)
print(col1)
print()
print(ME2)
print(row2)
print(col2)
raise Exception
def test_gen_basis_spinless_fermion(l_max,N=4):
L=6
kblocks = [None]
kblocks.extend(range(L))
pblocks = [None,0,1]
ops = ["n","z","+","-","I"]
Nfs = [None,N]
t = np.array([(i+1)%L for i in range(L)])
p = np.array([L-i-1 for i in range(L)])
for Nf,kblock,pblock in product(Nfs,kblocks,pblocks):
gen_blocks = {}
basis_blocks = {}
if kblock==0 or kblock==L//2:
if pblock is not None:
basis_blocks["pblock"] = (-1)**pblock
gen_blocks["pblock"] = (p,pblock)
else:
basis_blocks["pblock"] = None
gen_blocks["pblock"] = None
else:
basis_blocks["pblock"] = None
gen_blocks["pblock"] = None
if kblock is not None:
basis_blocks["kblock"] = kblock
gen_blocks["kblock"] = (t,kblock)
else:
basis_blocks["kblock"] = None
gen_blocks["kblock"] = None
basis_1d = spinless_fermion_basis_1d(L,Nf=Nf,**basis_blocks)
gen_basis = spinless_fermion_basis_general(L,Nf=Nf,**gen_blocks)
n = basis_1d._get_norms(np.float64)**2
n_gen = (gen_basis._n.astype(np.float64))*gen_basis._pers.prod()
if basis_1d.Ns != gen_basis.Ns:
print(L,basis_blocks)
print(basis_1d)
print(gen_basis)
raise ValueError("basis size mismatch")
np.testing.assert_allclose(basis_1d._basis-gen_basis._basis,0,atol=1e-6)
np.testing.assert_allclose(n-n_gen ,0,atol=1e-6)
for l in range(1,l_max+1):
for i0 in range(0,L-l+1,1):
indx = range(i0,i0+l,1)
for opstr in product(*[ops for i in range(l)]):
opstr = "".join(list(opstr))
printing = dict(basis_blocks)
printing["opstr"]=opstr
printing["indx"]=indx
printing["Nf"]=Nf
err_msg="testing: {opstr:} {indx:} Nf={Nf:} kblock={kblock:} pblock={pblock:}".format(**printing)
check_ME(basis_1d,gen_basis,opstr,indx,np.complex128,err_msg)
print("testing Nf=4")
test_gen_basis_spinless_fermion(3,N=4)
print("testing Nf=5")
test_gen_basis_spinless_fermion(3,N=5)
print("testing Nf=6")
test_gen_basis_spinless_fermion(3,N=6)
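# A minimal standalone illustration of the Op() call that check_ME exercises
# above (a sketch, not part of the test): the hopping operator "+-" applied to
# sites (0, 1) of a 4-site chain with 2 fermions returns the matrix elements
# together with their row and column indices.
def _op_example():
    basis = spinless_fermion_basis_1d(4, Nf=2)
    ME, row, col = basis.Op("+-", [0, 1], 1.0, np.float64)
    return ME, row, col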
|
update/venv/lib/python3.9/site-packages/fontTools/__init__.py
|
Imudassir77/material-design-icons
| 38,667 |
129996
|
<reponame>Imudassir77/material-design-icons
import logging
from fontTools.misc.loggingTools import configLogger
log = logging.getLogger(__name__)
version = __version__ = "4.22.1"
__all__ = ["version", "log", "configLogger"]
|
benchmarks/proximal_gradient_benchmark.py
|
GeoffNN/jaxopt
| 434 |
130002
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark JAX implementation vs. NumPy implementation of proximal gradient."""
import time
from typing import NamedTuple
from typing import Sequence
from absl import app
from absl import flags
from sklearn import datasets
from sklearn import preprocessing
import numpy as onp
import jax
import jax.numpy as jnp
from jaxopt import base
from jaxopt import proximal_gradient2 as pg
from jaxopt.prox import prox_lasso
FLAGS = flags.FLAGS
flags.DEFINE_string("dataset", default="boston", help=("Dataset to use."))
flags.DEFINE_bool("float64", default=False, help=("Enable double precision."))
flags.DEFINE_float("lam", default=1.0, help=("Regularization value."))
flags.DEFINE_integer("maxiter", default=200, help=("Max # of iterations."))
flags.DEFINE_integer("n_samples", default=100000, help=("Number of samples."))
flags.DEFINE_integer("n_features", default=200, help=("Number of features."))
flags.DEFINE_bool("verbose", default=False, help=("Enable verbose output."))
def _make_linesearch(fun, prox, maxls):
def linesearch(curr_x, curr_x_fun_val, curr_x_fun_grad, curr_stepsize):
"""A pure NumPy re-implementation of linesearch for benchmarking reasons."""
for _ in range(maxls):
next_x = prox(curr_x - curr_stepsize * curr_x_fun_grad, curr_stepsize)
diff = next_x - curr_x
sqdist = onp.vdot(diff, diff)
value_F = fun(next_x)
value_Q = (curr_x_fun_val + onp.vdot(diff, curr_x_fun_grad) +
0.5 * sqdist / curr_stepsize)
if value_F <= value_Q:
return next_x, curr_stepsize
curr_stepsize *= 0.5
# Undo the last decrease when `maxls` is reached.
curr_stepsize *= 2
return next_x, curr_stepsize
return linesearch
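# A tiny self-contained check of the sufficient-decrease test above (an
# illustrative sketch, not part of the benchmark): for f(x) = 0.5 * x**2 with
# the identity prox, any stepsize <= 1 satisfies F(next) <= Q(next), so the
# loop halves an initial stepsize of 2.0 down to 1.0 and stops there.
def _linesearch_smoke_test():
  fun = lambda x: 0.5 * float(x) ** 2
  prox = lambda x, stepsize=1.0: x  # identity prox, i.e. plain gradient descent
  linesearch = _make_linesearch(fun, prox, maxls=15)
  next_x, stepsize = linesearch(onp.asarray(1.0), fun(1.0), onp.asarray(1.0), 2.0)
  assert stepsize == 1.0 and float(next_x) == 0.0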
def proximal_gradient_onp(fun, init, prox, stepsize, maxiter=200, maxls=15,
tol=1e-3, verbose=0):
"""A pure NumPy re-implementation of proximal gradient for benchmarking."""
curr_x = init
curr_stepsize = 1.0
linesearch = _make_linesearch(fun, prox, maxls)
for iter_num in range(1, maxiter + 1):
# Convergence monitoring.
curr_x_fun_val, curr_x_fun_grad = fun(curr_x, grad=True)
diff_x = curr_x - prox(curr_x - curr_x_fun_grad)
curr_error = onp.sqrt(onp.sum(diff_x ** 2))
if verbose: print(iter_num, curr_error)
if curr_error <= tol: break
if stepsize <= 0:
# With line search.
curr_x, curr_stepsize = linesearch(curr_x, curr_x_fun_val,
curr_x_fun_grad, curr_stepsize)
if curr_stepsize <= 1e-6:
# Reset step size.
curr_stepsize = 1.0
else:
curr_stepsize *= 2
else:
# Without line search.
curr_x = prox(curr_x - stepsize * curr_x_fun_grad, stepsize)
state = pg.ProxGradState(iter_num=iter_num, error=curr_error,
stepsize=curr_stepsize)
return base.OptStep(params=curr_x, state=state)
def proximal_gradient_accel_onp(fun, init, prox, stepsize, maxiter=200,
maxls=15, tol=1e-3, verbose=0):
"""A pure NumPy re-implementation of proximal gradient with acceleration."""
curr_x = init
curr_y = init
curr_t = 1.0
curr_stepsize = 1.0
linesearch = _make_linesearch(fun, prox, maxls)
for iter_num in range(1, maxiter + 1):
# Convergence monitoring
curr_x_fun_grad = fun(curr_x, grad=True)[1]
diff_x = curr_x - prox(curr_x - curr_x_fun_grad)
curr_error = onp.sqrt(onp.sum(diff_x ** 2))
if verbose: print(iter_num, curr_error)
if curr_error <= tol: break
# Iteration.
curr_y_fun_val, curr_y_fun_grad = fun(curr_y, grad=True)
if stepsize <= 0:
# With line search.
next_x, curr_stepsize = linesearch(curr_y, curr_y_fun_val,
curr_y_fun_grad, curr_stepsize)
if curr_stepsize <= 1e-6:
# Reset step size.
curr_stepsize = 1.0
else:
curr_stepsize *= 2
else:
# Without line search.
next_x = prox(curr_y - stepsize * curr_y_fun_grad, stepsize)
next_t = 0.5 * (1 + onp.sqrt(1 + 4 * curr_t ** 2))
diff_x = next_x - curr_x
next_y = next_x + (curr_t - 1) / next_t * diff_x
curr_x = next_x
curr_y = next_y
curr_t = next_t
state = pg.ProxGradState(iter_num=iter_num, error=curr_error,
stepsize=curr_stepsize)
return base.OptStep(params=curr_x, state=state)
def lasso_onp(X, y, lam, stepsize, tol, maxiter, acceleration, verbose):
def fun(w, grad=False):
y_pred = onp.dot(X, w)
diff = y_pred - y
obj = 0.5 * onp.dot(diff, diff)
if not grad: return obj
g = onp.dot(X.T, diff)
return obj, g
def prox(w, stepsize=1.0):
return onp.sign(w) * onp.maximum(onp.abs(w) - lam * stepsize, 0)
init = onp.zeros(X.shape[1], dtype=X.dtype)
solver_fun = proximal_gradient_accel_onp if acceleration else proximal_gradient_onp
return solver_fun(fun=fun, init=init, prox=prox, stepsize=stepsize,
maxiter=maxiter, tol=tol, verbose=verbose)
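# Worked example of the soft-thresholding prox defined in lasso_onp above (an
# illustrative sketch, not part of the benchmark): with lam * stepsize = 0.3,
# the vector [-1.0, 0.2, 0.5] maps approximately to [-0.7, 0.0, 0.2].
def _soft_threshold_example():
  w = onp.array([-1.0, 0.2, 0.5])
  shrunk = onp.sign(w) * onp.maximum(onp.abs(w) - 0.3, 0)
  assert onp.allclose(shrunk, [-0.7, 0.0, 0.2])
  return shrunk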
def lasso_jnp(X, y, lam, stepsize, tol, maxiter, acceleration, verbose):
def fun(w, data):
X, y = data
y_pred = jnp.dot(X, w)
diff = y_pred - y
return 0.5 * jnp.dot(diff, diff)
init = jnp.zeros(X.shape[1], dtype=X.dtype)
solver = pg.ProximalGradient(fun=fun, prox=prox_lasso,
tol=tol, stepsize=stepsize, maxiter=maxiter,
acceleration=acceleration, verbose=verbose)
return solver.run(init, lam, (X, y))
def run_proximal_gradient(X, y, lam, stepsize, maxiter, verbose):
if stepsize <= 0:
print("proximal gradient (line search)")
else:
print("proximal gradient (constant step size)")
print("-" * 50)
start = time.time()
res_onp = lasso_onp(X=X, y=y, lam=lam, stepsize=stepsize, tol=1e-3,
maxiter=maxiter, acceleration=False,
verbose=verbose).state
print("error onp:", res_onp.error)
print("iter_num onp:", res_onp.iter_num)
print("time onp", time.time() - start)
print(flush=True)
start = time.time()
res_jnp = lasso_jnp(X=X, y=y, lam=lam, stepsize=stepsize, tol=1e-3,
maxiter=maxiter, acceleration=False,
verbose=verbose).state
print("error jnp:", res_jnp.error)
print("iter_num jnp:", res_jnp.iter_num)
print("time jnp", time.time() - start)
print(flush=True)
def run_accelerated_proximal_gradient(X, y, lam, stepsize, maxiter, verbose):
if stepsize <= 0:
print("accelerated proximal gradient descent (line search)")
else:
print("accelerated proximal gradient descent (constant step size)")
print("-" * 50)
start = time.time()
res_onp = lasso_onp(X=X, y=y, lam=lam, stepsize=stepsize, tol=1e-3,
maxiter=maxiter, acceleration=True,
verbose=verbose).state
print("error onp:", res_onp.error)
print("iter_num onp:", res_onp.iter_num)
print("time onp", time.time() - start)
print(flush=True)
start = time.time()
res_jnp = lasso_jnp(X=X, y=y, lam=lam, stepsize=stepsize, tol=1e-3,
maxiter=maxiter, acceleration=True, verbose=verbose).state
print("error jnp:", res_jnp.error)
print("iter_num jnp:", res_jnp.iter_num)
print("time jnp", time.time() - start)
print(flush=True)
def load_dataset(dataset, float64=False):
if dataset == "boston":
X, y = datasets.load_boston(return_X_y=True)
elif dataset == "synth":
X, y = datasets.make_classification(n_samples=FLAGS.n_samples,
n_features=FLAGS.n_features,
n_classes=2,
random_state=0)
else:
raise ValueError("Invalid dataset.")
X = preprocessing.Normalizer().fit_transform(X)
if not float64:
X = X.astype(onp.float32)
y = y.astype(onp.float32)
return X, y
def main(argv: Sequence[str]) -> None:
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
if FLAGS.float64:
jax.config.update("jax_enable_x64", True)
X, y = load_dataset(FLAGS.dataset, FLAGS.float64)
print("Dataset:", FLAGS.dataset)
print("n_samples:", X.shape[0])
print("n_features:", X.shape[1])
print("lambda:", FLAGS.lam)
print("maxiter:", FLAGS.maxiter)
print("float64:", FLAGS.float64)
print()
kw = dict(lam=FLAGS.lam, maxiter=FLAGS.maxiter, verbose=FLAGS.verbose)
run_proximal_gradient(X, y, stepsize=1e-3, **kw)
run_proximal_gradient(X, y, stepsize=0, **kw)
run_accelerated_proximal_gradient(X, y, stepsize=1e-3, **kw)
run_accelerated_proximal_gradient(X, y, stepsize=0, **kw)
if __name__ == '__main__':
app.run(main)
|
tests/test_json_loader.py
|
sephiartlist/dynaconf
| 2,293 |
130016
|
<filename>tests/test_json_loader.py
import json
import pytest
from dynaconf import LazySettings
from dynaconf.loaders.json_loader import DynaconfEncoder
from dynaconf.loaders.json_loader import load
from dynaconf.strategies.filtering import PrefixFilter
settings = LazySettings(environments=True, ENV_FOR_DYNACONF="PRODUCTION")
JSON = """
{
"a": "a,b",
"default": {
"password": <PASSWORD>",
"host": "server.com",
"port": "@int 8080",
"alist": ["item1", "item2", 23],
"service": {
"url": "service.com",
"port": 80,
"auth": {
"password": "<PASSWORD>",
"test": 1234
}
}
},
"development": {
"password": <PASSWORD>",
"host": "devserver.com"
},
"production": {
"password": <PASSWORD>",
"host": "prodserver.com"
},
"global": {
"global_value": "global"
}
}
"""
# the @float is not needed in JSON but kept to ensure it works
JSON2 = """
{
"global": {
"secret": "@float 42",
"password": <PASSWORD>,
"host": "otherjson.com"
}
}
"""
JSONS = [JSON, JSON2]
def test_load_from_json():
"""Assert loads from JSON string"""
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
assert settings.PORT == 8080
assert settings.ALIST == ["item1", "item2", 23]
assert settings.SERVICE["url"] == "service.com"
assert settings.SERVICE.url == "service.com"
assert settings.SERVICE.port == 80
assert settings.SERVICE.auth.password == "<PASSWORD>"
assert settings.SERVICE.auth.test == 1234
load(settings, filename=JSON, env="DEVELOPMENT")
assert settings.HOST == "devserver.com"
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
def test_load_from_multiple_json():
"""Assert loads from JSON string"""
load(settings, filename=JSONS)
assert settings.HOST == "otherjson.com"
assert settings.PASSWORD == "<PASSWORD>"
assert settings.SECRET == 42.0
assert settings.PORT == 8080
assert settings.SERVICE["url"] == "service.com"
assert settings.SERVICE.url == "service.com"
assert settings.SERVICE.port == 80
assert settings.SERVICE.auth.password == "<PASSWORD>"
assert settings.SERVICE.auth.test == 1234
load(settings, filename=JSONS, env="DEVELOPMENT")
assert settings.PORT == 8080
assert settings.HOST == "otherjson.com"
load(settings, filename=JSONS)
assert settings.HOST == "otherjson.com"
assert settings.PASSWORD == "<PASSWORD>"
load(settings, filename=JSON, env="DEVELOPMENT")
assert settings.PORT == 8080
assert settings.HOST == "devserver.com"
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
assert settings.PASSWORD == "<PASSWORD>"
def test_no_filename_is_none():
"""Assert if passed no filename return is None"""
assert load(settings) is None
def test_key_error_on_invalid_env():
"""Assert error raised if env is not found in JSON"""
with pytest.raises(KeyError):
load(settings, filename=JSON, env="FOOBAR", silent=False)
def test_no_key_error_on_invalid_env():
"""Assert error raised if env is not found in JSON"""
load(settings, filename=JSON, env="FOOBAR", silent=True)
def test_load_single_key():
"""Test loading a single key"""
_JSON = """
{
"foo": {
"bar": "blaz",
"zaz": "naz"
}
}
"""
load(settings, filename=_JSON, env="FOO", key="bar")
assert settings.BAR == "blaz"
assert settings.exists("BAR") is True
assert settings.exists("ZAZ") is False
def test_empty_value():
load(settings, filename="")
def test_multiple_filenames():
load(settings, filename="a.json,b.json,c.json,d.json")
def test_cleaner():
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
assert settings.PORT == 8080
assert settings.ALIST == ["item1", "item2", 23]
assert settings.SERVICE["url"] == "service.com"
assert settings.SERVICE.url == "service.com"
assert settings.SERVICE.port == 80
assert settings.SERVICE.auth.password == "<PASSWORD>"
assert settings.SERVICE.auth.test == 1234
load(settings, filename=JSON, env="DEVELOPMENT")
assert settings.HOST == "devserver.com"
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
settings.clean()
with pytest.raises(AttributeError):
assert settings.HOST == "prodserver.com"
def test_using_env(tmpdir):
load(settings, filename=JSON)
assert settings.HOST == "prodserver.com"
tmpfile = tmpdir.mkdir("sub").join("test_using_env.json")
tmpfile.write(JSON)
with settings.using_env("DEVELOPMENT", filename=str(tmpfile)):
assert settings.HOST == "devserver.com"
assert settings.HOST == "prodserver.com"
def test_load_dunder():
"""Test loading with dunder settings"""
_JSON = """
{
"foo": {
"colors__yellow__code": "#FFCC00",
"COLORS__yellow__name": "Yellow"
}
}
"""
load(settings, filename=_JSON, env="FOO")
assert settings.COLORS.yellow.code == "#FFCC00"
assert settings.COLORS.yellow.name == "Yellow"
def test_dynaconf_encoder():
class Dummy:
def _dynaconf_encode(self):
return "Dummy"
class DummyNotSerializable:
_dynaconf_encode = 42
data = {"dummy": Dummy()}
data_error = {"dummy": DummyNotSerializable()}
assert json.dumps(data, cls=DynaconfEncoder) == '{"dummy": "Dummy"}'
with pytest.raises(TypeError):
json.dumps(data_error, cls=DynaconfEncoder)
def test_envless():
settings = LazySettings()
_json = """
{
"colors__yellow__code": "#FFCC00",
"COLORS__yellow__name": "Yellow"
}
"""
load(settings, filename=_json)
assert settings.COLORS.yellow.code == "#FFCC00"
assert settings.COLORS.yellow.name == "Yellow"
def test_prefix():
settings = LazySettings(filter_strategy=PrefixFilter("prefix"))
_json = """
{
"prefix_colors__yellow__code": "#FFCC00",
"COLORS__yellow__name": "Yellow"
}
"""
load(settings, filename=_json)
assert settings.COLORS.yellow.code == "#FFCC00"
with pytest.raises(AttributeError):
settings.COLORS.yellow.name
|
stream/clients/python/bookkeeper/common/future/_helpers.py
|
pkumar-singh/bookkeeper
| 1,545 |
130022
|
<reponame>pkumar-singh/bookkeeper
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Private helpers for futures."""
import logging
import threading
_LOGGER = logging.getLogger(__name__)
def start_daemon_thread(*args, **kwargs):
"""Starts a thread and marks it as a daemon thread."""
thread = threading.Thread(*args, **kwargs)
thread.daemon = True
thread.start()
return thread
def safe_invoke_callback(callback, *args, **kwargs):
"""Invoke a callback, swallowing and logging any exceptions."""
# pylint: disable=bare-except
# We intentionally want to swallow all exceptions.
try:
return callback(*args, **kwargs)
except Exception:
_LOGGER.exception('Error while executing Future callback.')
|
deps/v8/tools/testrunner/objects/testcase.py
|
zeusdeux/node
| 239 |
130029
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import output
class TestCase(object):
def __init__(self, suite, path, variant='default', flags=None,
dependency=None):
self.suite = suite # TestSuite object
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.flags = flags or [] # list of strings, flags specific to this test
self.variant = variant # name of the used testing variant
self.dependency = dependency # |path| for testcase that must be run first
self.outcomes = set([])
self.output = None
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
def CopyAddingFlags(self, variant, flags):
copy = TestCase(self.suite, self.path, variant, self.flags + flags,
self.dependency)
copy.outcomes = self.outcomes
return copy
def PackTask(self):
"""
Extracts those parts of this object that are required to run the test
and returns them as a JSON serializable object.
"""
assert self.id is not None
return [self.suitename(), self.path, self.variant, self.flags,
self.dependency, list(self.outcomes or []), self.id]
@staticmethod
def UnpackTask(task):
"""Creates a new TestCase object based on packed task data."""
# For the order of the fields, refer to PackTask() above.
test = TestCase(str(task[0]), task[1], task[2], task[3], task[4])
test.outcomes = set(task[5])
test.id = task[6]
test.run = 1
return test
def SetSuiteObject(self, suites):
self.suite = suites[self.suite]
def PackResult(self):
"""Serializes the output of the TestCase after it has run."""
self.suite.StripOutputForTransmit(self)
return [self.id, self.output.Pack(), self.duration]
def MergeResult(self, result):
"""Applies the contents of a Result to this object."""
assert result[0] == self.id
self.output = output.Output.Unpack(result[1])
self.duration = result[2]
def suitename(self):
return self.suite.name
def GetLabel(self):
return self.suitename() + "/" + self.suite.CommonTestName(self)
|
utils/android/adb_test_runner/main.py
|
lwhsu/swift
| 72,551 |
130034
|
# main.py - Push executables and run them on an Android device -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# lit tests assume a single program can be invoked to execute Swift code and
# make expectations upon the output. This program is a wrapper that has the
# same API, but runs the Swift program on an Android device instead of on the
# host.
#
# ----------------------------------------------------------------------------
from __future__ import print_function
import os
import sys
from adb.commands import execute_on_device
def _usage(program_name):
return 'usage: {} [executable_path] [executable_arguments]'.format(
program_name)
def _help(program_name):
return '{}\n\n'.format(_usage(program_name)) + \
'positional arguments:\n' + \
'\texecutable_path\t\tThe path to a local executable that is to ' + \
'be run on a connected Android device.\n' + \
'\texecutable_arguments\tAdditional arguments that are to be ' + \
'given to the executable when it is run on the device.\n'
def main(args=sys.argv):
"""
The main entry point for adb_test_runner.
Parse arguments and kick off the script. Return zero to indicate success,
a non-zero integer otherwise.
"""
# We don't use argparse, because we need to be able to pass
# --arbitrary -params --like=this to the executable we're running
# on device.
program_name = os.path.basename(args.pop(0))
if len(args) == 1 and args[0] in ['-h', '--help']:
print(_help(program_name))
return 0
try:
executable_path, executable_arguments = args[0], args[1:]
except IndexError:
print(_usage(program_name))
print('{}: error: argument "executable_path" is required'.format(
program_name))
return 1
return execute_on_device(executable_path, executable_arguments)
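# Usage sketch (not part of the runner; the binary path and flags below are
# hypothetical): everything after the program name is forwarded verbatim, so
# lit-style invocations with arbitrary flags pass straight through to the
# device-side execution.
def _example_invocation():
    return main(['adb_test_runner', '/tmp/hello-swift', '--arbitrary', '--like=this'])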
|
src/ostorlab/agent/message/proto/v2/scan_pb2.py
|
bbhunter/ostorlab
| 113 |
130036
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v2/scan.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='v2/scan.proto',
package='',
serialized_pb=_b('\n\rv2/scan.proto\"\x17\n\x04scan\x12\x0f\n\x07scan_id\x18\x01 \x02(\x05')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SCAN = _descriptor.Descriptor(
name='scan',
full_name='scan',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='scan_id', full_name='scan.scan_id', index=0,
number=1, type=5, cpp_type=1, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
oneofs=[
],
serialized_start=17,
serialized_end=40,
)
DESCRIPTOR.message_types_by_name['scan'] = _SCAN
scan = _reflection.GeneratedProtocolMessageType('scan', (_message.Message,), dict(
DESCRIPTOR=_SCAN,
__module__='v2.scan_pb2'
# @@protoc_insertion_point(class_scope:scan)
))
_sym_db.RegisterMessage(scan)
# @@protoc_insertion_point(module_scope)
|
apps/hosts/migrations/Add_blank_in_host_team_URL.py
|
kaustubh-s1/EvalAI
| 1,470 |
130038
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-01-10 18:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("hosts", "0003_challengehostteam_team_url")]
operations = [
migrations.AlterField(
model_name="challengehostteam",
name="team_url",
field=models.CharField(blank=True, default="", max_length=1000),
)
]
|
tasks/language_model/chinese_bert.py
|
chinaliwenbo/ChineseBert
| 298 |
130049
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@file : chinese_bert.py
@author: zijun
@contact : <EMAIL>
@date : 2021/7/5 11:22
@version: 1.0
@desc :
"""
import argparse
from datasets.bert_dataset import BertDataset
from models.modeling_glycebert import GlyceBertModel
def sentence_hidden():
# init args
parser = argparse.ArgumentParser(description="Chinese Bert Hidden")
parser.add_argument("--pretrain_path", required=True, type=str, help="pretrain model path")
parser.add_argument("--sentence", required=True, type=str, help="input sentence")
args = parser.parse_args()
# step 1: tokenizer
tokenizer = BertDataset(args.pretrain_path)
# step 2: load model
chinese_bert = GlyceBertModel.from_pretrained(args.pretrain_path)
# step 3: get hidden
input_ids, pinyin_ids = tokenizer.tokenize_sentence(args.sentence)
length = input_ids.shape[0]
input_ids = input_ids.view(1, length)
pinyin_ids = pinyin_ids.view(1, length, 8)
output_hidden = chinese_bert.forward(input_ids, pinyin_ids)[0]
print(output_hidden)
if __name__ == '__main__':
sentence_hidden()
|
tests/k8s/conftest.py
|
pawelkopka/kopf
| 1,038 |
130088
|
import pytest
@pytest.fixture(autouse=True)
def _autouse_resp_mocker(resp_mocker, version_api):
pass
|
uldlib/cmd.py
|
SpiReCZ/ulozto-streamer
| 120 |
130097
|
<filename>uldlib/cmd.py
import argparse
import sys
import signal
from os import path
from uldlib import downloader, captcha, __version__, __path__
def run():
parser = argparse.ArgumentParser(
description='Download file from Uloz.to using multiple parallel downloads.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument('url', metavar='URL', type=str,
help="URL from Uloz.to (tip: enter in 'quotes' because the URL contains ! sign)")
parser.add_argument('--parts', metavar='N', type=int, default=10,
help='Number of parts that will be downloaded in parallel')
parser.add_argument('--output', metavar='DIRECTORY',
type=str, default="./", help='Target directory')
parser.add_argument('--auto-captcha', default=False, action="store_true",
help='Try to solve captchas automatically using TensorFlow')
parser.add_argument('--version', action='version', version=__version__)
args = parser.parse_args()
if args.auto_captcha:
model_path = path.join(__path__[0], "model.tflite")
model_download_url = "https://github.com/JanPalasek/ulozto-captcha-breaker/releases/download/v2.2/model.tflite"
captcha_solve_fnc = captcha.AutoReadCaptcha(
model_path, model_download_url)
else:
captcha_solve_fnc = captcha.tkinter_user_prompt
d = downloader.Downloader(captcha_solve_fnc)
# Register sigint handler
def sigint_handler(sig, frame):
d.terminate()
print('Program terminated.')
sys.exit(1)
signal.signal(signal.SIGINT, sigint_handler)
d.download(args.url, args.parts, args.output)
d.terminate()
|
python-modules/twisted/twisted/mail/smtp.py
|
stormtheh4ck3r/python-for-android
| 267 |
130106
|
<gh_stars>100-1000
# -*- test-case-name: twisted.mail.test.test_smtp -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Simple Mail Transfer Protocol implementation.
"""
import time, re, base64, types, socket, os, random, rfc822
import binascii
from email.base64MIME import encode as encode_base64
from zope.interface import implements, Interface
from twisted.copyright import longversion
from twisted.protocols import basic
from twisted.protocols import policies
from twisted.internet import protocol
from twisted.internet import defer
from twisted.internet import error
from twisted.internet import reactor
from twisted.internet.interfaces import ITLSTransport
from twisted.python import log
from twisted.python import util
from twisted import cred
from twisted.python.runtime import platform
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Cache the hostname (XXX Yes - this is broken)
if platform.isMacOSX():
# On OS X, getfqdn() is ridiculously slow - use the
# probably-identical-but-sometimes-not gethostname() there.
DNSNAME = socket.gethostname()
else:
DNSNAME = socket.getfqdn()
# Used for fast success code lookup
SUCCESS = dict(map(None, range(200, 300), []))
class IMessageDelivery(Interface):
def receivedHeader(helo, origin, recipients):
"""
Generate the Received header for a message
@type helo: C{(str, str)}
@param helo: The argument to the HELO command and the client's IP
address.
@type origin: C{Address}
@param origin: The address the message is from
@type recipients: C{list} of L{User}
@param recipients: A list of the addresses for which this message
is bound.
@rtype: C{str}
@return: The full \"Received\" header string.
"""
def validateTo(user):
"""
Validate the address for which the message is destined.
@type user: C{User}
@param user: The address to validate.
@rtype: no-argument callable
@return: A C{Deferred} which becomes, or a callable which
takes no arguments and returns an object implementing C{IMessage}.
This will be called and the returned object used to deliver the
message when it arrives.
@raise SMTPBadRcpt: Raised if messages to the address are
not to be accepted.
"""
def validateFrom(helo, origin):
"""
Validate the address from which the message originates.
@type helo: C{(str, str)}
@param helo: The argument to the HELO command and the client's IP
address.
@type origin: C{Address}
@param origin: The address the message is from
@rtype: C{Deferred} or C{Address}
@return: C{origin} or a C{Deferred} whose callback will be
passed C{origin}.
@raise SMTPBadSender: Raised if messages from this address are
not to be accepted.
"""
class IMessageDeliveryFactory(Interface):
"""An alternate interface to implement for handling message delivery.
It is useful to implement this interface instead of L{IMessageDelivery}
directly because it allows the implementor to distinguish between
different messages delivered over the same connection. This can be
used to optimize delivery of a single message to multiple recipients,
something which cannot be done by L{IMessageDelivery} implementors
due to their lack of information.
"""
def getMessageDelivery():
"""Return an L{IMessageDelivery} object.
This will be called once per message.
"""
class SMTPError(Exception):
pass
class SMTPClientError(SMTPError):
"""Base class for SMTP client errors.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=False, retry=False):
"""
@param code: The SMTP response code associated with this error.
@param resp: The string response associated with this error.
@param log: A string log of the exchange leading up to and including
the error.
@type log: L{str}
@param isFatal: A boolean indicating whether this connection can
proceed or not. If True, the connection will be dropped.
@param retry: A boolean indicating whether the delivery should be
retried. If True and the factory indicates further retries are
desirable, they will be attempted, otherwise the delivery will
be failed.
"""
self.code = code
self.resp = resp
self.log = log
self.addresses = addresses
self.isFatal = isFatal
self.retry = retry
def __str__(self):
if self.code > 0:
res = ["%.3d %s" % (self.code, self.resp)]
else:
res = [self.resp]
if self.log:
res.append(self.log)
res.append('')
return '\n'.join(res)
class ESMTPClientError(SMTPClientError):
"""Base class for ESMTP client errors.
"""
class EHLORequiredError(ESMTPClientError):
"""The server does not support EHLO.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class AUTHRequiredError(ESMTPClientError):
"""Authentication was required but the server does not support it.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class TLSRequiredError(ESMTPClientError):
"""Transport security was required but the server does not support it.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class AUTHDeclinedError(ESMTPClientError):
"""The server rejected our credentials.
Either the username, password, or challenge response
given to the server was rejected.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class AuthenticationError(ESMTPClientError):
"""An error ocurred while authenticating.
Either the server rejected our request for authentication or the
challenge received was malformed.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class TLSError(ESMTPClientError):
"""An error occurred while negiotiating for transport security.
This is considered a non-fatal error (the connection will not be
dropped).
"""
class SMTPConnectError(SMTPClientError):
"""Failed to connect to the mail exchange host.
This is considered a fatal error. A retry will be made.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=True, retry=True):
SMTPClientError.__init__(self, code, resp, log, addresses, isFatal, retry)
class SMTPTimeoutError(SMTPClientError):
"""Failed to receive a response from the server in the expected time period.
This is considered a fatal error. A retry will be made.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=True, retry=True):
SMTPClientError.__init__(self, code, resp, log, addresses, isFatal, retry)
class SMTPProtocolError(SMTPClientError):
"""The server sent a mangled response.
This is considered a fatal error. A retry will not be made.
"""
def __init__(self, code, resp, log=None, addresses=None, isFatal=True, retry=False):
SMTPClientError.__init__(self, code, resp, log, addresses, isFatal, retry)
class SMTPDeliveryError(SMTPClientError):
"""Indicates that a delivery attempt has had an error.
"""
class SMTPServerError(SMTPError):
def __init__(self, code, resp):
self.code = code
self.resp = resp
def __str__(self):
return "%.3d %s" % (self.code, self.resp)
class SMTPAddressError(SMTPServerError):
def __init__(self, addr, code, resp):
SMTPServerError.__init__(self, code, resp)
self.addr = Address(addr)
def __str__(self):
return "%.3d <%s>... %s" % (self.code, self.addr, self.resp)
class SMTPBadRcpt(SMTPAddressError):
def __init__(self, addr, code=550,
resp='Cannot receive for specified address'):
SMTPAddressError.__init__(self, addr, code, resp)
class SMTPBadSender(SMTPAddressError):
def __init__(self, addr, code=550, resp='Sender not acceptable'):
SMTPAddressError.__init__(self, addr, code, resp)
def rfc822date(timeinfo=None,local=1):
"""
Format an RFC-2822 compliant date string.
@param timeinfo: (optional) A sequence as returned by C{time.localtime()}
or C{time.gmtime()}. Default is now.
@param local: (optional) Indicates if the supplied time is local or
universal time, or if no time is given, whether now should be local or
universal time. Default is local, as suggested (SHOULD) by rfc-2822.
@returns: A string representing the time and date in RFC-2822 format.
"""
if not timeinfo:
if local:
timeinfo = time.localtime()
else:
timeinfo = time.gmtime()
if local:
if timeinfo[8]:
# DST
tz = -time.altzone
else:
tz = -time.timezone
(tzhr, tzmin) = divmod(abs(tz), 3600)
if tz:
tzhr *= int(abs(tz)/tz)
(tzmin, tzsec) = divmod(tzmin, 60)
else:
(tzhr, tzmin) = (0,0)
return "%s, %02d %s %04d %02d:%02d:%02d %+03d%02d" % (
['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timeinfo[6]],
timeinfo[2],
['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timeinfo[1] - 1],
timeinfo[0], timeinfo[3], timeinfo[4], timeinfo[5],
tzhr, tzmin)
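# A concrete example of the format produced by rfc822date above (an
# illustrative sketch, not part of Twisted): rendering a fixed UTC instant as
# universal time.
def _rfc822date_example():
    # time.gmtime(1262401445) is Sat, 02 Jan 2010 03:04:05 UTC.
    return rfc822date(time.gmtime(1262401445), local=0)
    # -> 'Sat, 02 Jan 2010 03:04:05 +0000'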
def idGenerator():
i = 0
while True:
yield i
i += 1
def messageid(uniq=None, N=idGenerator().next):
"""Return a globally unique random string in RFC 2822 Message-ID format
<<EMAIL>>
Optional uniq string will be added to strengthen uniqueness if given.
"""
datetime = time.strftime('%Y%m%d%H%M%S', time.gmtime())
pid = os.getpid()
rand = random.randrange(2**31L-1)
if uniq is None:
uniq = ''
else:
uniq = '.' + uniq
return '<%s.%s.%s%s.%s@%s>' % (datetime, pid, rand, uniq, N(), DNSNAME)
def quoteaddr(addr):
"""Turn an email address, possibly with realname part etc, into
a form suitable for an SMTP envelope.
"""
if isinstance(addr, Address):
return '<%s>' % str(addr)
res = rfc822.parseaddr(addr)
if res == (None, None):
# It didn't parse, use it as-is
return '<%s>' % str(addr)
else:
return '<%s>' % str(res[1])
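# A quick illustration of quoteaddr above (a sketch, not part of Twisted): a
# realname form collapses to a bare angle-bracketed envelope address.
def _quoteaddr_example():
    return quoteaddr('Foo Bar <foo@example.com>')
    # -> '<foo@example.com>'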
COMMAND, DATA, AUTH = 'COMMAND', 'DATA', 'AUTH'
class AddressError(SMTPError):
"Parse error in address"
# Character classes for parsing addresses
atom = r"[-A-Za-z0-9!\#$%&'*+/=?^_`{|}~]"
class Address:
"""Parse and hold an RFC 2821 address.
Source routes are stripped and ignored; UUCP-style bang-paths
and %-style routing are not parsed.
@type domain: C{str}
@ivar domain: The domain within which this address resides.
@type local: C{str}
@ivar local: The local (\"user\") portion of this address.
"""
tstring = re.compile(r'''( # A string of
(?:"[^"]*" # quoted string
|\\. # backslash-escaped character
|''' + atom + r''' # atom character
)+|.) # or any single character''',re.X)
atomre = re.compile(atom) # match any one atom character
def __init__(self, addr, defaultDomain=None):
if isinstance(addr, User):
addr = addr.dest
if isinstance(addr, Address):
self.__dict__ = addr.__dict__.copy()
return
elif not isinstance(addr, types.StringTypes):
addr = str(addr)
self.addrstr = addr
# Tokenize
atl = filter(None,self.tstring.split(addr))
local = []
domain = []
while atl:
if atl[0] == '<':
if atl[-1] != '>':
raise AddressError, "Unbalanced <>"
atl = atl[1:-1]
elif atl[0] == '@':
atl = atl[1:]
if not local:
# Source route
while atl and atl[0] != ':':
# remove it
atl = atl[1:]
if not atl:
raise AddressError, "Malformed source route"
atl = atl[1:] # remove :
elif domain:
raise AddressError, "Too many @"
else:
# Now in domain
domain = ['']
elif len(atl[0]) == 1 and not self.atomre.match(atl[0]) and atl[0] != '.':
raise AddressError, "Parse error at %r of %r" % (atl[0], (addr, atl))
else:
if not domain:
local.append(atl[0])
else:
domain.append(atl[0])
atl = atl[1:]
self.local = ''.join(local)
self.domain = ''.join(domain)
if self.local != '' and self.domain == '':
if defaultDomain is None:
defaultDomain = DNSNAME
self.domain = defaultDomain
dequotebs = re.compile(r'\\(.)')
def dequote(self,addr):
"""Remove RFC-2821 quotes from address."""
res = []
atl = filter(None,self.tstring.split(str(addr)))
for t in atl:
if t[0] == '"' and t[-1] == '"':
res.append(t[1:-1])
elif '\\' in t:
res.append(self.dequotebs.sub(r'\1',t))
else:
res.append(t)
return ''.join(res)
def __str__(self):
if self.local or self.domain:
return '@'.join((self.local, self.domain))
else:
return ''
def __repr__(self):
return "%s.%s(%s)" % (self.__module__, self.__class__.__name__,
repr(str(self)))
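# Small illustrations of Address parsing (a sketch, not part of Twisted; the
# addresses below are hypothetical): source routes are dropped, and a default
# domain is applied when none is present.
def _address_examples():
    assert str(Address('<@relay.example:bob@example.com>')) == 'bob@example.com'
    assert Address('alice', defaultDomain='example.org').domain == 'example.org'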
class User:
"""Hold information about and SMTP message recipient,
including information on where the message came from
"""
def __init__(self, destination, helo, protocol, orig):
host = getattr(protocol, 'host', None)
self.dest = Address(destination, host)
self.helo = helo
self.protocol = protocol
if isinstance(orig, Address):
self.orig = orig
else:
self.orig = Address(orig, host)
def __getstate__(self):
"""Helper for pickle.
protocol isn't picklable, but we want User to be, so skip it in
the pickle.
"""
return { 'dest' : self.dest,
'helo' : self.helo,
'protocol' : None,
'orig' : self.orig }
def __str__(self):
return str(self.dest)
class IMessage(Interface):
"""Interface definition for messages that can be sent via SMTP."""
def lineReceived(line):
"""handle another line"""
def eomReceived():
"""handle end of message
return a deferred. The deferred should be called with either:
callback(string) or errback(error)
"""
def connectionLost():
"""handle message truncated
semantics should be to discard the message
"""
class SMTP(basic.LineOnlyReceiver, policies.TimeoutMixin):
"""SMTP server-side protocol."""
timeout = 600
host = DNSNAME
portal = None
# Control whether we log SMTP events
noisy = True
# A factory for IMessageDelivery objects. If an
# avatar implementing IMessageDeliveryFactory can
# be acquired from the portal, it will be used to
# create a new IMessageDelivery object for each
# message which is received.
deliveryFactory = None
# An IMessageDelivery object. A new instance is
# used for each message received if we can get an
# IMessageDeliveryFactory from the portal. Otherwise,
# a single instance is used throughout the lifetime
# of the connection.
delivery = None
# Cred cleanup function.
_onLogout = None
def __init__(self, delivery=None, deliveryFactory=None):
self.mode = COMMAND
self._from = None
self._helo = None
self._to = []
self.delivery = delivery
self.deliveryFactory = deliveryFactory
def timeoutConnection(self):
msg = '%s Timeout. Try talking faster next time!' % (self.host,)
self.sendCode(421, msg)
self.transport.loseConnection()
def greeting(self):
return '%s NO UCE NO UBE NO RELAY PROBES' % (self.host,)
def connectionMade(self):
# Ensure user-code always gets something sane for _helo
peer = self.transport.getPeer()
try:
host = peer.host
except AttributeError: # not an IPv4Address
host = str(peer)
self._helo = (None, host)
self.sendCode(220, self.greeting())
self.setTimeout(self.timeout)
def sendCode(self, code, message=''):
"Send an SMTP code with a message."
lines = message.splitlines()
lastline = lines[-1:]
for line in lines[:-1]:
self.sendLine('%3.3d-%s' % (code, line))
self.sendLine('%3.3d %s' % (code,
lastline and lastline[0] or ''))
def lineReceived(self, line):
self.resetTimeout()
return getattr(self, 'state_' + self.mode)(line)
def state_COMMAND(self, line):
# Ignore leading and trailing whitespace, as well as an arbitrary
# amount of whitespace between the command and its argument, though
# this is not required by the protocol; it is simply a nice thing to do.
line = line.strip()
parts = line.split(None, 1)
if parts:
method = self.lookupMethod(parts[0]) or self.do_UNKNOWN
if len(parts) == 2:
method(parts[1])
else:
method('')
else:
self.sendSyntaxError()
def sendSyntaxError(self):
self.sendCode(500, 'Error: bad syntax')
def lookupMethod(self, command):
return getattr(self, 'do_' + command.upper(), None)
def lineLengthExceeded(self, line):
if self.mode is DATA:
for message in self.__messages:
message.connectionLost()
self.mode = COMMAND
del self.__messages
self.sendCode(500, 'Line too long')
def do_UNKNOWN(self, rest):
self.sendCode(500, 'Command not implemented')
def do_HELO(self, rest):
peer = self.transport.getPeer()
try:
host = peer.host
except AttributeError:
host = str(peer)
self._helo = (rest, host)
self._from = None
self._to = []
self.sendCode(250, '%s Hello %s, nice to meet you' % (self.host, host))
def do_QUIT(self, rest):
self.sendCode(221, 'See you later')
self.transport.loseConnection()
# A string of quoted strings, backslash-escaped character or
# atom characters + '@.,:'
qstring = r'("[^"]*"|\\.|' + atom + r'|[@.,:])+'
mail_re = re.compile(r'''\s*FROM:\s*(?P<path><> # Empty <>
|<''' + qstring + r'''> # <addr>
|''' + qstring + r''' # addr
)\s*(\s(?P<opts>.*))? # Optional WS + ESMTP options
$''',re.I|re.X)
rcpt_re = re.compile(r'\s*TO:\s*(?P<path><' + qstring + r'''> # <addr>
|''' + qstring + r''' # addr
)\s*(\s(?P<opts>.*))? # Optional WS + ESMTP options
$''',re.I|re.X)
def do_MAIL(self, rest):
if self._from:
self.sendCode(503,"Only one sender per message, please")
return
# Clear old recipient list
self._to = []
m = self.mail_re.match(rest)
if not m:
self.sendCode(501, "Syntax error")
return
try:
addr = Address(m.group('path'), self.host)
except AddressError, e:
self.sendCode(553, str(e))
return
validated = defer.maybeDeferred(self.validateFrom, self._helo, addr)
validated.addCallbacks(self._cbFromValidate, self._ebFromValidate)
def _cbFromValidate(self, from_, code=250, msg='Sender address accepted'):
self._from = from_
self.sendCode(code, msg)
def _ebFromValidate(self, failure):
if failure.check(SMTPBadSender):
self.sendCode(failure.value.code,
'Cannot receive from specified address %s: %s'
% (quoteaddr(failure.value.addr), failure.value.resp))
elif failure.check(SMTPServerError):
self.sendCode(failure.value.code, failure.value.resp)
else:
log.err(failure, "SMTP sender validation failure")
self.sendCode(
451,
'Requested action aborted: local error in processing')
def do_RCPT(self, rest):
if not self._from:
self.sendCode(503, "Must have sender before recipient")
return
m = self.rcpt_re.match(rest)
if not m:
self.sendCode(501, "Syntax error")
return
try:
user = User(m.group('path'), self._helo, self, self._from)
except AddressError, e:
self.sendCode(553, str(e))
return
d = defer.maybeDeferred(self.validateTo, user)
d.addCallbacks(
self._cbToValidate,
self._ebToValidate,
callbackArgs=(user,)
)
def _cbToValidate(self, to, user=None, code=250, msg='Recipient address accepted'):
if user is None:
user = to
self._to.append((user, to))
self.sendCode(code, msg)
def _ebToValidate(self, failure):
if failure.check(SMTPBadRcpt, SMTPServerError):
self.sendCode(failure.value.code, failure.value.resp)
else:
log.err(failure)
self.sendCode(
451,
'Requested action aborted: local error in processing'
)
def _disconnect(self, msgs):
for msg in msgs:
try:
msg.connectionLost()
except:
log.msg("msg raised exception from connectionLost")
log.err()
def do_DATA(self, rest):
if self._from is None or (not self._to):
self.sendCode(503, 'Must have valid receiver and originator')
return
self.mode = DATA
helo, origin = self._helo, self._from
recipients = self._to
self._from = None
self._to = []
self.datafailed = None
msgs = []
for (user, msgFunc) in recipients:
try:
msg = msgFunc()
rcvdhdr = self.receivedHeader(helo, origin, [user])
if rcvdhdr:
msg.lineReceived(rcvdhdr)
msgs.append(msg)
except SMTPServerError, e:
self.sendCode(e.code, e.resp)
self.mode = COMMAND
self._disconnect(msgs)
return
except:
log.err()
self.sendCode(550, "Internal server error")
self.mode = COMMAND
self._disconnect(msgs)
return
self.__messages = msgs
self.__inheader = self.__inbody = 0
self.sendCode(354, 'Continue')
if self.noisy:
fmt = 'Receiving message for delivery: from=%s to=%s'
log.msg(fmt % (origin, [str(u) for (u, f) in recipients]))
def connectionLost(self, reason):
# self.sendCode(421, 'Dropping connection.') # This does nothing...
# Ideally, if we (rather than the other side) lose the connection,
# we should be able to tell the other side that we are going away.
# RFC-2821 requires that we try.
if self.mode is DATA:
try:
for message in self.__messages:
try:
message.connectionLost()
except:
log.err()
del self.__messages
except AttributeError:
pass
if self._onLogout:
self._onLogout()
self._onLogout = None
self.setTimeout(None)
def do_RSET(self, rest):
self._from = None
self._to = []
self.sendCode(250, 'I remember nothing.')
def dataLineReceived(self, line):
if line[:1] == '.':
if line == '.':
self.mode = COMMAND
if self.datafailed:
self.sendCode(self.datafailed.code,
self.datafailed.resp)
return
if not self.__messages:
self._messageHandled("thrown away")
return
defer.DeferredList([
m.eomReceived() for m in self.__messages
], consumeErrors=True).addCallback(self._messageHandled
)
del self.__messages
return
line = line[1:]
if self.datafailed:
return
try:
# Add a blank line between the generated Received:-header
# and the message body if the message comes in without any
# headers
if not self.__inheader and not self.__inbody:
if ':' in line:
self.__inheader = 1
elif line:
for message in self.__messages:
message.lineReceived('')
self.__inbody = 1
if not line:
self.__inbody = 1
for message in self.__messages:
message.lineReceived(line)
except SMTPServerError, e:
self.datafailed = e
for message in self.__messages:
message.connectionLost()
state_DATA = dataLineReceived
def _messageHandled(self, resultList):
failures = 0
for (success, result) in resultList:
if not success:
failures += 1
log.err(result)
if failures:
msg = 'Could not send e-mail'
L = len(resultList)
if L > 1:
msg += ' (%d failures out of %d recipients)' % (failures, L)
self.sendCode(550, msg)
else:
self.sendCode(250, 'Delivery in progress')
def _cbAnonymousAuthentication(self, (iface, avatar, logout)):
"""
Save the state resulting from a successful anonymous cred login.
"""
if issubclass(iface, IMessageDeliveryFactory):
self.deliveryFactory = avatar
self.delivery = None
elif issubclass(iface, IMessageDelivery):
self.deliveryFactory = None
self.delivery = avatar
else:
raise RuntimeError("%s is not a supported interface" % (iface.__name__,))
self._onLogout = logout
self.challenger = None
# overridable methods:
def validateFrom(self, helo, origin):
"""
Validate the address from which the message originates.
@type helo: C{(str, str)}
@param helo: The argument to the HELO command and the client's IP
address.
@type origin: C{Address}
@param origin: The address the message is from
@rtype: C{Deferred} or C{Address}
@return: C{origin} or a C{Deferred} whose callback will be
passed C{origin}.
@raise SMTPBadSender: Raised if messages from this address are
not to be accepted.
"""
if self.deliveryFactory is not None:
self.delivery = self.deliveryFactory.getMessageDelivery()
if self.delivery is not None:
return defer.maybeDeferred(self.delivery.validateFrom,
helo, origin)
# No login has been performed, no default delivery object has been
# provided: try to perform an anonymous login and then invoke this
# method again.
if self.portal:
result = self.portal.login(
cred.credentials.Anonymous(),
None,
IMessageDeliveryFactory, IMessageDelivery)
def ebAuthentication(err):
"""
Translate cred exceptions into SMTP exceptions so that the
protocol code which invokes C{validateFrom} can properly report
the failure.
"""
if err.check(cred.error.UnauthorizedLogin):
exc = SMTPBadSender(origin)
elif err.check(cred.error.UnhandledCredentials):
exc = SMTPBadSender(
origin, resp="Unauthenticated senders not allowed")
else:
return err
return defer.fail(exc)
result.addCallbacks(
self._cbAnonymousAuthentication, ebAuthentication)
def continueValidation(ignored):
"""
Re-attempt from address validation.
"""
return self.validateFrom(helo, origin)
result.addCallback(continueValidation)
return result
raise SMTPBadSender(origin)
def validateTo(self, user):
"""
Validate the address for which the message is destined.
@type user: C{User}
@param user: The address to validate.
@rtype: no-argument callable
@return: A C{Deferred} which becomes, or a callable which
takes no arguments and returns an object implementing C{IMessage}.
This will be called and the returned object used to deliver the
message when it arrives.
@raise SMTPBadRcpt: Raised if messages to the address are
not to be accepted.
"""
if self.delivery is not None:
return self.delivery.validateTo(user)
raise SMTPBadRcpt(user)
def receivedHeader(self, helo, origin, recipients):
if self.delivery is not None:
return self.delivery.receivedHeader(helo, origin, recipients)
heloStr = ""
if helo[0]:
heloStr = " helo=%s" % (helo[0],)
domain = self.transport.getHost().host
from_ = "from %s ([%s]%s)" % (helo[0], helo[1], heloStr)
by = "by %s with %s (%s)" % (domain,
self.__class__.__name__,
longversion)
for_ = "for %s; %s" % (' '.join(map(str, recipients)),
rfc822date())
return "Received: %s\n\t%s\n\t%s" % (from_, by, for_)
def startMessage(self, recipients):
if self.delivery:
return self.delivery.startMessage(recipients)
return []
class SMTPFactory(protocol.ServerFactory):
"""Factory for SMTP."""
# override in instances or subclasses
domain = DNSNAME
timeout = 600
protocol = SMTP
portal = None
def __init__(self, portal = None):
self.portal = portal
def buildProtocol(self, addr):
p = protocol.ServerFactory.buildProtocol(self, addr)
p.portal = self.portal
p.host = self.domain
return p
class SMTPClient(basic.LineReceiver, policies.TimeoutMixin):
"""
SMTP client for sending emails.
After the client has connected to the SMTP server, it repeatedly calls
L{SMTPClient.getMailFrom}, L{SMTPClient.getMailTo} and
L{SMTPClient.getMailData} and uses this information to send an email.
It then calls L{SMTPClient.getMailFrom} again; if it returns C{None}, the
client will disconnect, otherwise it will continue as normal i.e. call
L{SMTPClient.getMailTo} and L{SMTPClient.getMailData} and send a new email.
"""
# If enabled, log SMTP client/server communication
debug = True
# Number of seconds to wait before timing out a connection. If
# None, perform no timeout checking.
timeout = None
def __init__(self, identity, logsize=10):
self.identity = identity or ''
self.toAddressesResult = []
self.successAddresses = []
self._from = None
self.resp = []
self.code = -1
self.log = util.LineLog(logsize)
def sendLine(self, line):
# Log sendLine only if you are in debug mode for performance
if self.debug:
self.log.append('>>> ' + line)
basic.LineReceiver.sendLine(self,line)
def connectionMade(self):
self.setTimeout(self.timeout)
self._expected = [ 220 ]
self._okresponse = self.smtpState_helo
self._failresponse = self.smtpConnectionFailed
def connectionLost(self, reason=protocol.connectionDone):
"""We are no longer connected"""
self.setTimeout(None)
self.mailFile = None
def timeoutConnection(self):
self.sendError(
SMTPTimeoutError(
-1, "Timeout waiting for SMTP server response",
self.log.str()))
def lineReceived(self, line):
self.resetTimeout()
# Log lineReceived only if you are in debug mode for performance
if self.debug:
self.log.append('<<< ' + line)
why = None
try:
self.code = int(line[:3])
except ValueError:
# This is a fatal error; the transport will be disconnected and lineReceived will not be called again
self.sendError(SMTPProtocolError(-1, "Invalid response from SMTP server: %s" % line, self.log.str()))
return
if line[0] == '0':
# Verbose informational message, ignore it
return
self.resp.append(line[4:])
if line[3:4] == '-':
# continuation
return
if self.code in self._expected:
why = self._okresponse(self.code,'\n'.join(self.resp))
else:
why = self._failresponse(self.code,'\n'.join(self.resp))
self.code = -1
self.resp = []
return why
def smtpConnectionFailed(self, code, resp):
self.sendError(SMTPConnectError(code, resp, self.log.str()))
def smtpTransferFailed(self, code, resp):
if code < 0:
self.sendError(SMTPProtocolError(code, resp, self.log.str()))
else:
self.smtpState_msgSent(code, resp)
def smtpState_helo(self, code, resp):
self.sendLine('HELO ' + self.identity)
self._expected = SUCCESS
self._okresponse = self.smtpState_from
def smtpState_from(self, code, resp):
self._from = self.getMailFrom()
self._failresponse = self.smtpTransferFailed
if self._from is not None:
self.sendLine('MAIL FROM:%s' % quoteaddr(self._from))
self._expected = [250]
self._okresponse = self.smtpState_to
else:
# All messages have been sent, disconnect
self._disconnectFromServer()
def smtpState_disconnect(self, code, resp):
self.transport.loseConnection()
def smtpState_to(self, code, resp):
self.toAddresses = iter(self.getMailTo())
self.toAddressesResult = []
self.successAddresses = []
self._okresponse = self.smtpState_toOrData
self._expected = xrange(0,1000)
self.lastAddress = None
return self.smtpState_toOrData(0, '')
def smtpState_toOrData(self, code, resp):
if self.lastAddress is not None:
self.toAddressesResult.append((self.lastAddress, code, resp))
if code in SUCCESS:
self.successAddresses.append(self.lastAddress)
try:
self.lastAddress = self.toAddresses.next()
except StopIteration:
if self.successAddresses:
self.sendLine('DATA')
self._expected = [ 354 ]
self._okresponse = self.smtpState_data
else:
return self.smtpState_msgSent(code,'No recipients accepted')
else:
self.sendLine('RCPT TO:%s' % quoteaddr(self.lastAddress))
def smtpState_data(self, code, resp):
s = basic.FileSender()
d = s.beginFileTransfer(
self.getMailData(), self.transport, self.transformChunk)
def ebTransfer(err):
self.sendError(err.value)
d.addCallbacks(self.finishedFileTransfer, ebTransfer)
self._expected = SUCCESS
self._okresponse = self.smtpState_msgSent
def smtpState_msgSent(self, code, resp):
if self._from is not None:
self.sentMail(code, resp, len(self.successAddresses),
self.toAddressesResult, self.log)
self.toAddressesResult = []
self._from = None
self.sendLine('RSET')
self._expected = SUCCESS
self._okresponse = self.smtpState_from
##
## Helpers for FileSender
##
def transformChunk(self, chunk):
"""
Perform the necessary local to network newline conversion and escape
leading periods.
This method also resets the idle timeout so that, as long as progress is
being made sending the message body, the client will not time out.
"""
self.resetTimeout()
return chunk.replace('\n', '\r\n').replace('\r\n.', '\r\n..')
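# Worked example (illustrative input): a chunk of 'foo\n.bar\n' becomes
# 'foo\r\n..bar\r\n' -- newlines are first converted to CRLF, then any period
# that follows a CRLF is doubled so the server cannot mistake it for the
# end-of-data marker.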
def finishedFileTransfer(self, lastsent):
if lastsent != '\n':
line = '\r\n.'
else:
line = '.'
self.sendLine(line)
##
# These methods should be overridden in subclasses
def getMailFrom(self):
"""Return the email address the mail is from."""
raise NotImplementedError
def getMailTo(self):
"""Return a list of emails to send to."""
raise NotImplementedError
def getMailData(self):
"""Return file-like object containing data of message to be sent.
Lines in the file should be delimited by '\\n'.
"""
raise NotImplementedError
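# A minimal, purely illustrative subclass (all names below are hypothetical)
# overrides the three hooks above, e.g.:
#
#   class SingleMessageSMTPClient(SMTPClient):
#       def __init__(self, identity, sender, recipients, msgFile):
#           SMTPClient.__init__(self, identity)
#           self._sender = sender
#           self._recipients = recipients
#           self._msgFile = msgFile
#           self._sent = False
#       def getMailFrom(self):
#           if self._sent:
#               return None         # nothing left to send; disconnect
#           self._sent = True
#           return self._sender
#       def getMailTo(self):
#           return self._recipients
#       def getMailData(self):
#           return self._msgFile    # file-like object with '\n'-delimited lines
#       def sentMail(self, code, resp, numOk, addresses, log):
#           pass                    # inspect the delivery results here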
def sendError(self, exc):
"""
If an error occurs before a mail message is sent, sendError will be
called. If the error is non-fatal, this base class method sends a QUIT
before disconnecting; otherwise it closes the transport immediately.
@param exc: The SMTPClientError (or child class) raised
@type exc: C{SMTPClientError}
"""
if isinstance(exc, SMTPClientError) and not exc.isFatal:
self._disconnectFromServer()
else:
# If the error was fatal then the communication channel with the
# SMTP Server is broken so just close the transport connection
self.smtpState_disconnect(-1, None)
def sentMail(self, code, resp, numOk, addresses, log):
"""Called when an attempt to send an email is completed.
If some addresses were accepted, code and resp are the response
to the DATA command. If no addresses were accepted, code is -1
and resp is an informative message.
@param code: the code returned by the SMTP Server
@param resp: The string response returned from the SMTP Server
@param numOk: the number of addresses accepted by the remote host.
@param addresses: a list of tuples (address, code, resp) listing
the response to each RCPT command.
@param log: the SMTP session log
"""
raise NotImplementedError
def _disconnectFromServer(self):
self._expected = xrange(0, 1000)
self._okresponse = self.smtpState_disconnect
self.sendLine('QUIT')
class ESMTPClient(SMTPClient):
# Fall back to HELO if the server does not support EHLO
heloFallback = True
# Refuse to proceed if authentication cannot be performed
requireAuthentication = False
# Refuse to proceed if TLS is not available
requireTransportSecurity = False
# Indicate whether or not our transport can be considered secure.
tlsMode = False
# ClientContextFactory to use for STARTTLS
context = None
def __init__(self, secret, contextFactory=None, *args, **kw):
SMTPClient.__init__(self, *args, **kw)
self.authenticators = []
self.secret = secret
self.context = contextFactory
self.tlsMode = False
def esmtpEHLORequired(self, code=-1, resp=None):
self.sendError(EHLORequiredError(502, "Server does not support ESMTP Authentication", self.log.str()))
def esmtpAUTHRequired(self, code=-1, resp=None):
tmp = []
for a in self.authenticators:
tmp.append(a.getName().upper())
auth = "[%s]" % ', '.join(tmp)
self.sendError(AUTHRequiredError(502, "Server does not support Client Authentication schemes %s" % auth,
self.log.str()))
def esmtpTLSRequired(self, code=-1, resp=None):
self.sendError(TLSRequiredError(502, "Server does not support secure communication via TLS / SSL",
self.log.str()))
def esmtpTLSFailed(self, code=-1, resp=None):
self.sendError(TLSError(code, "Could not complete the SSL/TLS handshake", self.log.str()))
def esmtpAUTHDeclined(self, code=-1, resp=None):
self.sendError(AUTHDeclinedError(code, resp, self.log.str()))
def esmtpAUTHMalformedChallenge(self, code=-1, resp=None):
str = "Login failed because the SMTP Server returned a malformed Authentication Challenge"
self.sendError(AuthenticationError(501, str, self.log.str()))
def esmtpAUTHServerError(self, code=-1, resp=None):
self.sendError(AuthenticationError(code, resp, self.log.str()))
def registerAuthenticator(self, auth):
"""Registers an Authenticator with the ESMTPClient. The ESMTPClient
will attempt to log in to the SMTP server in the order the
Authenticators are registered. The most secure Authentication
mechanism should be registered first.
@param auth: The Authentication mechanism to register
@type auth: class implementing C{IClientAuthentication}
"""
self.authenticators.append(auth)
def connectionMade(self):
SMTPClient.connectionMade(self)
self._okresponse = self.esmtpState_ehlo
def esmtpState_ehlo(self, code, resp):
self._expected = SUCCESS
self._okresponse = self.esmtpState_serverConfig
self._failresponse = self.esmtpEHLORequired
if self.heloFallback:
self._failresponse = self.smtpState_helo
self.sendLine('EHLO ' + self.identity)
def esmtpState_serverConfig(self, code, resp):
items = {}
for line in resp.splitlines():
e = line.split(None, 1)
if len(e) > 1:
items[e[0]] = e[1]
else:
items[e[0]] = None
if self.tlsMode:
self.authenticate(code, resp, items)
else:
self.tryTLS(code, resp, items)
def tryTLS(self, code, resp, items):
if self.context and 'STARTTLS' in items:
self._expected = [220]
self._okresponse = self.esmtpState_starttls
self._failresponse = self.esmtpTLSFailed
self.sendLine('STARTTLS')
elif self.requireTransportSecurity:
self.tlsMode = False
self.esmtpTLSRequired()
else:
self.tlsMode = False
self.authenticate(code, resp, items)
def esmtpState_starttls(self, code, resp):
try:
self.transport.startTLS(self.context)
self.tlsMode = True
except:
log.err()
self.esmtpTLSFailed(451)
# Send another EHLO once TLS has been started to
# get the TLS / AUTH schemes. Some servers only allow AUTH in TLS mode.
self.esmtpState_ehlo(code, resp)
def authenticate(self, code, resp, items):
if self.secret and items.get('AUTH'):
schemes = items['AUTH'].split()
tmpSchemes = {}
#XXX: May want to come up with a more efficient way to do this
for s in schemes:
tmpSchemes[s.upper()] = 1
for a in self.authenticators:
auth = a.getName().upper()
if auth in tmpSchemes:
self._authinfo = a
# PLAIN is a special case: the initial response is sent along with the AUTH command
if auth == "PLAIN":
self._okresponse = self.smtpState_from
self._failresponse = self._esmtpState_plainAuth
self._expected = [235]
challenge = encode_base64(self._authinfo.challengeResponse(self.secret, 1), eol="")
self.sendLine('AUTH ' + auth + ' ' + challenge)
else:
self._expected = [334]
self._okresponse = self.esmtpState_challenge
# If some error occurs here, the server declined the AUTH
# before the user / password phase. This would be
# a very rare case
self._failresponse = self.esmtpAUTHServerError
self.sendLine('AUTH ' + auth)
return
if self.requireAuthentication:
self.esmtpAUTHRequired()
else:
self.smtpState_from(code, resp)
def _esmtpState_plainAuth(self, code, resp):
self._okresponse = self.smtpState_from
self._failresponse = self.esmtpAUTHDeclined
self._expected = [235]
challenge = encode_base64(self._authinfo.challengeResponse(self.secret, 2), eol="")
self.sendLine('AUTH PLAIN ' + challenge)
def esmtpState_challenge(self, code, resp):
self._authResponse(self._authinfo, resp)
def _authResponse(self, auth, challenge):
self._failresponse = self.esmtpAUTHDeclined
try:
challenge = base64.decodestring(challenge)
except binascii.Error:
# Illegal challenge, give up, then quit
self.sendLine('*')
self._okresponse = self.esmtpAUTHMalformedChallenge
self._failresponse = self.esmtpAUTHMalformedChallenge
else:
resp = auth.challengeResponse(self.secret, challenge)
self._expected = [235, 334]
self._okresponse = self.smtpState_maybeAuthenticated
self.sendLine(encode_base64(resp, eol=""))
def smtpState_maybeAuthenticated(self, code, resp):
"""
Called to handle the next message from the server after sending a
response to a SASL challenge. The server response might be another
challenge or it might indicate authentication has succeeded.
"""
if code == 235:
# Yes, authenticated!
del self._authinfo
self.smtpState_from(code, resp)
else:
# No, not authenticated yet. Keep trying.
self._authResponse(self._authinfo, resp)
class ESMTP(SMTP):
ctx = None
canStartTLS = False
startedTLS = False
authenticated = False
def __init__(self, chal = None, contextFactory = None):
SMTP.__init__(self)
if chal is None:
chal = {}
self.challengers = chal
self.authenticated = False
self.ctx = contextFactory
def connectionMade(self):
SMTP.connectionMade(self)
self.canStartTLS = ITLSTransport.providedBy(self.transport)
self.canStartTLS = self.canStartTLS and (self.ctx is not None)
def greeting(self):
return SMTP.greeting(self) + ' ESMTP'
def extensions(self):
ext = {'AUTH': self.challengers.keys()}
if self.canStartTLS and not self.startedTLS:
ext['STARTTLS'] = None
return ext
def lookupMethod(self, command):
m = SMTP.lookupMethod(self, command)
if m is None:
m = getattr(self, 'ext_' + command.upper(), None)
return m
def listExtensions(self):
r = []
for (c, v) in self.extensions().iteritems():
if v is not None:
if v:
# Intentionally omit extensions with empty argument lists
r.append('%s %s' % (c, ' '.join(v)))
else:
r.append(c)
return '\n'.join(r)
def do_EHLO(self, rest):
peer = self.transport.getPeer().host
self._helo = (rest, peer)
self._from = None
self._to = []
self.sendCode(
250,
'%s Hello %s, nice to meet you\n%s' % (
self.host, peer,
self.listExtensions(),
)
)
def ext_STARTTLS(self, rest):
if self.startedTLS:
self.sendCode(503, 'TLS already negotiated')
elif self.ctx and self.canStartTLS:
self.sendCode(220, 'Begin TLS negotiation now')
self.transport.startTLS(self.ctx)
self.startedTLS = True
else:
self.sendCode(454, 'TLS not available')
def ext_AUTH(self, rest):
if self.authenticated:
self.sendCode(503, 'Already authenticated')
return
parts = rest.split(None, 1)
chal = self.challengers.get(parts[0].upper(), lambda: None)()
if not chal:
self.sendCode(504, 'Unrecognized authentication type')
return
self.mode = AUTH
self.challenger = chal
if len(parts) > 1:
chal.getChallenge() # Discard it, apparently the client does not
# care about it.
rest = parts[1]
else:
rest = None
self.state_AUTH(rest)
def _cbAuthenticated(self, loginInfo):
"""
Save the state resulting from a successful cred login and mark this
connection as authenticated.
"""
result = SMTP._cbAnonymousAuthentication(self, loginInfo)
self.authenticated = True
return result
def _ebAuthenticated(self, reason):
"""
Handle cred login errors by translating them into an SMTP authentication
failure response. Translate all other errors into a generic SMTP error code and
log the failure for inspection. Stop all errors from propagating.
"""
self.challenge = None
if reason.check(cred.error.UnauthorizedLogin):
self.sendCode(535, 'Authentication failed')
else:
log.err(reason, "SMTP authentication failure")
self.sendCode(
451,
'Requested action aborted: local error in processing')
def state_AUTH(self, response):
"""
Handle one step of challenge/response authentication.
@param response: The text of a response. If None, this
function has been called as a result of an AUTH command with
no initial response. A response of '*' aborts authentication,
as per RFC 2554.
"""
if self.portal is None:
self.sendCode(454, 'Temporary authentication failure')
self.mode = COMMAND
return
if response is None:
challenge = self.challenger.getChallenge()
encoded = challenge.encode('base64')
self.sendCode(334, encoded)
return
if response == '*':
self.sendCode(501, 'Authentication aborted')
self.challenger = None
self.mode = COMMAND
return
try:
uncoded = response.decode('base64')
except binascii.Error:
self.sendCode(501, 'Syntax error in parameters or arguments')
self.challenger = None
self.mode = COMMAND
return
self.challenger.setResponse(uncoded)
if self.challenger.moreChallenges():
challenge = self.challenger.getChallenge()
coded = challenge.encode('base64')[:-1]
self.sendCode(334, coded)
return
self.mode = COMMAND
result = self.portal.login(
self.challenger, None,
IMessageDeliveryFactory, IMessageDelivery)
result.addCallback(self._cbAuthenticated)
result.addCallback(lambda ign: self.sendCode(235, 'Authentication successful.'))
result.addErrback(self._ebAuthenticated)
class SenderMixin:
"""Utility class for sending emails easily.
Use with SMTPSenderFactory or ESMTPSenderFactory.
"""
done = 0
def getMailFrom(self):
if not self.done:
self.done = 1
return str(self.factory.fromEmail)
else:
return None
def getMailTo(self):
return self.factory.toEmail
def getMailData(self):
return self.factory.file
def sendError(self, exc):
# Call the base class to close the connection with the SMTP server
SMTPClient.sendError(self, exc)
# Do not retry connecting to the SMTP server if:
# 1. No more retries are left (this allows the correct error to be returned to the errback)
# 2. The error is not marked as retriable (exc.retry is false)
# 3. The error code is not in the 4xx range (transient communication errors)
if (self.factory.retries >= 0 or
(not exc.retry and not (exc.code >= 400 and exc.code < 500))):
self.factory.sendFinished = 1
self.factory.result.errback(exc)
def sentMail(self, code, resp, numOk, addresses, log):
# Do not retry, the SMTP server acknowledged the request
self.factory.sendFinished = 1
if code not in SUCCESS:
errlog = []
for addr, acode, aresp in addresses:
if acode not in SUCCESS:
errlog.append("%s: %03d %s" % (addr, acode, aresp))
errlog.append(log.str())
exc = SMTPDeliveryError(code, resp, '\n'.join(errlog), addresses)
self.factory.result.errback(exc)
else:
self.factory.result.callback((numOk, addresses))
class SMTPSender(SenderMixin, SMTPClient):
"""
SMTP protocol that sends a single email based on information it
gets from its factory, a L{SMTPSenderFactory}.
"""
class SMTPSenderFactory(protocol.ClientFactory):
"""
Utility factory for sending emails easily.
"""
domain = DNSNAME
protocol = SMTPSender
def __init__(self, fromEmail, toEmail, file, deferred, retries=5,
timeout=None):
"""
@param fromEmail: The RFC 2821 address from which to send this
message.
@param toEmail: A sequence of RFC 2821 addresses to which to
send this message.
@param file: A file-like object containing the message to send.
@param deferred: A Deferred to callback or errback when sending
of this message completes.
@param retries: The number of times to retry delivery of this
message.
@param timeout: Period, in seconds, for which to wait for
server responses, or None to wait forever.
"""
assert isinstance(retries, (int, long))
if isinstance(toEmail, types.StringTypes):
toEmail = [toEmail]
self.fromEmail = Address(fromEmail)
self.nEmails = len(toEmail)
self.toEmail = toEmail
self.file = file
self.result = deferred
self.result.addBoth(self._removeDeferred)
self.sendFinished = 0
self.retries = -retries
self.timeout = timeout
def _removeDeferred(self, argh):
del self.result
return argh
def clientConnectionFailed(self, connector, err):
self._processConnectionError(connector, err)
def clientConnectionLost(self, connector, err):
self._processConnectionError(connector, err)
def _processConnectionError(self, connector, err):
if self.retries < self.sendFinished <= 0:
log.msg("SMTP Client retrying server. Retry: %s" % -self.retries)
# Rewind the file in case part of it was read while attempting to
# send the message.
self.file.seek(0, 0)
connector.connect()
self.retries += 1
elif self.sendFinished <= 0:
# If we were unable to communicate with the SMTP server, a ConnectionDone will be
# returned. We want a clearer error message for debugging.
if err.check(error.ConnectionDone):
err.value = SMTPConnectError(-1, "Unable to connect to server.")
self.result.errback(err.value)
def buildProtocol(self, addr):
p = self.protocol(self.domain, self.nEmails*2+2)
p.factory = self
p.timeout = self.timeout
return p
from twisted.mail.imap4 import IClientAuthentication
from twisted.mail.imap4 import CramMD5ClientAuthenticator, LOGINAuthenticator
class PLAINAuthenticator:
implements(IClientAuthentication)
def __init__(self, user):
self.user = user
def getName(self):
return "PLAIN"
def challengeResponse(self, secret, chal=1):
if chal == 1:
return "%s\0%s\0%s" % (self.user, self.user, secret)
else:
return "%s\0%s" % (self.user, secret)
class ESMTPSender(SenderMixin, ESMTPClient):
requireAuthentication = True
requireTransportSecurity = True
def __init__(self, username, secret, contextFactory=None, *args, **kw):
self.heloFallback = 0
self.username = username
if contextFactory is None:
contextFactory = self._getContextFactory()
ESMTPClient.__init__(self, secret, contextFactory, *args, **kw)
self._registerAuthenticators()
def _registerAuthenticators(self):
# Register Authenticator in order from most secure to least secure
self.registerAuthenticator(CramMD5ClientAuthenticator(self.username))
self.registerAuthenticator(LOGINAuthenticator(self.username))
self.registerAuthenticator(PLAINAuthenticator(self.username))
def _getContextFactory(self):
if self.context is not None:
return self.context
try:
from twisted.internet import ssl
except ImportError:
return None
else:
try:
context = ssl.ClientContextFactory()
context.method = ssl.SSL.TLSv1_METHOD
return context
except AttributeError:
return None
class ESMTPSenderFactory(SMTPSenderFactory):
"""
Utility factory for sending emails easily.
"""
protocol = ESMTPSender
def __init__(self, username, password, fromEmail, toEmail, file,
deferred, retries=5, timeout=None,
contextFactory=None, heloFallback=False,
requireAuthentication=True,
requireTransportSecurity=True):
SMTPSenderFactory.__init__(self, fromEmail, toEmail, file, deferred, retries, timeout)
self.username = username
self.password = password
self._contextFactory = contextFactory
self._heloFallback = heloFallback
self._requireAuthentication = requireAuthentication
self._requireTransportSecurity = requireTransportSecurity
def buildProtocol(self, addr):
p = self.protocol(self.username, self.password, self._contextFactory, self.domain, self.nEmails*2+2)
p.heloFallback = self._heloFallback
p.requireAuthentication = self._requireAuthentication
p.requireTransportSecurity = self._requireTransportSecurity
p.factory = self
p.timeout = self.timeout
return p
def sendmail(smtphost, from_addr, to_addrs, msg, senderDomainName=None, port=25):
"""Send an email
This interface is intended to be a direct replacement for
smtplib.SMTP.sendmail() (with the obvious change that
you specify the smtphost as well). Also, ESMTP options
are not accepted, as we don't do ESMTP yet. I reserve the
right to implement the ESMTP options differently.
@param smtphost: The host the message should be sent to
@param from_addr: The (envelope) address sending this mail.
@param to_addrs: A list of addresses to send this mail to. A string will
be treated as a list of one address
@param msg: The message, including headers, either as a file or a string.
File-like objects need to support read() and close(). Lines must be
delimited by '\\n'. If you pass something that doesn't look like a
file, we try to convert it to a string (so you should be able to
pass an email.Message directly, but doing the conversion with
email.Generator manually will give you more control over the
process).
@param senderDomainName: Name by which to identify. If None, try
to pick something sane (but this depends on external configuration
and may not succeed).
@param port: Remote port to which to connect.
@rtype: L{Deferred}
@returns: A L{Deferred}, its callback will be called if a message is sent
to ANY address, the errback if no message is sent.
The callback will be called with a tuple (numOk, addresses) where numOk
is the number of successful recipient addresses and addresses is a list
of tuples (address, code, resp) giving the response to the RCPT command
for each address.
"""
if not hasattr(msg,'read'):
# It's not a file
msg = StringIO(str(msg))
d = defer.Deferred()
factory = SMTPSenderFactory(from_addr, to_addrs, msg, d)
if senderDomainName is not None:
factory.domain = senderDomainName
reactor.connectTCP(smtphost, port, factory)
return d
##
## Yerg. Codecs!
##
import codecs
def xtext_encode(s, errors=None):
r = []
for ch in s:
o = ord(ch)
if ch == '+' or ch == '=' or o < 33 or o > 126:
r.append('+%02X' % o)
else:
r.append(chr(o))
return (''.join(r), len(s))
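# Worked example: xtext_encode('a=b c') returns ('a+3Db+20c', 5) -- '=' and the
# space (ord 32 < 33) are escaped as '+XX' hex codes, everything else passes
# through unchanged.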
def _slowXTextDecode(s, errors=None):
"""
Decode the xtext-encoded string C{s}.
"""
r = []
i = 0
while i < len(s):
if s[i] == '+':
try:
r.append(chr(int(s[i + 1:i + 3], 16)))
except ValueError:
r.append(s[i:i + 3])
i += 3
else:
r.append(s[i])
i += 1
return (''.join(r), len(s))
try:
from twisted.protocols._c_urlarg import unquote as _helper_unquote
except ImportError:
xtext_decode = _slowXTextDecode
else:
def xtext_decode(s, errors=None):
"""
Decode the xtext-encoded string C{s} using a fast extension function.
"""
return (_helper_unquote(s, '+'), len(s))
class xtextStreamReader(codecs.StreamReader):
def decode(self, s, errors='strict'):
return xtext_decode(s)
class xtextStreamWriter(codecs.StreamWriter):
def decode(self, s, errors='strict'):
return xtext_encode(s)
def xtext_codec(name):
if name == 'xtext':
return (xtext_encode, xtext_decode, xtextStreamReader, xtextStreamWriter)
codecs.register(xtext_codec)
|
lambdas/shared/tests/test_preview.py
|
BearerPipelineTest/quilt
| 1,115 |
130123
|
"""
Preview helper functions
"""
import os
import pathlib
from unittest import TestCase
from unittest.mock import patch
import pyarrow.parquet as pq
from py_w3c.validators.html.validator import HTMLValidator
from t4_lambda_shared.preview import (
extract_excel,
extract_fcs,
extract_parquet,
get_bytes,
get_preview_lines,
)
TEST_EXTRACT_PARQUET_MAX_BYTES = 10_000
BASE_DIR = pathlib.Path(__file__).parent / 'data'
ACCEPTABLE_ERROR_MESSAGES = [
'Start tag seen without seeing a doctype first. Expected “<!DOCTYPE html>”.',
'Element “head” is missing a required instance of child element “title”.',
'Element “style” not allowed as child of element “div” in this context. '
'(Suppressing further errors from this subtree.)',
'The “border” attribute on the “table” element is obsolete. Use CSS instead.',
]
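# iterate_chunks lazily yields successive chunk_size-byte reads from file_obj,
# stopping once read() returns b'' (the two-argument form of iter()).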
def iterate_chunks(file_obj, chunk_size=4096):
return iter(lambda: file_obj.read(chunk_size), b'')
class TestPreview(TestCase):
"""Tests the preview functions"""
def test_extract_parquet(self):
file = BASE_DIR / 'amazon-reviews-1000.snappy.parquet'
cell_value = '<td>TSD Airsoft/Paintball Full-Face Mask, Goggle Lens</td>'
with patch('t4_lambda_shared.preview.get_available_memory') as mem_mock:
mem_mock.return_value = 1
with open(file, mode='rb') as parquet:
body, info = extract_parquet(parquet, max_bytes=TEST_EXTRACT_PARQUET_MAX_BYTES)
assert all(bracket in body for bracket in ('<', '>'))
assert body.count('<') == body.count('>'), \
'expected matching HTML tags'
assert cell_value not in body, 'only expected columns'
assert 'skipped rows' in info['warnings']
with open(file, mode='rb') as parquet:
body, info = extract_parquet(parquet, as_html=True, max_bytes=TEST_EXTRACT_PARQUET_MAX_BYTES)
assert cell_value in body, 'missing expected HTML cell'
with open(file, mode='rb') as parquet:
body, info = extract_parquet(parquet, skip_rows=True, max_bytes=TEST_EXTRACT_PARQUET_MAX_BYTES)
assert 'skipped rows' in info['warnings']
assert cell_value not in body, 'only expected columns'
with open(file, mode='rb') as parquet:
body, info = extract_parquet(parquet, as_html=False, max_bytes=TEST_EXTRACT_PARQUET_MAX_BYTES)
assert all(bracket not in body for bracket in ('<', '>')), \
'did not expect HTML'
parquet_file = pq.ParquetFile(file)
assert all(
column in info['schema']['names']
for column in parquet_file.schema.names
)
assert [
parquet_file.metadata.num_rows, parquet_file.metadata.num_columns
] == info['shape'], 'Unexpected number of rows or columns'
def test_excel(self):
"""test XLS, XLSX parsing"""
test_files = {
"Revised.Haplogroups.1000G.20140205.xlsx": {
"contents": [
"Continent",
"Population",
"ID",
"Macrohaplogroup",
"Haplogroup",
"Informative SNPs",
"NA19239",
"NA19256",
"E1b1a1a1g1a2",
]
},
"lclarke_phase1_sequence_stats_20120330.xls": {
"contents": [
"Breakdown of data generated by project, technology, submitting centre",
"92219554043",
"90363687334"
]
}
}
vld = HTMLValidator()
for file, expected_data in test_files.items():
in_file = os.path.join(BASE_DIR, "excel", file)
with open(in_file, mode="rb") as excel:
for html in [False, True]:
body, _ = extract_excel(excel, as_html=html)
# print(body)
tags = ['<div>', '<tbody>', '<th>', '<td>', '<tr>']
if html:
vld.validate_fragment(body)
assert all(t in body for t in tags)
serious_errors = [
e for e in vld.errors
if e["message"] not in ACCEPTABLE_ERROR_MESSAGES
]
assert not serious_errors
print(vld.warnings)
else:
assert not any(t in body for t in tags)
assert all(c in body for c in expected_data["contents"])
def test_fcs(self):
"""test FCS parsing"""
# store test files and expectations
test_files = {
'normal.fcs': {
'columns_string': 'FSC-A,SSC-A,FL1-A,FL2-A,FL3-A,FL4-A,FSC-H,SSC-H,FL1-H,FL2-H,FL3-H,FL4-H,Width,Time',
'in_body': '<th>FL3-H</th>',
'in_meta_keys': '#P1MaxUsefulDataChannel',
'in_meta_values': '491519',
'has_warnings': False,
},
'meta_only.fcs': {
'in_meta_keys': '_channel_names_',
'in_meta_values': 'Compensation Controls_G710 Stained Control.fcs',
'has_warnings': True,
},
}
for file, expected_data in test_files.items():
in_file = os.path.join(BASE_DIR, 'fcs', file)
with open(in_file, mode='rb') as fcs:
body, info = extract_fcs(fcs)
if body != "":
assert expected_data['in_body'] in body
assert not expected_data.get('has_warnings')
else:
assert expected_data['has_warnings']
assert info['warnings']
assert expected_data['in_meta_keys'] in info['metadata'].keys()
assert expected_data['in_meta_values'] in info['metadata'].values()
# when there's a body, check if columns only works
if expected_data.get('in_body'):
# move to start so we can use the file-like a second time
fcs.seek(0)
body, info = extract_fcs(fcs, as_html=False)
assert body == expected_data['columns_string']
def test_long(self):
"""test a text file with lots of lines"""
txt = BASE_DIR / 'long.txt'
max_lines = 500
max_bytes = 10000
with open(txt, 'rb') as file_obj:
lines = get_preview_lines(iterate_chunks(file_obj), None, max_lines, max_bytes)
assert len(lines) == max_lines, 'unexpected number of lines'
assert lines[0] == 'Line 1', 'unexpected first line'
assert lines[-1] == f'Line {max_lines}', 'unexpected last line'
def test_long_gz(self):
"""test a gzipped text file with lots of lines"""
txt = BASE_DIR / 'long.txt.gz'
max_lines = 500
max_bytes = 10000
with open(txt, 'rb') as file_obj:
lines = get_preview_lines(iterate_chunks(file_obj), 'gz', max_lines, max_bytes)
assert len(lines) == max_lines, 'unexpected number of lines'
assert lines[0] == 'Line 1', 'unexpected first line'
assert lines[-1] == f'Line {max_lines}', 'unexpected last line'
def test_txt_max_bytes(self):
"""test truncation to CATALOG_LIMIT_BYTES"""
txt = BASE_DIR / 'two-line.txt'
max_lines = 500
max_bytes = 5
with open(txt, 'rb') as file_obj:
lines = get_preview_lines(iterate_chunks(file_obj), None, max_lines, max_bytes)
assert len(lines) == 1, 'failed to truncate bytes'
assert lines[0] == '1234😊', 'failed to truncate bytes'
def test_txt_max_bytes_one_line(self):
"""test truncation to CATALOG_LIMIT_BYTES"""
txt = BASE_DIR / 'one-line.txt'
max_lines = 500
max_bytes = 8
chunk_size = 10
with open(txt, 'rb') as file_obj:
lines = get_preview_lines(
iterate_chunks(file_obj, chunk_size),
None,
max_lines,
max_bytes
)
assert len(lines) == 1, 'failed to truncate bytes'
assert lines[0] == '🚷🚯', 'failed to truncate bytes'
def test_bytes(self):
txt = BASE_DIR / 'long.txt.gz'
with open(txt, 'rb') as file_obj:
buffer = get_bytes(file_obj, 'gz')
lines = buffer.getvalue().splitlines()
assert lines[0] == b'Line 1'
assert lines[-1] == b'Line 999'
|
tools/state_updates.py
|
pnbruckner/homeassistant-conf
| 165 |
130125
|
#!/usr/bin/env python3
from collections import OrderedDict
import re
import sys
try:
filename = sys.argv[1]
if '.' not in sys.argv[2]:
raise ValueError
except:
print('Usage: python3 {} filename (entity_id [attribute...])...'.format(sys.argv[0]))
sys.exit(1)
attrs = {}
entity_id = None
for arg in sys.argv[2:]:
if '.' in arg:
if entity_id is not None:
attrs[entity_id] = entity_attrs
entity_id = arg
entity_attrs = []
else:
entity_attrs.append(arg)
attrs[entity_id] = entity_attrs
haevent = re.compile(
r'([0-9-]+ [0-9:]+).*homeassistant_(start|started|stop|final_write|close)\[.*'
)
new_state_none = re.compile(r'([0-9-]+ [0-9:]+)(.*)new_state=None(.*)')
ent_id = re.compile(r'.*entity_id=([^,>]+).*')
new_state = re.compile(
r'([0-9-]+ [0-9:]+).*new_state=<state ([^=]+)=([^;]*); (.*) @ ([0-9+-:.T]+)>.*')
new_state2 = re.compile(
r'([0-9-]+ [0-9:]+).*new_state=<state ([^=]+)=([^@]*) @ ([0-9+-:.T]+)>.*')
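# A hypothetical log line that new_state is meant to match (entity and
# attribute names are made up):
#   2021-01-02 03:04:05 ... new_state=<state light.kitchen=on; brightness=128 @ 2021-01-02T03:04:05.678901+00:00>
# Group 1 is the log timestamp, 2 the entity_id, 3 the state, 4 the raw
# attribute string and 5 last_changed; new_state2 covers states without
# attributes.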
ent_hdr = 'entity_id'
max_ent = len(ent_hdr)
ts_hdr = 'log time'
max_ts = len(ts_hdr)
lc_hdr = 'last_changed'
max_lc = len(lc_hdr)
state_hdr = 'state'
max_state = len(state_hdr)
if len(attrs) == 1:
max_attr = {}
for attr in entity_attrs:
max_attr[attr] = len(attr)
else:
attr_hdr = 'attributes'
HAEVENT = 'Home Assistant'
HAFMT = ' {} {{}} '.format(HAEVENT)
states = []
with open(filename) as f:
for line in f:
m = haevent.match(line)
if m:
ts = m.group(1)
max_ts = max(max_ts, len(ts))
last_changed = HAFMT.format(m.group(2).replace('_', ' ').title())
max_lc = max(max_lc, len(last_changed))
states.append((None, ts, last_changed, None, None))
continue
m = new_state_none.match(line)
if m:
n = ent_id.match(m.group(2)) or ent_id.match(m.group(3))
entity_id = n.group(1)
if entity_id in attrs:
max_ent = max(max_ent, len(entity_id))
ts = m.group(1)
max_ts = max(max_ts, len(ts))
state = '=== None ==='
max_state = max(max_state, len(state))
states.append((entity_id, ts, '', state, {}))
continue
m = new_state.match(line)
if m:
s = m.group(4)
last_changed = m.group(5)
else:
m = new_state2.match(line)
s = ''
last_changed = m.group(4) if m else ''
if m and m.group(2) in attrs:
entity_id = m.group(2)
max_ent = max(max_ent, len(entity_id))
ts = m.group(1)
max_ts = max(max_ts, len(ts))
max_lc = max(max_lc, len(last_changed))
state = m.group(3)
max_state = max(max_state, len(state))
_attrs = OrderedDict()
for attr in attrs[entity_id]:
try:
start = s.index(attr+'=')+len(attr)+1
_attr = s[start:s.rfind(', ', start, s.find('=', start))]
except:
_attr = '???'
_attrs[attr] = _attr
if len(attrs) == 1:
for attr in entity_attrs:
max_attr[attr] = max(max_attr[attr], len(_attrs[attr]))
states.append((entity_id, ts, last_changed, state, _attrs))
if len(attrs) > 1:
print('{:{}} | '.format(ent_hdr, max_ent), end='')
print('{:{}} | {:{}} | {:{}}'.format(ts_hdr, max_ts, lc_hdr, max_lc, state_hdr, max_state), end='')
if len(attrs) == 1:
for attr in entity_attrs:
print(' | {:{}}'.format(attr, max_attr[attr]), end='')
else:
print(' | {}'.format(attr_hdr), end='')
print('')
if len(attrs) > 1:
print('-'*max_ent, end='-|-')
print('-'*max_ts, '-'*max_lc, '-'*max_state, sep='-|-', end='')
if len(attrs) == 1:
for attr in entity_attrs:
print('', '-'*max_attr[attr], sep='-|-', end='')
else:
print('-|-', end='')
print('-'*len(attr_hdr), end='')
print('')
prev_entity_id = None
for entity_id, ts, last_changed, state, _attrs in states:
if HAEVENT in last_changed:
entity_id = '='*max_ent
last_changed = '{:=^{}}'.format(last_changed, max_lc)
state = '='*max_state
if len(attrs) == 1:
_attrs = OrderedDict()
for attr in entity_attrs:
_attrs[attr] = '='*max_attr[attr]
else:
_attrs = {'=': '='*(len(attr_hdr)-2)}
if len(attrs) > 1:
print('{:{}} | '.format('' if entity_id == prev_entity_id and HAEVENT not in last_changed else entity_id, max_ent), end='')
prev_entity_id = entity_id
print('{:{}} | {:{}} | {:{}}'.format(ts, max_ts, last_changed , max_lc, state , max_state), end='')
if len(attrs) == 1:
for k,v in _attrs.items():
print(' | {:{}}'.format(v if HAEVENT not in last_changed else '='*max_attr[k], max_attr[k]), end='')
else:
print(' |', end='')
for k,v in _attrs.items():
print(' {}={}'.format(k, v), end='')
print('')
|
Chapter 4/restful_python_chapter_04_05/gamesapi/games/admin.py
|
Mohamed2011-bit/Building-RESTful-Python-Web-Services
| 116 |
130133
|
<gh_stars>100-1000
"""
Book: Building RESTful Python Web Services
Chapter 4: Throttling, Filtering, Testing and Deploying an API with Django
Author: <NAME> - Twitter.com/gastonhillar
Publisher: Packt Publishing Ltd. - http://www.packtpub.com
"""
from django.contrib import admin
# Register your models here.
|
tests/unit/operations/test_sshops.py
|
myungseokang/aws-elastic-beanstalk-cli
| 110 |
130179
|
<filename>tests/unit/operations/test_sshops.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
from copy import deepcopy
import mock
import unittest
from ebcli.operations import sshops
from .. import mock_responses
class TestSSHOps(unittest.TestCase):
def setUp(self):
self.root_dir = os.getcwd()
if not os.path.exists('testDir'):
os.mkdir('testDir')
os.chdir('testDir')
def tearDown(self):
os.chdir(self.root_dir)
shutil.rmtree('testDir')
@mock.patch('ebcli.operations.sshops.io.prompt')
@mock.patch('ebcli.operations.sshops.subprocess.call')
@mock.patch('ebcli.operations.sshops.commonops.upload_keypair_if_needed')
def test_generate_and_upload_keypair__exit_code_0(
self,
upload_keypair_if_needed_mock,
call_mock,
prompt_mock
):
prompt_mock.return_value = 'aws-eb-us-west-2'
call_mock.return_value = 0
self.assertEqual(
'aws-eb-us-west-2',
sshops._generate_and_upload_keypair(['aws-eb', 'aws-eb-us-east-2'])
)
upload_keypair_if_needed_mock.assert_called_once_with('aws-eb-us-west-2')
call_mock.assert_called_once_with(
[
'ssh-keygen',
'-f',
os.path.expanduser('~') + '{0}.ssh{0}aws-eb-us-west-2'.format(os.path.sep),
'-C',
'aws-eb-us-west-2'
]
)
@mock.patch('ebcli.operations.sshops.io.prompt')
@mock.patch('ebcli.operations.sshops.subprocess.call')
@mock.patch('ebcli.operations.sshops.commonops.upload_keypair_if_needed')
def test_generate_and_upload_keypair__exit_code_1(
self,
upload_keypair_if_needed_mock,
call_mock,
prompt_mock
):
prompt_mock.return_value = 'aws-eb-us-west-2'
call_mock.return_value = 1
self.assertEqual(
'aws-eb-us-west-2',
sshops._generate_and_upload_keypair(['aws-eb', 'aws-eb-us-east-2'])
)
upload_keypair_if_needed_mock.assert_called_once_with('aws-eb-us-west-2')
call_mock.assert_called_once_with(
[
'ssh-keygen',
'-f',
os.path.expanduser('~') + '{0}.ssh{0}aws-eb-us-west-2'.format(os.path.sep),
'-C',
'aws-eb-us-west-2'
]
)
@mock.patch('ebcli.operations.sshops.io.prompt')
@mock.patch('ebcli.operations.sshops.subprocess.call')
def test_generate_and_upload_keypair__exit_code_is_other_than_1_and_0(
self,
call_mock,
prompt_mock
):
prompt_mock.return_value = 'aws-eb-us-west-2'
call_mock.return_value = 2
with self.assertRaises(sshops.CommandError) as context_manager:
sshops._generate_and_upload_keypair(['aws-eb', 'aws-eb-us-east-2'])
self.assertEqual(
'An error occurred while running ssh-keygen.',
str(context_manager.exception)
)
@mock.patch('ebcli.operations.sshops.io.prompt')
@mock.patch('ebcli.operations.sshops.subprocess.call')
def test_generate_and_upload_keypair__ssh_keygen_not_present(
self,
call_mock,
prompt_mock
):
prompt_mock.return_value = 'aws-eb-us-west-2'
call_mock.side_effect = OSError
with self.assertRaises(sshops.CommandError) as context_manager:
sshops._generate_and_upload_keypair(['aws-eb', 'aws-eb-us-east-2'])
self.assertEqual(
'An error occurred while running ssh-keygen.',
str(context_manager.exception)
)
@mock.patch('ebcli.operations.sshops.utils.prompt_for_item_in_list')
@mock.patch('ebcli.operations.sshops._generate_and_upload_keypair')
@mock.patch('ebcli.operations.sshops.ec2.get_key_pairs')
@mock.patch('ebcli.operations.sshops.io.validate_action')
def test_prompt_for_ec2_keyname(
self,
validate_action_mock,
get_key_pairs_mock,
generate_and_upload_keypair_mock,
prompt_for_item_in_list_mock
):
get_key_pairs_mock.return_value = mock_responses.DESCRIBE_KEY_PAIRS_RESPONSE['KeyPairs']
prompt_for_item_in_list_mock.return_value = '[ Create new KeyPair ]'
sshops.prompt_for_ec2_keyname('my-environment')
validate_action_mock.assert_called_once_with('To confirm, type the environment name', 'my-environment')
generate_and_upload_keypair_mock.assert_called_once_with(['key_pair_1', 'key_pair_2', '[ Create new KeyPair ]'])
@mock.patch('ebcli.operations.sshops.utils.prompt_for_item_in_list')
@mock.patch('ebcli.operations.sshops._generate_and_upload_keypair')
@mock.patch('ebcli.operations.sshops.ec2.get_key_pairs')
@mock.patch('ebcli.operations.sshops.io.validate_action')
def test_prompt_for_ec2_keyname__choose_existing_key(
self,
validate_action_mock,
get_key_pairs_mock,
generate_and_upload_keypair_mock,
prompt_for_item_in_list_mock
):
get_key_pairs_mock.return_value = mock_responses.DESCRIBE_KEY_PAIRS_RESPONSE['KeyPairs']
prompt_for_item_in_list_mock.return_value = 'key_pair_2'
sshops.prompt_for_ec2_keyname('my-environment')
validate_action_mock.assert_called_once_with('To confirm, type the environment name', 'my-environment')
generate_and_upload_keypair_mock.assert_not_called()
@mock.patch('ebcli.operations.sshops.utils.prompt_for_item_in_list')
@mock.patch('ebcli.operations.sshops._generate_and_upload_keypair')
@mock.patch('ebcli.operations.sshops.ec2.get_key_pairs')
@mock.patch('ebcli.operations.sshops.io.get_boolean_response')
def test_prompt_for_ec2_keyname__get_boolean_response_to_confirm_termination(
self,
get_boolean_response_mock,
get_key_pairs_mock,
generate_and_upload_keypair_mock,
prompt_for_item_in_list_mock
):
get_key_pairs_mock.return_value = mock_responses.DESCRIBE_KEY_PAIRS_RESPONSE['KeyPairs']
prompt_for_item_in_list_mock.return_value = 'key_pair_2'
get_boolean_response_mock.return_value = True
sshops.prompt_for_ec2_keyname()
generate_and_upload_keypair_mock.assert_not_called()
@mock.patch('ebcli.operations.sshops._generate_and_upload_keypair')
@mock.patch('ebcli.operations.sshops.ec2.get_key_pairs')
@mock.patch('ebcli.operations.sshops.io.validate_action')
@mock.patch('ebcli.operations.sshops.io.get_boolean_response')
def test_prompt_for_ec2_keyname__no_keys_exist(
self,
get_boolean_response_mock,
validate_action_mock,
get_key_pairs_mock,
generate_and_upload_keypair_mock
):
get_key_pairs_mock.return_value = []
get_boolean_response_mock.return_value = True
sshops.prompt_for_ec2_keyname('my-environment')
generate_and_upload_keypair_mock.assert_called_once_with([])
validate_action_mock.assert_called_once()
@mock.patch('ebcli.operations.sshops.fileoperations.get_ssh_folder')
def test_get_ssh_file(
self,
get_ssh_folder_mock
):
open('aws-eb-us-west-2', 'w').close()
get_ssh_folder_mock.return_value = os.getcwd() + os.path.sep
sshops._get_ssh_file('aws-eb-us-west-2').endswith('testDir{}aws-eb-us-west-2'.format(os.pathsep))
@mock.patch('ebcli.operations.sshops.fileoperations.get_ssh_folder')
def test_get_ssh_file__file_present_as_pem(
self,
get_ssh_folder_mock
):
open('aws-eb-us-west-2.pem', 'w').close()
get_ssh_folder_mock.return_value = os.getcwd() + os.path.sep
sshops._get_ssh_file('aws-eb-us-west-2').endswith('testDir{}aws-eb-us-west-2.pem'.format(os.pathsep))
@mock.patch('ebcli.operations.sshops.fileoperations.get_ssh_folder')
def test_get_ssh_file__file_absent(
self,
get_ssh_folder_mock
):
open('aws-eb-us-west-2.pem', 'w').close()
get_ssh_folder_mock.return_value = os.getcwd() + os.path.sep
with self.assertRaises(sshops.NotFoundError) as context_manager:
sshops._get_ssh_file('absent_file').endswith('testDir{}aws-eb-us-west-2.pem'.format(os.pathsep))
self.assertEqual(
'The EB CLI cannot find your SSH key file for keyname "absent_file". '
'Your SSH key file must be located in the .ssh folder in your home directory.',
str(context_manager.exception))
@mock.patch('ebcli.operations.sshops.ec2.describe_instance')
def test_ssh_into_instance__no_key_pair(
self,
describe_instance_mock
):
describe_instance_mock.return_value = dict()
with self.assertRaises(sshops.NoKeypairError):
sshops.ssh_into_instance('some-instance-id')
@mock.patch('ebcli.operations.sshops.ec2.describe_instance')
@mock.patch('ebcli.operations.sshops.ec2.describe_security_group')
@mock.patch('ebcli.operations.sshops.ec2.authorize_ssh')
@mock.patch('ebcli.operations.sshops._get_ssh_file')
@mock.patch('ebcli.operations.sshops.subprocess.call')
def test_ssh_into_instance(
self,
call_mock,
_get_ssh_file_mock,
authorize_ssh_mock,
describe_security_group_mock,
describe_instance_mock
):
describe_instance_mock.return_value = mock_responses.DESCRIBE_INSTANCES_RESPONSE['Reservations'][0]['Instances'][0]
describe_security_group_mock.return_value = mock_responses.DESCRIBE_SECURITY_GROUPS_RESPONSE['SecurityGroups'][0]
_get_ssh_file_mock.return_value = 'aws-eb-us-west-2'
call_mock.return_value = 0
sshops.ssh_into_instance('instance-id')
@mock.patch('ebcli.operations.sshops.ec2.describe_instance')
@mock.patch('ebcli.operations.sshops.ec2.describe_security_group')
@mock.patch('ebcli.operations.sshops.ec2.authorize_ssh')
@mock.patch('ebcli.operations.sshops._get_ssh_file')
@mock.patch('ebcli.operations.sshops.subprocess.call')
def test_ssh_into_instance__ssh_fails(
self,
call_mock,
_get_ssh_file_mock,
authorize_ssh_mock,
describe_security_group_mock,
describe_instance_mock
):
describe_instance_mock.return_value = mock_responses.DESCRIBE_INSTANCES_RESPONSE['Reservations'][0]['Instances'][0]
describe_security_group_mock.return_value = mock_responses.DESCRIBE_SECURITY_GROUPS_RESPONSE['SecurityGroups'][0]
_get_ssh_file_mock.return_value = 'aws-eb-us-west-2'
call_mock.return_value = 1
with self.assertRaises(sshops.CommandError) as context_manager:
sshops.ssh_into_instance('instance-id')
self.assertEqual(
'An error occurred while running: ssh.',
str(context_manager.exception)
)
@mock.patch('ebcli.operations.sshops.ec2.describe_instance')
@mock.patch('ebcli.operations.sshops._get_ssh_file')
@mock.patch('ebcli.operations.sshops.subprocess.call')
def test_ssh_into_instance__neither_public_nor_private_ip_found(
self,
call_mock,
_get_ssh_file_mock,
describe_instance_mock
):
describe_instance_response = deepcopy(mock_responses.DESCRIBE_INSTANCES_RESPONSE['Reservations'][0]['Instances'][0])
del describe_instance_response['PublicIpAddress']
del describe_instance_response['PrivateIpAddress']
describe_instance_mock.return_value = describe_instance_response
_get_ssh_file_mock.return_value = 'aws-eb-us-west-2'
call_mock.return_value = 0
with self.assertRaises(sshops.NotFoundError):
sshops.ssh_into_instance('instance-id')
@mock.patch('ebcli.operations.sshops.ec2.describe_instance')
@mock.patch('ebcli.operations.sshops.ec2.describe_security_group')
@mock.patch('ebcli.operations.sshops.ec2.authorize_ssh')
@mock.patch('ebcli.operations.sshops._get_ssh_file')
@mock.patch('ebcli.operations.sshops.subprocess.call')
def test_ssh_into_instance__uses_private_address(
self,
call_mock,
_get_ssh_file_mock,
authorize_ssh_mock,
describe_security_group_mock,
describe_instance_mock
):
describe_instance_response = deepcopy(mock_responses.DESCRIBE_INSTANCES_RESPONSE['Reservations'][0]['Instances'][0])
del describe_instance_response['PublicIpAddress']
describe_instance_mock.return_value = describe_instance_response
describe_security_group_mock.return_value = mock_responses.DESCRIBE_SECURITY_GROUPS_RESPONSE['SecurityGroups'][0]
_get_ssh_file_mock.return_value = 'aws-eb-us-west-2'
call_mock.return_value = 0
sshops.ssh_into_instance('instance-id')
call_mock.assert_called_once_with(['ssh', '-i', 'aws-eb-us-west-2', '[email protected]'])
@mock.patch('ebcli.operations.sshops.ec2.describe_instance')
@mock.patch('ebcli.operations.sshops.ec2.describe_security_group')
@mock.patch('ebcli.operations.sshops.ec2.revoke_ssh')
@mock.patch('ebcli.operations.sshops.ec2.authorize_ssh')
@mock.patch('ebcli.operations.sshops._get_ssh_file')
@mock.patch('ebcli.operations.sshops.subprocess.call')
def test_ssh_into_instance__ssh_rule_exists(
self,
call_mock,
_get_ssh_file_mock,
authorize_ssh_mock,
revoke_ssh_mock,
describe_security_group_mock,
describe_instance_mock
):
describe_instance_response = deepcopy(mock_responses.DESCRIBE_INSTANCES_RESPONSE['Reservations'][0]['Instances'][0])
describe_instance_mock.return_value = describe_instance_response
describe_security_group_mock.return_value = mock_responses.DESCRIBE_SECURITY_GROUPS_RESPONSE['SecurityGroups'][0]
_get_ssh_file_mock.return_value = 'aws-eb-us-west-2'
call_mock.return_value = 0
sshops.ssh_into_instance('instance-id')
authorize_ssh_mock.assert_not_called()
revoke_ssh_mock.assert_not_called()
call_mock.assert_called_once_with(['ssh', '-i', 'aws-eb-us-west-2', '[email protected]'])
@mock.patch('ebcli.operations.sshops.ec2.describe_instance')
@mock.patch('ebcli.operations.sshops.ec2.describe_security_group')
@mock.patch('ebcli.operations.sshops.ec2.revoke_ssh')
@mock.patch('ebcli.operations.sshops.ec2.authorize_ssh')
@mock.patch('ebcli.operations.sshops._get_ssh_file')
@mock.patch('ebcli.operations.sshops.subprocess.call')
def test_ssh_into_instance__no_ssh_rule_exists(
self,
call_mock,
_get_ssh_file_mock,
authorize_ssh_mock,
revoke_ssh_mock,
describe_security_group_mock,
describe_instance_mock
):
describe_instance_response = deepcopy(mock_responses.DESCRIBE_INSTANCES_RESPONSE['Reservations'][0]['Instances'][0])
describe_instance_mock.return_value = describe_instance_response
describe_security_group_mock.return_value = mock_responses.DESCRIBE_SECURITY_GROUPS_RESPONSE['SecurityGroups'][1]
_get_ssh_file_mock.return_value = 'aws-eb-us-west-2'
call_mock.return_value = 0
sshops.ssh_into_instance('instance-id')
authorize_ssh_mock.assert_called_once_with('sg-12312313')
revoke_ssh_mock.assert_called_once_with('sg-12312313')
call_mock.assert_called_once_with(['ssh', '-i', 'aws-eb-us-west-2', '[email protected]'])
@mock.patch('ebcli.operations.sshops.prompt_for_ec2_keyname')
@mock.patch('ebcli.operations.sshops.commonops.update_environment')
def test_setup_ssh(
self,
update_environment_mock,
prompt_for_ec2_keyname_mock
):
prompt_for_ec2_keyname_mock.return_value = 'aws-eb-us-west-2'
sshops.setup_ssh('my-environment', 'aws-eb-us-west-2')
update_environment_mock.assert_called_once_with(
'my-environment',
[
{
'Namespace': 'aws:autoscaling:launchconfiguration',
'OptionName': 'EC2KeyName',
'Value': 'aws-eb-us-west-2'
}
],
False,
timeout=5
)
@mock.patch('ebcli.operations.sshops.prompt_for_ec2_keyname')
@mock.patch('ebcli.operations.sshops.commonops.update_environment')
def test_setup_ssh__keyname_not_entered(
self,
update_environment_mock,
prompt_for_ec2_keyname_mock
):
prompt_for_ec2_keyname_mock.return_value = None
sshops.setup_ssh('my-environment', 'aws-eb-us-west-2')
update_environment_mock.assert_not_called()
@mock.patch('ebcli.operations.sshops.setup_ssh')
def test_prepare_for_ssh(
self,
setup_ssh_mock
):
sshops.prepare_for_ssh(
'my-environment',
'instance',
False,
False,
True,
None
)
setup_ssh_mock.assert_called_once_with('my-environment', None, timeout=None)
def test_prepare_for_ssh__instance_and_number(self):
with self.assertRaises(sshops.InvalidOptionsError) as context_manager:
sshops.prepare_for_ssh(
'my-environment',
'instance',
False,
False,
False,
1
)
self.assertEqual(
'You cannot use the "--instance" and "--number" options together.',
str(context_manager.exception)
)
@mock.patch('ebcli.operations.sshops.commonops.get_instance_ids')
@mock.patch('ebcli.operations.sshops.utils.prompt_for_item_in_list')
@mock.patch('ebcli.operations.sshops.ssh_into_instance')
def test_prepare_for_ssh__choose_instance_to_ssh_into(
self,
ssh_into_instance_mock,
prompt_for_item_in_list_mock,
get_instance_ids_mock
):
get_instance_ids_mock.return_value = [
'i-123123123123',
'i-234234234424',
'i-353454535434',
]
prompt_for_item_in_list_mock.return_value = 'i-353454535434'
sshops.prepare_for_ssh(
'my-environment',
None,
False,
False,
False,
None
)
ssh_into_instance_mock.assert_called_once_with(
'i-353454535434',
command=None,
custom_ssh=None,
force_open=False,
keep_open=False
)
@mock.patch('ebcli.operations.sshops.commonops.get_instance_ids')
@mock.patch('ebcli.operations.sshops.utils.prompt_for_item_in_list')
@mock.patch('ebcli.operations.sshops.ssh_into_instance')
def test_prepare_for_ssh__only_one_instance_available(
self,
ssh_into_instance_mock,
prompt_for_item_in_list_mock,
get_instance_ids_mock
):
get_instance_ids_mock.return_value = [
'i-123123123123',
]
prompt_for_item_in_list_mock.return_value = 'i-353454535434'
sshops.prepare_for_ssh(
'my-environment',
None,
False,
False,
False,
None
)
ssh_into_instance_mock.assert_called_once_with(
'i-123123123123',
command=None,
custom_ssh=None,
force_open=False,
keep_open=False
)
@mock.patch('ebcli.operations.sshops.commonops.get_instance_ids')
@mock.patch('ebcli.operations.sshops.utils.prompt_for_item_in_list')
@mock.patch('ebcli.operations.sshops.ssh_into_instance')
def test_prepare_for_ssh__number_of_instance_specified(
self,
ssh_into_instance_mock,
prompt_for_item_in_list_mock,
get_instance_ids_mock
):
get_instance_ids_mock.return_value = [
'i-123123123123',
'i-234234234424',
'i-353454535434',
]
prompt_for_item_in_list_mock.return_value = 'i-353454535434'
sshops.prepare_for_ssh(
'my-environment',
None,
False,
False,
False,
2
)
ssh_into_instance_mock.assert_called_once_with(
'i-234234234424',
command=None,
custom_ssh=None,
force_open=False,
keep_open=False
)
@mock.patch('ebcli.operations.sshops.commonops.get_instance_ids')
@mock.patch('ebcli.operations.sshops.utils.prompt_for_item_in_list')
@mock.patch('ebcli.operations.sshops.ssh_into_instance')
@mock.patch('ebcli.operations.sshops.io.log_error')
def test_prepare_for_ssh__ssh_into_instance_fails(
self,
log_error_mock,
ssh_into_instance_mock,
prompt_for_item_in_list_mock,
get_instance_ids_mock
):
get_instance_ids_mock.return_value = [
'i-123123123123',
'i-234234234424',
'i-353454535434',
]
prompt_for_item_in_list_mock.return_value = 'i-353454535434'
ssh_into_instance_mock.side_effect = sshops.NoKeypairError
sshops.prepare_for_ssh(
'my-environment',
None,
False,
False,
False,
2
)
ssh_into_instance_mock.assert_called_once_with(
'i-234234234424',
command=None,
custom_ssh=None,
force_open=False,
keep_open=False
)
log_error_mock.assert_called_once_with(
'This environment is not set up for SSH. Use "eb ssh --setup" to set up SSH for the environment.'
)
|
python_utils/pycaffe_utils.py
|
xzabg/fast-adversarial
| 520 |
130182
|
# ---------------------------------------------------------
# Copyright (c) 2015, <NAME>
#
# Licensed under The MIT License [see LICENSE for details]
# ---------------------------------------------------------
import caffe, yaml
def net_surgery(net, json_file_or_dict):
# Load the JSON file
if isinstance(json_file_or_dict, str):
with open(json_file_or_dict, 'rt') as f:
source_description = yaml.load(f)
else:
source_description = json_file_or_dict
# Find a list of blobs in the target net
target_blobs = net.params.keys()
target_blobs = dict(zip(target_blobs, [0]*len(target_blobs)))
# For each item in the json file load the network and copy the layers
for src_desc in source_description:
net_source = caffe.Net(src_desc['prototxt'], src_desc['model'], caffe.TEST)
for j in xrange(len(src_desc['copy_ops']['dest'])):
dest_name = src_desc['copy_ops']['dest'][j]
assert dest_name in target_blobs, \
'Destination name {} not in target network blobs'.format(dest_name)
src_name = src_desc['copy_ops']['source'][j]
assert src_name in net_source.params.keys(), \
'Source name {} not in source network blobs'.format(src_name)
allow_different_shape = src_desc['copy_ops']['reshape'][j]
if target_blobs[dest_name] != 0:
print 'Target blob {} is being reassigned'.format(dest_name)
target_blobs[dest_name] = target_blobs[dest_name] + 1
assert(len(net.params[dest_name]) == \
len(net_source.params[src_name])), \
'Number of blobs in {} in source do not match number of blobs in {} in destination'\
.format(src_name, dest_name)
for k in xrange(len(net.params[dest_name])):
src = net_source.params[src_name][k]
dest = net.params[dest_name][k]
if allow_different_shape:
assert(src.count == dest.count), \
'Count of blobs in {}[{:d}] in source do not match count of blobs in {}[{:d}] in destination'\
.format(src_name, k, dest_name, k)
dest.data[...] = src.data.reshape(dest.data.shape)
else:
src_shape = src.data.shape
dest_shape = dest.data.shape
assert(src_shape == dest_shape), \
'Shape of blobs in {}[{:d}] {} in source do not match shape of blobs in {}[{:d}] {} in destination'\
.format(src_name, k, str(src_shape), dest_name, k, str(dest_shape))
dest.data[...] = src.data
unusual = [x for x in target_blobs.keys() if target_blobs[x] != 1]
for x in unusual:
print 'Parameter blob {} copied {:d} times.'.format(x, target_blobs[x])
return target_blobs
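# The description consumed above is a list of source networks, each with a
# parallel set of copy operations. A purely illustrative example (paths and
# layer names are made up):
#
# [
#   {
#     "prototxt": "models/source_net.prototxt",
#     "model": "models/source_net.caffemodel",
#     "copy_ops": {
#       "source": ["conv1", "fc6"],
#       "dest": ["conv1_new", "fc6_new"],
#       "reshape": [false, true]
#     }
#   }
# ]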
|
ipfshttpclient/filescanner.py
|
Eternity-labs/py-ipfs-http-client
| 186 |
130215
|
<reponame>Eternity-labs/py-ipfs-http-client
import abc
import collections.abc
import enum
import fnmatch
import os
import re
import sys
import types
import typing as ty
from . import utils
from .exceptions import MatcherSpecInvalidError
# PyCharm rejects typing.AnyStr and will flag all usages as an error,
# effectively breaking PyCharm's ability to provide typing assistance.
#
# To encourage contributions from PyCharm users, we redefine AnyStr.
#
# This will get inlined if/when PyCharm no longer flags typing.AnyStr.
AnyStr = ty.TypeVar('AnyStr', bytes, str)
if sys.version_info >= (3, 7): #PY37+
re_pattern_type = re.Pattern
if ty.TYPE_CHECKING:
re_pattern_t = re.Pattern[AnyStr]
else:
re_pattern_t = re.Pattern
else: #PY36-
re_pattern_t = re_pattern_type = type(re.compile(""))
# Windows does not have os.O_DIRECTORY
O_DIRECTORY: int = getattr(os, "O_DIRECTORY", 0)
# Neither Windows nor MacOS have os.fwalk even through Python 3.9
HAVE_FWALK: bool = hasattr(os, "fwalk")
HAVE_FWALK_BYTES = HAVE_FWALK and sys.version_info >= (3, 7)
class FSNodeType(enum.Enum):
FILE = enum.auto()
DIRECTORY = enum.auto()
#XXX: This should be a generic `ty.NamedTuple` subclass, but GH/python/mypy#685 …
class FSNodeEntry(ty.Generic[AnyStr]):
type: FSNodeType
path: AnyStr
relpath: AnyStr
name: AnyStr
parentfd: ty.Optional[int]
def __init__(
self,
type: FSNodeType,
path: AnyStr,
relpath: AnyStr,
name: AnyStr,
parentfd: ty.Optional[int]) -> None:
self.type = type
self.path = path
self.relpath = relpath
self.name = name
self.parentfd = parentfd
def __repr__(self) -> str:
return (
f'FSNodeEntry('
f'type={self.type!r}, '
f'path={self.path!r}, '
f'relpath={self.relpath!r}, '
f'name={self.name!r}, '
f'parentfd={self.parentfd!r}'
f')'
)
def __str__(self) -> str:
return str(self.path)
class Matcher(ty.Generic[AnyStr], metaclass=abc.ABCMeta):
"""Represents a type that can match on file paths and decide whether they
should be included in some file scanning/adding operation"""
__slots__ = ("is_binary",)
is_binary: bool
def __init__(self, is_binary: bool = False) -> None:
self.is_binary = is_binary
@abc.abstractmethod
def should_descend(self, path: AnyStr) -> bool:
r"""Decides whether the file scanner should descend into the given directory path
Arguments
---------
path
A directory path upholding the same guarantees as those
mentioned in :meth:`should_store`
"""
@abc.abstractmethod
def should_report(self, path: AnyStr, *, is_dir: bool) -> bool:
r"""Decides whether the file scanner should store the given file or directory
Note that in this case “file” may refer to anything that is not a
directory and not just regular files. If the settings of the file scanner
do not permit it to follow symbolic links this may even include symbolic
links pointing at directories.
Arguments
---------
path
The file or directory path to check – the argument's type depends on
the type of the path originally passed to the file scanner and may
either be :type:`bytes` or :type:`str`, but will usually be :type:`str`
The given path is guaranteed to have the following additional properties:
* It will be properly normalized: There won't be any empty (``…//…``),
single-dot (``…/./…``) or double-dot (``…/../…``) directory labels or leading
or trailing slashes.
* Its path separator will match the one found in :var:`os.path.sep` –
that is: It will be \ on Windows and / everywhere else.
* It will be relative to the file scanner's base directory.
is_dir
Whether the given path refers to a directory, see the above paragraph
for what this means exactly
"""
class MatchAll(ty.Generic[AnyStr], Matcher[AnyStr]):
"""I want it all – I want it now…"""
__slots__ = ()
def should_descend(self, path: AnyStr) -> utils.Literal_True:
return True
def should_report(self, path: AnyStr, *, is_dir: bool) -> utils.Literal_True:
return True
class MatchNone(ty.Generic[AnyStr], Matcher[AnyStr]):
"""Fuck it"""
__slots__ = ()
def should_descend(self, path: AnyStr) -> utils.Literal_False:
return False
def should_report(self, path: AnyStr, *, is_dir: bool) -> utils.Literal_False:
return False
class GlobMatcher(Matcher[AnyStr], ty.Generic[AnyStr]):
"""Matches files and directories according to the shell glob conventions
For details on the syntax see the Python :py:mod:`glob` module that this
class emulates. If you are accustomed to globbing on real Unix shells,
make sure to also carefully study its limitations, as these also apply here.
Also note that this matcher always has recursion enabled and hence treats
``**``-labels as special. Additionally the *period_special* parameter is
provided that may be used to disable the special handling of “dot-files”
(files whose name starts with a leading period).
One important thing to keep in mind is that this is a *matcher* and works
entirely I/O-less. As such, trying to include any files or directories
*outside* of the matching domain will *not* work. For instance, a pattern
like ``../a`` or ``b/../c`` would never match anything as a conforming
file scanner would never pass in such a path, the same applies to any notion
of absolute paths. This matcher will try its best to normalize or reject
such cases, but if you're wondering why your pattern just won't match while
pasting it into a real shell works this may be why.
"""
__slots__ = ("period_special", "_sep", "_pat", "_dir_only")
period_special: bool
_sep: AnyStr
_pat: "ty.List[ty.Optional[re_pattern_t[AnyStr]]]"
_dir_only: bool
def __init__(self, pat: AnyStr, *, period_special: bool = True):
"""
Arguments
---------
pat
The glob pattern to use for matching
period_special
Whether a leading period in file/directory names should be matchable by
``*``, ``?`` and ``[…]`` – traditionally they are not, but many modern
shells allow one to disable this behaviour
"""
super().__init__(isinstance(pat, bytes))
self.period_special = period_special
self._sep = utils.maybe_fsencode(os.path.sep, pat)
dblstar = utils.maybe_fsencode("**", pat)
dot = utils.maybe_fsencode(".", pat)
pat_ndot = utils.maybe_fsencode(r"(?![.])", pat)
# Normalize path separator
if os.path.altsep:
pat = pat.replace(utils.maybe_fsencode(os.path.altsep, pat), self._sep)
# Sanity checks for stuff that will definitely NOT EVER match
# (there is another one in the loop below)
assert not os.path.isabs(pat), "Absolute matching patterns will never match"
# Note the extra final slash for its effect of only matching directories
#
# (TBH, I find it hard to see how that is useful, but everybody does it
# and it keeps things consistent overall – something to only match files
# would be nice however.)
self._dir_only = pat.endswith(self._sep)
self._pat = []
for label in pat.split(self._sep):
# Skip over useless path components
if len(label) < 1 or label == dot:
continue
assert label != dot + dot, 'Matching patterns containing ".." will never match'
if label == dblstar:
self._pat.append(None)
elif dblstar in label:
raise NotImplementedError(
"Using double-star (**) and other characters in the same glob "
"path label ({0}) is not currently supported – please do file "
"an issue if you need this!".format(os.fsdecode(label))
)
else:
if not isinstance(label, bytes):
re_expr = fnmatch.translate(label)
else:
re_expr = fnmatch.translate(label.decode("latin-1")).encode("latin-1")
if period_special and not label.startswith(dot):
re_expr = pat_ndot + re_expr
self._pat.append(re.compile(re_expr))
def should_descend(self, path: AnyStr) -> bool:
for idx, label in enumerate(path.split(self._sep)):
# Always descend into any directory below a recursive pattern as we
# cannot predict what we will later do a tail match on
pattern = self._pat[idx]
if pattern is None:
return True
# Do not descend further if we reached the last label of the pattern
# (unless the final pattern label is a recursive match, see above)
#
# This is independent of whether this *directory* will be included
# or not.
if idx == (len(self._pat) - 1):
return False
# Match the current pattern to decide whether to keep looking or not
if not pattern.match(label):
return False
# The given path matched part of this pattern, so we should include this
# directory to go further
return True
def should_report(self, path: AnyStr, *, is_dir: bool) -> bool:
# A final slash means “only match directories”
if self._dir_only and not is_dir:
return False
labels = path.split(self._sep)
return self._match(labels, idx_pat=0, idx_path=0, is_dir=is_dir)
def _match(self, labels: ty.List[AnyStr], *, idx_pat: int, idx_path: int,
is_dir: bool) -> bool:
while idx_pat < len(self._pat):
pattern = self._pat[idx_pat]
if pattern is None:
break
# Match initial labels before recursion
if idx_path >= len(labels):
# Pattern refers to something below this path, store it only if it
# is a directory
return is_dir
elif not pattern.match(labels[idx_path]):
# Pattern did not match
return False
idx_pat += 1
idx_path += 1
dot = utils.maybe_fsencode(".", labels[0])
# We reached the end of the matching labels or the start of recursion
if idx_pat == len(self._pat):
# End of matching labels – only include path if it was of the same
# length or the previous pattern label was recursive
if self._pat[idx_pat - 1] is None:
return not self.period_special or not labels[idx_path].startswith(dot)
else:
return idx_path == len(labels)
# Start of recursion – move to next label and recurse this method too
#
# If the path is then matched by our inferior self return success,
# otherwise retry with the next path label until all labels have been
# exhausted meaning that there was no match.
idx_pat += 1
while idx_path < len(labels):
if self._match(labels, idx_pat=idx_pat, idx_path=idx_path, is_dir=is_dir):
return True
# Do not add dot-files as part of recursive patterns by default
if self.period_special and labels[idx_path].startswith(dot):
break
idx_path += 1
# Nothing matched
return False
class ReMatcher(Matcher[AnyStr], ty.Generic[AnyStr]):
"""Matches files and directories using a regular expression pattern
See the description of :meth:`Matcher.should_report` for the specifics on how
the matching path is formatted, but note that there is one important
difference: In order to allow the regular expression to distinguish between
files and directories, all directories (if *is_dir* is ``True``) contain
a trailing forward or backward slash (depending on the platform). If you
don't care about this information you may want to add ``[\\/]?`` to the end
of the pattern.
Unlike glob patterns, regular expressions cannot be reliably analyzed for
which directories the file paths they may or may not match are in. Because
of this, this matcher will cause the file scanner **to recurse into any
directory it encounters**, possibly causing an unnecessary slow-down during
scanning even if only very few files end up being selected. If this causes
problems for you *use non-recursive glob patterns instead* or implement your
own matcher with a proper :meth:`Matcher.should_descend` method.
"""
__slots__ = ("_pat",)
_pat: "re_pattern_t[AnyStr]"
def __init__(self, pat: ty.Union[AnyStr, "re_pattern_t[AnyStr]"]):
self._pat = re.compile(pat)
super().__init__(not (self._pat.flags & re.UNICODE))
def should_descend(self, path: AnyStr) -> bool:
return True
def should_report(self, path: AnyStr, *, is_dir: bool) -> bool:
suffix: AnyStr = utils.maybe_fsencode(os.path.sep, path) if is_dir else type(path)()
return bool(self._pat.match(path + suffix))
class MetaMatcher(Matcher[AnyStr], ty.Generic[AnyStr]):
"""Match files and directories by delegating to other matchers"""
__slots__ = ("_children",)
_children: ty.List[Matcher[AnyStr]]
def __init__(self, children: ty.List[Matcher[AnyStr]]):
assert len(children) > 0
super().__init__(children[0].is_binary)
self._children = children
def should_descend(self, path: AnyStr) -> bool:
return any(m.should_descend(path) for m in self._children)
def should_report(self, path: AnyStr, *, is_dir: bool) -> bool:
return any(m.should_report(path, is_dir=is_dir) for m in self._children)
class NoRecusionAdapterMatcher(Matcher[AnyStr], ty.Generic[AnyStr]):
"""Matcher adapter that will prevent any recursion
Takes a subordinate matcher, but tells the scanner to never descend into any
child directory and to never store files from such a directory. This is an
effective way to prevent any non-top-level files from being emitted by the
scanner and hence provides ``recursive=False`` semantics.
"""
__slots__ = ("_child",)
_child: Matcher[AnyStr]
def __init__(self, child: Matcher[AnyStr]):
super().__init__(child.is_binary)
self._child = child
def should_descend(self, path: AnyStr) -> bool:
return False
def should_report(self, path: AnyStr, *, is_dir: bool) -> bool:
return utils.maybe_fsencode(os.path.sep, path) not in path \
and self._child.should_report(path, is_dir=is_dir)
if ty.TYPE_CHECKING:
_match_spec_t = ty.Union[AnyStr, re_pattern_t[AnyStr], Matcher[AnyStr]]
else:
# Using `re_pattern_t` here like in the type checking case makes
# sphinx_autodoc_typehints explode
_match_spec_t = ty.Union[AnyStr, re_pattern_t, Matcher[AnyStr]]
match_spec_t = ty.Union[
ty.Iterable[_match_spec_t[AnyStr]],
_match_spec_t[AnyStr]
]
def _require_spec(spec: ty.Optional[match_spec_t[AnyStr]]) -> match_spec_t[AnyStr]:
"""
Assist the type checker by narrowing the number of places accepting Optional.
"""
if spec is None:
return MatchAll()
else:
return spec
@ty.overload
def matcher_from_spec(spec: match_spec_t[bytes], *,
period_special: bool = ...,
recursive: bool = ...) -> Matcher[bytes]:
...
@ty.overload
def matcher_from_spec(spec: match_spec_t[str], *,
period_special: bool = ...,
recursive: bool = ...) -> Matcher[str]:
...
@ty.overload
def matcher_from_spec(spec: None, *,
period_special: bool = ...,
recursive: bool = ...) -> Matcher[str]:
...
def matcher_from_spec(spec: ty.Optional[match_spec_t[AnyStr]], *,
period_special: bool = True,
recursive: bool = True) -> Matcher[AnyStr]:
"""Processes the given simplified matching spec, creating an equivalent :type:`Matcher` object"""
return _matcher_from_spec(
_require_spec(spec),
period_special=period_special,
recursive=recursive
)
def _matcher_from_spec(spec: match_spec_t[AnyStr], *,
period_special: bool = True,
recursive: bool = True) -> Matcher[AnyStr]:
if recursive:
return _recursive_matcher_from_spec(spec, period_special=period_special)
else:
guarded = matcher_from_spec(
spec,
recursive=True,
period_special=period_special
)
return NoRecusionAdapterMatcher(guarded)
def _recursive_matcher_from_spec(spec: match_spec_t[AnyStr], *,
period_special: bool = True) -> Matcher[AnyStr]:
if isinstance(spec, re_pattern_type):
return ReMatcher(spec)
elif isinstance(spec, (str, bytes)):
return GlobMatcher(spec, period_special=period_special)
elif isinstance(spec, Matcher):
return spec
elif isinstance(spec, collections.abc.Iterable):
matchers: ty.List[Matcher[AnyStr]] = [
_recursive_matcher_from_spec(
ty.cast(match_spec_t[AnyStr], s),
period_special=period_special)
for s in spec
]
if len(matchers) == 0: # Edge case: Empty list of matchers
return MatchNone()
elif len(matchers) == 1: # Edge case: List of exactly one matcher
return matchers[0]
else: # Actual list of matchers (plural)
return MetaMatcher(matchers)
else:
raise MatcherSpecInvalidError(spec)
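# --- Illustrative sketch (not part of the original module) --------------------
# A hedged demonstration of the matcher-spec machinery defined above, using only
# names from this file; the sample paths below are made up for illustration.
def _demo_matcher_from_spec() -> None:
    glob_matcher = matcher_from_spec("**/*.py")                # glob string -> GlobMatcher
    regex_matcher = matcher_from_spec(re.compile(r".*\.md"))   # compiled regex -> ReMatcher
    match_all = matcher_from_spec(None)                        # None -> MatchAll
    assert glob_matcher.should_report(os.path.join("pkg", "mod.py"), is_dir=False)
    assert regex_matcher.should_report("README.md", is_dir=False)
    assert match_all.should_descend(os.path.join("any", "dir"))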
class walk(ty.Generator[FSNodeEntry[AnyStr], ty.Any, None], ty.Generic[AnyStr]):
__slots__ = ("_generator", "_close_fd")
_generator: ty.Generator[FSNodeEntry[AnyStr], ty.Any, None]
_close_fd: ty.Optional[int]
def __init__(
self,
directory: ty.Union[AnyStr, utils.PathLike[AnyStr], int],
match_spec: ty.Optional[match_spec_t[AnyStr]] = None, *,
follow_symlinks: bool = False,
intermediate_dirs: bool = True,
period_special: bool = True,
recursive: bool = True
) -> None:
"""
Arguments
---------
directory
Path to, or file descriptor of, directory to scan
match_spec
Matching rules for limiting the files and directories to include in
the scan
By default everything will be scanned and included.
follow_symlinks
Follow symbolic links while scanning?
period_special
The value to pass to the *period_special* argument of :class:`GlobMatcher`
when constructing such an object from the given *match_spec*
intermediate_dirs
When reporting a file or directory first ensure that all containing
directories were already reported
Due to the way matching works, a matcher may only ask for its target
files to be included but not care about the directories leading up
to that file, and this would cause the file to be reported to the caller
without these intermediate directories. If you require
these directories to be reported for consistency, this option will
keep track of these intermediate paths and make it appear as if these
had been included up-front.
recursive
Recurse into the given directory while scanning?
If ``False`` this will wrap the given matcher inside
:class:`NoRecusionAdapterMatcher` and hence prevent the scanner from
doing any recursion.
"""
self._close_fd = None
# Create matcher object
matcher: Matcher[AnyStr] = _matcher_from_spec(
_require_spec(match_spec),
recursive=recursive,
period_special=period_special
)
# Convert directory path to string …
if isinstance(directory, int):
if not HAVE_FWALK:
raise NotImplementedError("Passing a file descriptor as directory is "
"not supported on this platform")
self._generator = self._walk(
directory,
None,
matcher,
follow_symlinks,
intermediate_dirs
)
else:
directory_str = os.fspath(directory)
# Best-effort ensure that target directory exists if it is accessed by path
os.stat(directory_str)
# … and possibly open it as a FD if this is supported by the platform
#
# Note: `os.fwalk` support for binary paths was only added in 3.7+.
directory_str_or_fd: ty.Union[AnyStr, int] = directory_str
if HAVE_FWALK and (not isinstance(directory_str, bytes) or HAVE_FWALK_BYTES):
fd = os.open(directory_str, os.O_RDONLY | O_DIRECTORY)
self._close_fd = directory_str_or_fd = fd
self._generator = self._walk(
directory_str_or_fd,
directory_str,
matcher,
follow_symlinks,
intermediate_dirs
)
def _close_file_descriptor(self) -> None:
if self._close_fd is not None:
os.close(self._close_fd)
self._close_fd = None
def __iter__(self) -> 'walk[AnyStr]':
return self
def __next__(self) -> FSNodeEntry[AnyStr]:
return next(self._generator)
def __enter__(self) -> 'walk[AnyStr]':
return self
def __exit__(self, *a: ty.Any) -> None:
self.close()
def send(self, value: ty.Any) -> FSNodeEntry[AnyStr]:
return self._generator.send(value)
@ty.overload
def throw(self, typ: ty.Type[BaseException], # noqa: E704
val: ty.Union[BaseException, object] = ...,
tb: ty.Optional[types.TracebackType] = ...) -> FSNodeEntry[AnyStr]: ...
@ty.overload
def throw(self, typ: BaseException, val: None = ..., # noqa: E704
tb: ty.Optional[types.TracebackType] = ...) -> FSNodeEntry[AnyStr]: ...
def throw(self, typ: ty.Union[ty.Type[BaseException], BaseException],
val: ty.Union[BaseException, object] = None,
tb: ty.Optional[types.TracebackType] = None) -> FSNodeEntry[AnyStr]:
try:
if isinstance(typ, type):
bt = ty.cast(ty.Type[BaseException], typ) # type: ignore[redundant-cast]
return self._generator.throw(bt, val, tb)
else:
assert val is None
return self._generator.throw(typ, val, tb)
except:
self._close_file_descriptor()
raise
def close(self) -> None:
try:
self.throw(GeneratorExit)
except GeneratorExit:
pass
@staticmethod
def _join_dirs_and_files(dirnames: ty.List[AnyStr], filenames: ty.List[AnyStr]) \
-> ty.Iterator[ty.Tuple[AnyStr, bool]]:
for dirname in dirnames:
yield dirname, True
for filename in filenames:
yield filename, False
@staticmethod
def _walk_separator(
matcher: Matcher[AnyStr],
directory_str: ty.Optional[AnyStr]
) -> ty.Union[bytes, str]:
"""
Determine which separator to use.
Because os.fsencode can return a byte array, we must allow returning a byte array,
regardless of AnyStr.
"""
if directory_str is not None:
return utils.maybe_fsencode(os.path.sep, directory_str)
elif matcher is not None and matcher.is_binary:
return os.fsencode(os.path.sep)
else:
return os.path.sep
@staticmethod
def _walk_wide(
dot: AnyStr,
directory: ty.Union[AnyStr, int],
follow_symlinks: bool
) -> ty.Generator[
ty.Tuple[AnyStr, ty.List[AnyStr], ty.List[AnyStr], ty.Optional[int]],
ty.Any,
None
]:
"""
Return a four-part tuple just like os.fwalk does, even if we won't use os.fwalk.
The directory file descriptor will be None when os.fwalk is not used.
"""
if isinstance(directory, int):
yield from os.fwalk(dot, dir_fd=directory, follow_symlinks=follow_symlinks)
else:
for dir_path, dir_names, file_names in os.walk(directory, followlinks=follow_symlinks):
yield dir_path, dir_names, file_names, None
def _walk(
self,
directory: ty.Union[AnyStr, int],
directory_str: ty.Optional[AnyStr],
matcher: Matcher[AnyStr],
follow_symlinks: bool,
intermediate_dirs: bool
) -> ty.Generator[FSNodeEntry[AnyStr], ty.Any, None]:
separator = self._walk_separator(matcher=matcher, directory_str=directory_str)
# TODO: Because os.fsencode can return a byte array, we need to refactor how we use 'sep'
sep: AnyStr = separator # type: ignore[assignment]
dot = utils.maybe_fsencode(".", sep)
# Identify the leading portion of the `dirpath` returned by `os.walk`
# that should be dropped
if not isinstance(directory, int):
while directory.endswith(sep):
directory = directory[:-len(sep)]
prefix = (directory if not isinstance(directory, int) else dot) + sep
reported_directories: ty.Set[AnyStr] = set()
# Always report the top-level directory even if nothing therein is matched
reported_directories.add(utils.maybe_fsencode("", sep))
yield FSNodeEntry(
type=FSNodeType.DIRECTORY,
path=prefix[:-len(sep)],
relpath=dot,
name=dot,
parentfd=None
)
walk_iter = self._walk_wide(dot=dot, directory=directory, follow_symlinks=follow_symlinks)
try:
for dirpath, dirnames, filenames, dirfd in walk_iter:
# Remove the directory prefix from the received path
_, _, dirpath = dirpath.partition(prefix)
# Keep track of reported intermediaries, so that we only check for
# these at most once per directory base
intermediates_reported = False
for filename, is_dir in self._join_dirs_and_files(list(dirnames), filenames):
filepath = os.path.join(dirpath, filename)
# Check if matcher thinks we should descend into this directory
if is_dir and not matcher.should_descend(filepath):
dirnames.remove(filename)
# Check if matcher thinks we should report this node
if not matcher.should_report(filepath, is_dir=is_dir):
continue
# Ensure that all containing directories are reported
# before reporting this node
if not intermediates_reported and intermediate_dirs:
parts = dirpath.split(sep)
for end_offset in range(len(parts)):
parent_dirpath = sep.join(parts[0:(end_offset + 1)])
if parent_dirpath not in reported_directories:
reported_directories.add(parent_dirpath)
yield FSNodeEntry(
type=FSNodeType.DIRECTORY,
path=(prefix + parent_dirpath),
relpath=parent_dirpath,
name=parts[end_offset],
parentfd=None
)
intermediates_reported = True
# Report the target file or directory
if is_dir:
reported_directories.add(filepath)
yield FSNodeEntry(
type=FSNodeType.DIRECTORY,
path=(prefix + filepath),
relpath=filepath,
name=filename,
parentfd=dirfd
)
else:
yield FSNodeEntry(
type=FSNodeType.FILE,
path=(prefix + filepath),
relpath=filepath,
name=filename,
parentfd=dirfd
)
finally:
# Make sure the file descriptors bound by `os.fwalk` are freed on error
walk_iter.close()
self._close_file_descriptor()
if HAVE_FWALK: # pragma: no cover
supports_fd: ty.FrozenSet[ty.Callable[..., ty.Any]] = frozenset({walk})
else: # pragma: no cover
supports_fd = frozenset()
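# --- Illustrative sketch (not part of the original module) --------------------
# A hedged example of driving the `walk` class defined above: scan a directory
# for Python files and print what gets reported. The base directory and glob
# pattern are arbitrary demonstration values.
def _demo_walk(base: str = ".") -> None:
    with walk(base, "**/*.py") as scanner:
        for entry in scanner:
            kind = "dir " if entry.type is FSNodeType.DIRECTORY else "file"
            print(kind, os.fsdecode(entry.relpath))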
|
src/app/test/metrics_test.py
|
ExpressHermes/beer-garden
| 230 |
130253
|
# -*- coding: utf-8 -*-
from datetime import datetime
from time import sleep
import pytest
from mock import Mock
import beer_garden.metrics as metrics
@pytest.fixture
def prometheus_mocks(monkeypatch):
# TODO - Test http api latency
# monkeypatch.setattr(metrics, "http_api_latency_total", Mock())
monkeypatch.setattr(metrics, "plugin_command_latency", Mock())
monkeypatch.setattr(metrics, "completed_request_counter", Mock())
monkeypatch.setattr(metrics, "request_counter_total", Mock())
monkeypatch.setattr(metrics, "queued_request_gauge", Mock())
monkeypatch.setattr(metrics, "in_progress_request_gauge", Mock())
class TestMetrics(object):
@pytest.mark.parametrize("wait", [0, 0.1, 0.25])
def test_request_latency(self, wait):
now = datetime.utcnow()
sleep(wait)
latency = metrics.request_latency(now)
assert 0.01 > latency - wait
@pytest.mark.parametrize(
"status,queued,in_progress",
[("CREATED", 1, 0), ("IN_PROGRESS", 0, 1), ("SUCCESS", 0, 0)],
)
def test_initialize_counts(
self, prometheus_mocks, monkeypatch, bg_request, status, queued, in_progress
):
bg_request.status = status
monkeypatch.setattr(metrics.db, "query", Mock(return_value=[bg_request]))
metrics.initialize_counts()
assert queued == metrics.queued_request_gauge.labels.return_value.inc.call_count
assert (
in_progress
== metrics.in_progress_request_gauge.labels.return_value.inc.call_count
)
def test_request_created(self, prometheus_mocks, bg_request):
metrics.request_created(bg_request)
metrics.queued_request_gauge.labels.assert_called_once_with(
system=bg_request.system,
system_version=bg_request.system_version,
instance_name=bg_request.instance_name,
)
assert metrics.queued_request_gauge.labels.return_value.inc.call_count == 1
metrics.request_counter_total.labels.assert_called_once_with(
system=bg_request.system,
system_version=bg_request.system_version,
instance_name=bg_request.instance_name,
command=bg_request.command,
)
assert metrics.request_counter_total.labels.return_value.inc.call_count == 1
def test_request_started(self, prometheus_mocks, bg_request):
metrics.request_started(bg_request)
metrics.queued_request_gauge.labels.assert_called_once_with(
system=bg_request.system,
system_version=bg_request.system_version,
instance_name=bg_request.instance_name,
)
assert metrics.queued_request_gauge.labels.return_value.dec.call_count == 1
metrics.in_progress_request_gauge.labels.assert_called_once_with(
system=bg_request.system,
system_version=bg_request.system_version,
instance_name=bg_request.instance_name,
)
assert metrics.in_progress_request_gauge.labels.return_value.inc.call_count == 1
def test_request_completed(self, prometheus_mocks, bg_request):
metrics.request_completed(bg_request)
metrics.in_progress_request_gauge.labels.assert_called_once_with(
system=bg_request.system,
system_version=bg_request.system_version,
instance_name=bg_request.instance_name,
)
assert metrics.in_progress_request_gauge.labels.return_value.dec.call_count == 1
metrics.completed_request_counter.labels.assert_called_once_with(
system=bg_request.system,
system_version=bg_request.system_version,
instance_name=bg_request.instance_name,
command=bg_request.command,
status=bg_request.status,
)
assert metrics.completed_request_counter.labels.return_value.inc.call_count == 1
metrics.plugin_command_latency.labels.assert_called_once_with(
system=bg_request.system,
system_version=bg_request.system_version,
instance_name=bg_request.instance_name,
command=bg_request.command,
status=bg_request.status,
)
assert (
metrics.plugin_command_latency.labels.return_value.observe.call_count == 1
)
|
topicnet/cooking_machine/config_parser.py
|
bt2901/TopicNet
| 123 |
130263
|
"""
Parsing text file into Experiment instance using strictyaml
(github.com/crdoconnor/strictyaml/)
The aim here is to make config:
* possible to use even for non-programmers
* hard to misuse
* easy debuggable
Hence, the process of parsing config is a bit more complicated than
it could be, but it produces more useful error messages. For example:
File $YOUR_CONFIG.yaml, line 42
topic_names: 10
^ this value should be a 'list' instead of 'int'
YAMLValidationError: 'int' passed instead of 'list'
instead of:
File $SOME_FILE.py, line 666, in $SOME_FUNCTION
for topic_name in topic_names:
TypeError: 'int' object is not iterable
To achieve this, strictyaml makes use of various validators which
keep track of individual line numbers and which fragments are already
checked and which aren't quite here yet.
Our process consists of three stages:
1) we check the high-level structure using `BASE_SCHEMA`.
The presence of each required key is ensured.
After this stage we can be sure that we can create a valid model
using specified parameters.
2) we make a second pass and revalidate 'regularizers' and 'stages'
This step is performed semi-automatically: using `inspect`,
we extract everything from `__init__` method signature.
For example:
def __init__(self, num_iters: int = 5)
allows us to infer that num_iters parameter should be int,
but it isn't strictly required.
3) we construct instances of classes required, convert types manually
and implement some shortcuts.
Ideally, this stage should be performed using revalidate() as well,
but it's a work-in-progress currently.
""" # noqa: W291
from inspect import signature, Parameter
from typing import (
Callable,
Type,
)
from .cubes import (
CubeCreator,
RegularizersModifierCube,
GreedyStrategy,
PerplexityStrategy,
)
from .experiment import Experiment
from .dataset import Dataset
from .models import scores as tnscores
from .models import TopicModel
from .model_constructor import (
create_default_topics,
init_simple_default_model,
)
from .rel_toolbox_lite import (
count_vocab_size,
handle_regularizer,
)
import artm
from strictyaml import Map, Str, Int, Seq, Float, Bool
from strictyaml import Any, Optional, EmptyDict, EmptyNone, EmptyList
from strictyaml import dirty_load
SUPPORTED_CUBES = [CubeCreator, RegularizersModifierCube]
SUPPORTED_STRATEGIES = [PerplexityStrategy, GreedyStrategy]
TYPE_VALIDATORS = {
'int': Int(), 'bool': Bool(), 'str': Str(), 'float': Float()
}
def choose_key(param):
"""
Parameters
----------
param : inspect.Parameter
Returns
-------
str or strictyaml.Optional
"""
if param.default is not Parameter.empty:
return Optional(param.name)
return param.name
def choose_validator(param):
"""
Parameters
----------
param : inspect.Parameter
Returns
-------
instance of strictyaml.Validator
"""
if param.annotation is int:
return Int()
if param.annotation is float:
return Float()
if param.annotation is bool:
return Bool()
if param.annotation is str:
return Str()
if param.name in ARTM_TYPES:
return ARTM_TYPES[param.name]
return Any()
# TODO: maybe this is cool, but do we really need this?
def build_schema_from_function(func: Callable) -> dict:
from docstring_parser import parse as docstring_parse
func_params = signature(func).parameters
func_params_schema = dict()
for elem in docstring_parse(func.__doc__).params:
if elem.arg_name in func_params:
key = choose_key(func_params[elem.arg_name])
func_params_schema[key] = TYPE_VALIDATORS[elem.type_name]
return func_params_schema
# TODO: use stackoverflow.com/questions/37929851/parse-numpydoc-docstring-and-access-components
# for now just hardcode most common / important types
ARTM_TYPES = {
"tau": Float(),
"topic_names": Str() | Seq(Str()) | EmptyNone(),
# TODO: handle class_ids in model and in regularizers separately
"class_ids": Str() | Seq(Str()) | EmptyNone(),
"gamma": Float() | EmptyNone(),
"seed": Int(),
"num_document_passes": Int(),
"num_processors": Int(),
"cache_theta": Bool(),
"reuse_theta": Bool(),
"theta_name": Str()
}
_ELEMENT = Any()
# TODO: maybe better _DICTIONARY_FILTER_SCHEMA = build_schema_from_function(artm.Dictionary.filter)
# TODO: modalities, filter params - these all are dataset's options, not model's
# maybe make separate YML block for dataset?
BASE_SCHEMA = Map({
'regularizers': Seq(_ELEMENT),
Optional('scores'): Seq(_ELEMENT),
'stages': Seq(_ELEMENT),
'model': Map({
"dataset_path": Str(),
Optional("dictionary_filter_parameters"): Map({
Optional("class_id"): Str(),
Optional("min_df"): Float(),
Optional("max_df"): Float(),
Optional("min_df_rate"): Float(),
Optional("max_df_rate"): Float(),
Optional("min_tf"): Float(),
Optional("max_tf"): Float(),
Optional("max_dictionary_size"): Float(),
Optional("recalculate_value"): Bool(),
}),
Optional("keep_in_memory"): Bool(),
Optional("internals_folder_path"): Bool(),
Optional("modalities_to_use"): Seq(Str()),
Optional("modalities_weights"): Any(),
"main_modality": Str(),
}),
'topics': Map({
"background_topics": Seq(Str()) | Int() | EmptyList(),
"specific_topics": Seq(Str()) | Int() | EmptyList(),
})
})
KEY_DICTIONARY_FILTER_PARAMETERS = 'dictionary_filter_parameters'
def build_schema_from_signature(class_of_object, use_optional=True):
"""
Parameters
----------
class_of_object : class
Returns
-------
dict
each element is either str -> Validator or Optional(str) -> Validator
"""
choose_key_func = choose_key if use_optional else (lambda param: param.name)
return {choose_key_func(param): choose_validator(param)
for param in signature(class_of_object.__init__).parameters.values()
if param.name != 'self'}
def wrap_in_map(dictionary):
could_be_empty = all(isinstance(key, Optional) for key in dictionary)
if could_be_empty:
return Map(dictionary) | EmptyDict()
return Map(dictionary)
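# --- Illustrative sketch (not part of the original module) --------------------
# A hedged demonstration of the signature-based schema inference described in
# the module docstring; `DemoCube` is a made-up class, not part of TopicNet.
def _demo_schema_inference():
    class DemoCube:
        def __init__(self, num_iters: int = 5, tau: float = 0.1, name: str = "demo"):
            pass
    # All parameters have defaults, so every key becomes Optional(...) and the
    # validator is inferred from the annotation (Int, Float, Str respectively).
    schema = build_schema_from_signature(DemoCube)
    return wrap_in_map(schema)  # Map(...) | EmptyDict(), since all keys are Optional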
def build_schema_for_scores():
"""
Returns
-------
strictyaml.Map
schema used for validation and type-coercion
"""
schemas = {}
for elem in artm.scores.__all__:
if "Score" in elem:
class_of_object = getattr(artm.scores, elem)
# TODO: check if every key is Optional. If it is, then "| EmptyDict()"
# otherwise, just Map()
res = wrap_in_map(build_schema_from_signature(class_of_object))
specific_schema = Map({class_of_object.__name__: res})
schemas[class_of_object.__name__] = specific_schema
for elem in tnscores.__all__:
if "Score" in elem:
class_of_object = getattr(tnscores, elem)
res = build_schema_from_signature(class_of_object)
# res["name"] = Str() # TODO: support custom names
res = wrap_in_map(res)
specific_schema = Map({class_of_object.__name__: res})
schemas[class_of_object.__name__] = specific_schema
return schemas
def build_schema_for_regs():
"""
Returns
-------
strictyaml.Map
schema used for validation and type-coercion
"""
schemas = {}
for elem in artm.regularizers.__all__:
if "Regularizer" in elem:
class_of_object = getattr(artm.regularizers, elem)
res = build_schema_from_signature(class_of_object)
if elem in ["SmoothSparseThetaRegularizer", "SmoothSparsePhiRegularizer",
"DecorrelatorPhiRegularizer"]:
res[Optional("relative", default=None)] = Bool()
res = wrap_in_map(res)
specific_schema = Map({class_of_object.__name__: res})
schemas[class_of_object.__name__] = specific_schema
return schemas
def is_key_in_schema(key, schema):
if key in schema:
return True
return any(
key_val.key == key for key_val in schema
if isinstance(key_val, Optional)
)
def build_schema_for_cubes():
"""
Returns
-------
dict
each element is str -> strictyaml.Map
where key is name of cube,
value is a schema used for validation and type-coercion
"""
schemas = {}
for class_of_object in SUPPORTED_CUBES:
res = build_schema_from_signature(class_of_object)
# "selection" isn't used in __init__, but we will need it later
res["selection"] = Seq(Str())
# shortcut for strategy initialization
if is_key_in_schema("strategy", res):
signature_validation = {}
for strategy_class in SUPPORTED_STRATEGIES:
local_signature_validation = build_schema_from_signature(strategy_class)
signature_validation.update(local_signature_validation)
res[Optional("strategy_params")] = Map(signature_validation)
# we will deal with "values" later, but we can check at least some simple things already
if class_of_object.__name__ == "CubeCreator":
element = Map({"name": Str(), "values": Seq(Any())})
res["parameters"] = Seq(element)
if class_of_object.__name__ == "RegularizersModifierCube":
element = Map({
Optional("name"): Str(),
Optional("regularizer"): Any(),
Optional("tau_grid"): Seq(Float())
})
res["regularizer_parameters"] = element | Seq(element)
res = Map(res)
specific_schema = Map({class_of_object.__name__: res})
schemas[class_of_object.__name__] = specific_schema
return schemas
def preprocess_parameters_for_cube_creator(elem_args):
"""
This function does two things:
1) convert class_ids from
name: class_ids@text, values: [0, 1, 2, 3]
to
name: class_ids, values: {"@text": [0, 1, 2, 3]}
2) type conversion for "values" field.
Parameters
----------
elem_args: strictyaml.YAML object
(contains dict inside)
Returns
-------
new_elem_args: dict
"""
for param_portion in elem_args["parameters"]:
name = str(param_portion["name"])
if name.startswith("class_ids"):
validator = Float() | Seq(Float())
else:
validator = Seq(ARTM_TYPES[name])
param_schema = Map({
"name": Str(),
"values": validator
})
param_portion.revalidate(param_schema)
def handle_special_cases(elem_args, kwargs):
"""
Fixes kwargs in-place, handling special cases and shortcuts
(only strategy for now)
Parameters
----------
elem_args: dict
kwargs: dict
"""
# special case: shortcut for strategy
if "strategy" in elem_args:
strategy = None
for strategy_class in SUPPORTED_STRATEGIES:
if strategy_class.__name__ == elem_args["strategy"]:
strat_schema = build_schema_from_signature(strategy_class, use_optional=False)
strat_kwargs = {}
for key, value in elem_args["strategy_params"].items():
key = str(key)
value.revalidate(strat_schema[key])
strat_kwargs[key] = value.data
strategy = strategy_class(**strat_kwargs)
kwargs["strategy"] = strategy # or None if failed to identify it
def build_score(elemtype, elem_args, is_artm_score):
"""
Parameters
----------
elemtype : str
name of score
elem_args: dict
is_artm_score: bool
Returns
-------
instance of artm.scores.BaseScore or topicnet.cooking_machine.models.base_score
"""
module = artm.scores if is_artm_score else tnscores
class_of_object = getattr(module, elemtype)
kwargs = {name: value
for name, value in elem_args.items()}
return class_of_object(**kwargs)
def build_regularizer(elemtype, elem_args, specific_topic_names, background_topic_names):
"""
Parameters
----------
elemtype : str
name of regularizer
elem_args: dict
specific_topic_names: list of str
background_topic_names: list of str
Returns
-------
instance of artm.Regularizer
"""
class_of_object = getattr(artm.regularizers, elemtype)
kwargs = {name: value
for name, value in elem_args.items()}
# special case: shortcut for topic_names
if "topic_names" in kwargs:
if kwargs["topic_names"] == "background_topics":
kwargs["topic_names"] = background_topic_names
if kwargs["topic_names"] == "specific_topics":
kwargs["topic_names"] = specific_topic_names
return class_of_object(**kwargs)
def build_cube_settings(elemtype, elem_args):
"""
Parameters
----------
elemtype : str
name of cube
elem_args: strictyaml.YAML object
(contains dict inside)
Returns
-------
list of dict
"""
if elemtype == "CubeCreator":
preprocess_parameters_for_cube_creator(elem_args)
kwargs = {name: value
for name, value in elem_args.data.items()
if name not in ['selection', 'strategy', 'strategy_params']}
handle_special_cases(elem_args, kwargs)
return {elemtype: kwargs,
"selection": elem_args['selection'].data}
def _add_parsed_scores(parsed, topic_model):
""" """
for score in parsed.data.get('scores', []):
for elemtype, elem_args in score.items():
is_artm_score = elemtype in artm.scores.__all__
score_object = build_score(elemtype, elem_args, is_artm_score)
if is_artm_score:
topic_model._model.scores.add(score_object, overwrite=True)
else:
topic_model.custom_scores[elemtype] = score_object
def _add_parsed_regularizers(
parsed, model, specific_topic_names, background_topic_names, data_stats
):
""" """
regularizers = []
for stage in parsed.data['regularizers']:
for elemtype, elem_args in stage.items():
should_be_relative = None
if "relative" in elem_args:
should_be_relative = elem_args["relative"]
elem_args.pop("relative")
regularizer_object = build_regularizer(
elemtype, elem_args, specific_topic_names, background_topic_names
)
handle_regularizer(should_be_relative, model, regularizer_object, data_stats)
regularizers.append(model.regularizers[regularizer_object.name])
return regularizers
def parse_modalities_data(parsed):
has_modalities_to_use = is_key_in_schema("modalities_to_use", parsed["model"])
has_weights = is_key_in_schema("modalities_weights", parsed["model"])
main_modality = parsed["model"]["main_modality"]
# exactly one should be specified
if has_modalities_to_use == has_weights:
raise ValueError("Either 'modalities_to_use' or 'modalities_weights' should be specified")
if has_weights:
modalities_to_use = list(parsed["model"]["modalities_weights"].data)
if main_modality not in modalities_to_use:
modalities_to_use.append(main_modality)
local_schema = Map({
key: Float() for key in modalities_to_use
})
parsed["model"]["modalities_weights"].revalidate(local_schema)
modalities_weights = parsed["model"]["modalities_weights"].data
return modalities_weights
else:
modalities_to_use = parsed.data["model"]["modalities_to_use"]
return modalities_to_use
def parse(
yaml_string: str,
force_separate_thread: bool = False,
dataset_class: Type[Dataset] = Dataset
):
"""
Parameters
----------
yaml_string : str
force_separate_thread : bool
dataset_class : class
Returns
-------
cube_settings: list of dict
regularizers: list
topic_model: TopicModel
dataset: Dataset
"""
parsed = dirty_load(yaml_string, BASE_SCHEMA, allow_flow_style=True)
specific_topic_names, background_topic_names = create_default_topics(
parsed.data["topics"]["specific_topics"],
parsed.data["topics"]["background_topics"]
)
revalidate_section(parsed, "stages")
revalidate_section(parsed, "regularizers")
if "scores" in parsed:
revalidate_section(parsed, "scores")
dataset = dataset_class(
data_path=parsed.data["model"]["dataset_path"],
keep_in_memory=parsed.data["model"].get("keep_in_memory", True),
internals_folder_path=parsed.data["model"].get("internals_folder_path", None),
)
filter_parameters = parsed.data["model"].get(
KEY_DICTIONARY_FILTER_PARAMETERS, dict()
)
if len(filter_parameters) > 0:
filtered_dictionary = dataset.get_dictionary().filter(**filter_parameters)
dataset._cached_dict = filtered_dictionary
modalities_to_use = parse_modalities_data(parsed)
data_stats = count_vocab_size(dataset.get_dictionary(), modalities_to_use)
model = init_simple_default_model(
dataset=dataset,
modalities_to_use=modalities_to_use,
main_modality=parsed.data["model"]["main_modality"],
specific_topics=parsed.data["topics"]["specific_topics"],
background_topics=parsed.data["topics"]["background_topics"],
)
regularizers = _add_parsed_regularizers(
parsed, model, specific_topic_names, background_topic_names, data_stats
)
topic_model = TopicModel(model)
_add_parsed_scores(parsed, topic_model)
cube_settings = list()
for stage in parsed['stages']:
for elemtype, elem_args in stage.items():
settings = build_cube_settings(elemtype.data, elem_args)
settings[elemtype]["separate_thread"] = force_separate_thread
cube_settings.append(settings)
return cube_settings, regularizers, topic_model, dataset
def revalidate_section(parsed, section):
"""
Performs in-place type coercion and validation
Parameters
----------
parsed : strictyaml.YAML object
(half-parsed, half-validated chunk of config)
section: str
"""
if section == "stages":
schemas = build_schema_for_cubes()
elif section == "regularizers":
schemas = build_schema_for_regs()
elif section == "scores":
schemas = build_schema_for_scores()
else:
raise ValueError(f"Unknown section name '{section}'")
for i, stage in enumerate(parsed[section]):
assert len(stage) == 1
name = list(stage.data)[0]
if name not in schemas:
raise ValueError(f"Unsupported {section} value: {name} at line {stage.start_line}")
local_schema = schemas[name]
stage.revalidate(local_schema)
def build_experiment_environment_from_yaml_config(
yaml_string,
experiment_id,
save_path,
force_separate_thread=False,
):
"""
Wraps up parameter extraction and class instance creation
from yaml formatted string
together with the method that builds experiment pipeline from
given experiment parameters (model, cubes, regularizers, etc)
Parameters
----------
yaml_string: str
config that contains the whole experiment pipeline description
with its parameters
save_path: str
path to the folder to save experiment logs and models
experiment_id: str
name of the experiment folder
force_separate_thread: bool default = False
experimental feature that packs model training into
separate process which is killed upon training completion
by default is not used
Returns
-------
tuple experiment, dataset instances of corresponding classes from topicnet
"""
settings, regs, model, dataset = parse(yaml_string, force_separate_thread)
# TODO: handle dynamic addition of regularizers
experiment = Experiment(experiment_id=experiment_id, save_path=save_path, topic_model=model)
experiment.build(settings)
return experiment, dataset
|
argoverse/utils/heuristic_ground_removal.py
|
gargrohin/argoverse-api
| 560 |
130270
|
<reponame>gargrohin/argoverse-api<filename>argoverse/utils/heuristic_ground_removal.py<gh_stars>100-1000
# <Copyright 2019, Argo AI, LLC. Released under the MIT license.>
import math
from typing import List
import numpy as np
LIDAR_RANGE = 250
GRID_DIST = 0.4
HEIGHT_VARIANCE_THRESHOLD = 0.001
HEIGHT_MEAN_THRESHOLD = -1
NUM_ANGLE_BINS = 2000
def filter_ground_pts_polar_grid_mean_var(lidar_pts: np.ndarray) -> np.ndarray:
"""
We divide the world into polar voxels.
We aggregate the height statistics of all of the points that fall into each polar voxel.
If the mean is below a threshold, we call it a ground voxel.
If the z-axis variance is very low, we also call it a ground voxel.
Var(X) = E[X^2] - E[X]^2
Args:
lidar_pts: NumPy n-d array of shape (n,3)
Returns:
non_ground_lidar_pts: NumPy n-d array of shape (n,3)
"""
print("Total number of points (before filtering): ", lidar_pts.shape)
non_ground_lidar_pts: List[np.ndarray] = []
xyz_mean = np.mean(lidar_pts, axis=0)
# Zero-center the point cloud because we compute statistics around (0,0,0)-centered polar grid
lidar_pts -= xyz_mean
num_radial_bins = int(LIDAR_RANGE / GRID_DIST)
angle_increment = 2 * math.pi / NUM_ANGLE_BINS
ang_voxel_mean = np.zeros((NUM_ANGLE_BINS, num_radial_bins))
ang_voxel_variance = np.zeros((NUM_ANGLE_BINS, num_radial_bins))
num_elements_per_bin = np.zeros((NUM_ANGLE_BINS, num_radial_bins))
pts_per_bin: List[List[List[np.ndarray]]] = [[[] for _ in range(num_radial_bins)] for _ in range(NUM_ANGLE_BINS)]
for i in range(lidar_pts.shape[0]):
x = lidar_pts[i, 0]
y = lidar_pts[i, 1]
z = lidar_pts[i, 2]
dist_away = math.sqrt(x ** 2 + y ** 2)
angle_rad = np.arctan2(y, x)
if angle_rad <= 0:
angle_rad += 2 * math.pi
radial_bin = int(math.floor(dist_away / GRID_DIST))
angle_bin = int(math.floor(angle_rad / angle_increment))
ang_voxel_mean[angle_bin, radial_bin] += z
ang_voxel_variance[angle_bin, radial_bin] += z ** 2
num_elements_per_bin[angle_bin, radial_bin] += 1.0
pts_per_bin[angle_bin][radial_bin].append(lidar_pts[i, :])
for i in range(NUM_ANGLE_BINS):
for j in range(num_radial_bins):
if len(pts_per_bin[i][j]) > 0:
ang_voxel_mean[i, j] /= num_elements_per_bin[i, j]
ang_voxel_variance[i, j] = (ang_voxel_variance[i, j] / num_elements_per_bin[i, j]) - ang_voxel_mean[
i, j
] ** 2
if (ang_voxel_mean[i, j] > HEIGHT_MEAN_THRESHOLD) or (
ang_voxel_variance[i, j] > HEIGHT_VARIANCE_THRESHOLD
):
non_ground_lidar_pts += pts_per_bin[i][j]
non_ground_lidar_pts_np = np.array(non_ground_lidar_pts)
print("Number of non-ground points: ", non_ground_lidar_pts_np.shape)
non_ground_lidar_pts_np += xyz_mean
return non_ground_lidar_pts_np
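# Illustrative sketch (not part of the original module): exercise the filter on a
# synthetic point cloud -- a flat "ground" plane plus a few elevated points. The
# cloud size, coordinates and seed below are arbitrary demonstration values.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    ground = np.column_stack([rng.uniform(-20, 20, 5000),
                              rng.uniform(-20, 20, 5000),
                              rng.normal(-1.8, 0.01, 5000)])
    objects = np.column_stack([rng.uniform(-20, 20, 500),
                               rng.uniform(-20, 20, 500),
                               rng.uniform(0.0, 2.0, 500)])
    cloud = np.vstack([ground, objects]).astype(np.float32)
    kept = filter_ground_pts_polar_grid_mean_var(cloud)
    print("Non-ground points kept:", kept.shape[0], "of", cloud.shape[0])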
|
semantic_segmentation/configs/convnext/upernet_convnext_tiny_512_160k_ade20k_ss.py
|
ParikhKadam/ConvNeXt
| 3,453 |
130271
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
_base_ = [
'../_base_/models/upernet_convnext.py', '../_base_/datasets/ade20k.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='ConvNeXt',
in_chans=3,
depths=[3, 3, 9, 3],
dims=[96, 192, 384, 768],
drop_path_rate=0.4,
layer_scale_init_value=1.0,
out_indices=[0, 1, 2, 3],
),
decode_head=dict(
in_channels=[96, 192, 384, 768],
num_classes=150,
),
auxiliary_head=dict(
in_channels=384,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341)),
)
optimizer = dict(constructor='LearningRateDecayOptimizerConstructor', _delete_=True, type='AdamW',
lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05,
paramwise_cfg={'decay_rate': 0.9,
'decay_type': 'stage_wise',
'num_layers': 6})
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
|
skfem/element/element_tri/element_tri_hermite.py
|
reverendbedford/scikit-fem
| 238 |
130305
|
<filename>skfem/element/element_tri/element_tri_hermite.py
import numpy as np
from ..element_global import ElementGlobal
from ...refdom import RefTri
class ElementTriHermite(ElementGlobal):
"""The Hermite element with 3 DOFs per vertex and one interior DOF."""
nodal_dofs = 3
interior_dofs = 1
maxdeg = 3
dofnames = ['u', 'u_x', 'u_y', 'u']
doflocs = np.array([[0., 0.],
[0., 0.],
[0., 0.],
[1., 0.],
[1., 0.],
[1., 0.],
[0., 1.],
[0., 1.],
[0., 1.],
[1 / 3, 1 / 3]])
refdom = RefTri
def gdof(self, F, w, i):
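        # DOFs 0..8 are vertex DOFs: vertex k = i // 3, with j = i % 3 selecting the
        # function value (j == 0), the x-derivative (j == 1) or the y-derivative
        # (j == 2); DOF 9 is the interior DOF, evaluated at the triangle centroid.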
if i < 9:
j = i % 3
k = int(i / 3)
if j == 0:
return F[()](*w['v'][k])
elif j == 1:
return F[(0,)](*w['v'][k])
elif j == 2:
return F[(1,)](*w['v'][k])
elif i == 9:
mx = (w['v'][0][0] + w['v'][1][0] + w['v'][2][0]) / 3
my = (w['v'][0][1] + w['v'][1][1] + w['v'][2][1]) / 3
return F[()](mx, my)
self._index_error()
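# Illustrative sketch (not part of the original module): inspect the DOF layout
# of the Hermite element defined above; only attributes from this file are used.
if __name__ == "__main__":
    print("nodal dofs per vertex:", ElementTriHermite.nodal_dofs)
    print("interior dofs:", ElementTriHermite.interior_dofs)
    print("dof names:", ElementTriHermite.dofnames)
    print("dof locations:\n", ElementTriHermite.doflocs)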
|
test/unit/transform/__init__.py
|
Wheelspawn/pudl
| 285 |
130314
|
<gh_stars>100-1000
"""Unit tests for the pudl.transform subpackage."""
|
gaphor/core/modeling/tests/test_diagram.py
|
vanillajonathan/gaphor
| 867 |
130329
|
import gaphas
import pytest
from gaphor.core.modeling import Diagram, Presentation, StyleSheet
class Example(gaphas.Element, Presentation):
def __init__(self, diagram, id):
super().__init__(connections=diagram.connections, diagram=diagram, id=id)
def unlink(self):
self.test_unlinked = True
super().unlink()
class ExampleLine(gaphas.Line, Presentation):
def __init__(self, diagram, id):
super().__init__(connections=diagram.connections, diagram=diagram, id=id)
def unlink(self):
self.test_unlinked = True
super().unlink()
@pytest.fixture
def diagram(element_factory):
return element_factory.create(Diagram)
def test_diagram_can_be_used_as_gtkview_model():
diagram = Diagram("id", None)
assert isinstance(diagram, gaphas.view.model.Model)
def test_canvas_is_saved():
diagram = Diagram("id", None)
saved_keys = []
diagram.save(lambda name, val: saved_keys.append(name))
assert "canvas" not in saved_keys
def test_canvas_item_is_created(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
assert example in diagram.get_all_items()
assert example.diagram is diagram
def test_canvas_is_unlinked(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
diagram.unlink()
assert example.test_unlinked
def test_can_only_add_diagram_items(element_factory):
diagram = element_factory.create(Diagram)
with pytest.raises(TypeError):
diagram.create(Diagram)
def test_diagram_stylesheet(element_factory):
diagram = element_factory.create(Diagram)
styleSheet = element_factory.create(StyleSheet)
assert diagram.styleSheet is styleSheet
class ViewMock:
def __init__(self):
self.removed_items = set()
def request_update(self, items, removed_items) -> None:
self.removed_items.update(removed_items)
def test_remove_presentation_triggers_view(element_factory):
diagram = element_factory.create(Diagram)
view = ViewMock()
diagram.register_view(view)
example = diagram.create(Example)
example.unlink()
assert example.diagram is None
assert example not in diagram.ownedPresentation
assert example in view.removed_items
def test_order_presentations_lines_are_last(diagram):
example_line = diagram.create(ExampleLine)
example = diagram.create(Example)
assert list(diagram.get_all_items()) == [example, example_line]
def test_order_presentations_line_is_grouped(diagram):
example_line = diagram.create(ExampleLine)
example_1 = diagram.create(Example)
example_2 = diagram.create(Example)
example_line.parent = example_1
assert list(diagram.get_all_items()) == [example_1, example_2, example_line]
def test_order_grouped_presentations(diagram):
example_1 = diagram.create(Example)
example_2 = diagram.create(Example)
example_1.parent = example_2
assert list(diagram.get_all_items()) == [example_2, example_1]
|
sequence_folders.py
|
flamehaze1115/DPSNet
| 217 |
130349
|
<filename>sequence_folders.py
import torch.utils.data as data
import numpy as np
from scipy.misc import imread
from path import Path
import random
def load_as_float(path):
return imread(path).astype(np.float32)
class SequenceFolder(data.Dataset):
"""A sequence data loader where the files are arranged in this way:
root/scene_1/0000000.jpg
root/scene_1/0000001.jpg
..
root/scene_1/cam.txt
root/scene_2/0000000.jpg
.
transform functions must take in a list of images and a numpy array (usually the intrinsics matrix)
"""
def __init__(self, root, seed=None, ttype='train.txt', sequence_length=2, transform=None, target_transform=None):
np.random.seed(seed)
random.seed(seed)
self.root = Path(root)
scene_list_path = self.root/ttype
scenes = [self.root/folder[:-1] for folder in open(scene_list_path)]
self.ttype = ttype
self.scenes = sorted(scenes)
self.transform = transform
self.crawl_folders(sequence_length)
def crawl_folders(self, sequence_length):
sequence_set = []
demi_length = sequence_length//2
for scene in self.scenes:
intrinsics = np.genfromtxt(scene/'cam.txt').astype(np.float32).reshape((3, 3))
poses = np.genfromtxt(scene/'poses.txt').astype(np.float32)
imgs = sorted(scene.files('*.jpg'))
if len(imgs) < sequence_length:
continue
for i in range(len(imgs)):
if i < demi_length:
shifts = list(range(0,sequence_length))
shifts.pop(i)
elif i >= len(imgs)-demi_length:
shifts = list(range(len(imgs)-sequence_length,len(imgs)))
shifts.pop(i-len(imgs))
else:
shifts = list(range(i-demi_length, i+(sequence_length+1)//2))
shifts.pop(demi_length)
img = imgs[i]
depth = img.dirname()/img.name[:-4] + '.npy'
pose_tgt = np.concatenate((poses[i,:].reshape((3,4)), np.array([[0,0,0,1]])), axis=0)
sample = {'intrinsics': intrinsics, 'tgt': img, 'tgt_depth': depth, 'ref_imgs': [], 'ref_poses': []}
for j in shifts:
sample['ref_imgs'].append(imgs[j])
pose_src = np.concatenate((poses[j,:].reshape((3,4)), np.array([[0,0,0,1]])), axis=0)
pose_rel = pose_src @ np.linalg.inv(pose_tgt)
pose = pose_rel[:3,:].reshape((1,3,4)).astype(np.float32)
sample['ref_poses'].append(pose)
sequence_set.append(sample)
if self.ttype == 'train.txt':
random.shuffle(sequence_set)
self.samples = sequence_set
def __getitem__(self, index):
sample = self.samples[index]
tgt_img = load_as_float(sample['tgt'])
tgt_depth = np.load(sample['tgt_depth'])
ref_imgs = [load_as_float(ref_img) for ref_img in sample['ref_imgs']]
ref_poses = sample['ref_poses']
if self.transform is not None:
imgs, tgt_depth, intrinsics = self.transform([tgt_img] + ref_imgs, tgt_depth, np.copy(sample['intrinsics']))
tgt_img = imgs[0]
ref_imgs = imgs[1:]
else:
intrinsics = np.copy(sample['intrinsics'])
return tgt_img, ref_imgs, ref_poses, intrinsics, np.linalg.inv(intrinsics), tgt_depth
def __len__(self):
return len(self.samples)
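# Illustrative sketch (not part of the original module): the loader above is
# typically wrapped in a torch DataLoader; the dataset root below is a made-up
# path and assumes the scene layout described in the class docstring.
if __name__ == "__main__":
    train_set = SequenceFolder("/path/to/dataset", seed=0, ttype="train.txt", sequence_length=2)
    train_loader = data.DataLoader(train_set, batch_size=4, shuffle=True, num_workers=2, pin_memory=True)
    for tgt_img, ref_imgs, ref_poses, intrinsics, intrinsics_inv, tgt_depth in train_loader:
        print(tgt_img.shape, len(ref_imgs), tgt_depth.shape)
        break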
|
gunicorn/arbiter.py
|
Bloomstack/gunicorn
| 6,851 |
130378
|
<reponame>Bloomstack/gunicorn
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import errno
import os
import random
import select
import signal
import sys
import time
import traceback
from gunicorn.errors import HaltServer, AppImportError
from gunicorn.pidfile import Pidfile
from gunicorn import sock, systemd, util
from gunicorn import __version__, SERVER_SOFTWARE
class Arbiter(object):
"""
Arbiter keeps the worker processes alive. It launches or
kills them as needed. It also manages application reloading
via SIGHUP/USR2.
"""
# A flag indicating if a worker failed to
# boot. If a worker process exits with
# this error code, the arbiter will terminate.
WORKER_BOOT_ERROR = 3
# A flag indicating if an application failed to be loaded
APP_LOAD_ERROR = 4
START_CTX = {}
LISTENERS = []
WORKERS = {}
PIPE = []
# I love dynamic languages
SIG_QUEUE = []
SIGNALS = [getattr(signal, "SIG%s" % x)
for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
SIG_NAMES = dict(
(getattr(signal, name), name[3:].lower()) for name in dir(signal)
if name[:3] == "SIG" and name[3] != "_"
)
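    # Illustrative note (not part of the original source): the master process is
    # driven entirely by these signals, e.g. an operator can add a worker with
    # `kill -TTIN <master_pid>` or remove one with `kill -TTOU <master_pid>`;
    # see the corresponding handle_* methods below.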
def __init__(self, app):
os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE
self._num_workers = None
self._last_logged_active_worker_count = None
self.log = None
self.setup(app)
self.pidfile = None
self.systemd = False
self.worker_age = 0
self.reexec_pid = 0
self.master_pid = 0
self.master_name = "Master"
cwd = util.getcwd()
args = sys.argv[:]
args.insert(0, sys.executable)
# init start context
self.START_CTX = {
"args": args,
"cwd": cwd,
0: sys.executable
}
def _get_num_workers(self):
return self._num_workers
def _set_num_workers(self, value):
old_value = self._num_workers
self._num_workers = value
self.cfg.nworkers_changed(self, value, old_value)
num_workers = property(_get_num_workers, _set_num_workers)
def setup(self, app):
self.app = app
self.cfg = app.cfg
if self.log is None:
self.log = self.cfg.logger_class(app.cfg)
# reopen files
if 'GUNICORN_FD' in os.environ:
self.log.reopen_files()
self.worker_class = self.cfg.worker_class
self.address = self.cfg.address
self.num_workers = self.cfg.workers
self.timeout = self.cfg.timeout
self.proc_name = self.cfg.proc_name
self.log.debug('Current configuration:\n{0}'.format(
'\n'.join(
' {0}: {1}'.format(config, value.value)
for config, value
in sorted(self.cfg.settings.items(),
key=lambda setting: setting[1]))))
# set environment variables
if self.cfg.env:
for k, v in self.cfg.env.items():
os.environ[k] = v
if self.cfg.preload_app:
self.app.wsgi()
def start(self):
"""\
Initialize the arbiter. Start listening and set pidfile if needed.
"""
self.log.info("Starting gunicorn %s", __version__)
if 'GUNICORN_PID' in os.environ:
self.master_pid = int(os.environ.get('GUNICORN_PID'))
self.proc_name = self.proc_name + ".2"
self.master_name = "Master.2"
self.pid = os.getpid()
if self.cfg.pidfile is not None:
pidname = self.cfg.pidfile
if self.master_pid != 0:
pidname += ".2"
self.pidfile = Pidfile(pidname)
self.pidfile.create(self.pid)
self.cfg.on_starting(self)
self.init_signals()
if not self.LISTENERS:
fds = None
listen_fds = systemd.listen_fds()
if listen_fds:
self.systemd = True
fds = range(systemd.SD_LISTEN_FDS_START,
systemd.SD_LISTEN_FDS_START + listen_fds)
elif self.master_pid:
fds = []
for fd in os.environ.pop('GUNICORN_FD').split(','):
fds.append(int(fd))
self.LISTENERS = sock.create_sockets(self.cfg, self.log, fds)
listeners_str = ",".join([str(l) for l in self.LISTENERS])
self.log.debug("Arbiter booted")
self.log.info("Listening at: %s (%s)", listeners_str, self.pid)
self.log.info("Using worker: %s", self.cfg.worker_class_str)
systemd.sd_notify("READY=1\nSTATUS=Gunicorn arbiter booted", self.log)
# check worker class requirements
if hasattr(self.worker_class, "check_config"):
self.worker_class.check_config(self.cfg, self.log)
self.cfg.when_ready(self)
def init_signals(self):
"""\
Initialize master signal handling. Most of the signals
are queued. Child signals only wake up the master.
"""
# close old PIPE
for p in self.PIPE:
os.close(p)
# initialize the pipe
self.PIPE = pair = os.pipe()
for p in pair:
util.set_non_blocking(p)
util.close_on_exec(p)
self.log.close_on_exec()
# initialize all signals
for s in self.SIGNALS:
signal.signal(s, self.signal)
signal.signal(signal.SIGCHLD, self.handle_chld)
def signal(self, sig, frame):
if len(self.SIG_QUEUE) < 5:
self.SIG_QUEUE.append(sig)
self.wakeup()
def run(self):
"Main master loop."
self.start()
util._setproctitle("master [%s]" % self.proc_name)
try:
self.manage_workers()
while True:
self.maybe_promote_master()
sig = self.SIG_QUEUE.pop(0) if self.SIG_QUEUE else None
if sig is None:
self.sleep()
self.murder_workers()
self.manage_workers()
continue
if sig not in self.SIG_NAMES:
self.log.info("Ignoring unknown signal: %s", sig)
continue
signame = self.SIG_NAMES.get(sig)
handler = getattr(self, "handle_%s" % signame, None)
if not handler:
self.log.error("Unhandled signal: %s", signame)
continue
self.log.info("Handling signal: %s", signame)
handler()
self.wakeup()
except (StopIteration, KeyboardInterrupt):
self.halt()
except HaltServer as inst:
self.halt(reason=inst.reason, exit_status=inst.exit_status)
except SystemExit:
raise
except Exception:
self.log.info("Unhandled exception in main loop",
exc_info=True)
self.stop(False)
if self.pidfile is not None:
self.pidfile.unlink()
sys.exit(-1)
def handle_chld(self, sig, frame):
"SIGCHLD handling"
self.reap_workers()
self.wakeup()
def handle_hup(self):
"""\
HUP handling.
- Reload configuration
- Start the new worker processes with a new configuration
- Gracefully shutdown the old worker processes
"""
self.log.info("Hang up: %s", self.master_name)
self.reload()
def handle_term(self):
"SIGTERM handling"
raise StopIteration
def handle_int(self):
"SIGINT handling"
self.stop(False)
raise StopIteration
def handle_quit(self):
"SIGQUIT handling"
self.stop(False)
raise StopIteration
def handle_ttin(self):
"""\
SIGTTIN handling.
Increases the number of workers by one.
"""
self.num_workers += 1
self.manage_workers()
def handle_ttou(self):
"""\
SIGTTOU handling.
Decreases the number of workers by one.
"""
if self.num_workers <= 1:
return
self.num_workers -= 1
self.manage_workers()
def handle_usr1(self):
"""\
SIGUSR1 handling.
Kill all workers by sending them a SIGUSR1
"""
self.log.reopen_files()
self.kill_workers(signal.SIGUSR1)
def handle_usr2(self):
"""\
SIGUSR2 handling.
Creates a new arbiter/worker set as a fork of the current
arbiter without affecting old workers. Use this to do live
        deployment with the ability to back out a change.
"""
self.reexec()
def handle_winch(self):
"""SIGWINCH handling"""
if self.cfg.daemon:
self.log.info("graceful stop of workers")
self.num_workers = 0
self.kill_workers(signal.SIGTERM)
else:
self.log.debug("SIGWINCH ignored. Not daemonized")
def maybe_promote_master(self):
if self.master_pid == 0:
return
if self.master_pid != os.getppid():
self.log.info("Master has been promoted.")
# reset master infos
self.master_name = "Master"
self.master_pid = 0
self.proc_name = self.cfg.proc_name
del os.environ['GUNICORN_PID']
# rename the pidfile
if self.pidfile is not None:
self.pidfile.rename(self.cfg.pidfile)
# reset proctitle
util._setproctitle("master [%s]" % self.proc_name)
def wakeup(self):
"""\
Wake up the arbiter by writing to the PIPE
"""
try:
os.write(self.PIPE[1], b'.')
except IOError as e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def halt(self, reason=None, exit_status=0):
""" halt arbiter """
self.stop()
self.log.info("Shutting down: %s", self.master_name)
if reason is not None:
self.log.info("Reason: %s", reason)
if self.pidfile is not None:
self.pidfile.unlink()
self.cfg.on_exit(self)
sys.exit(exit_status)
def sleep(self):
"""\
Sleep until PIPE is readable or we timeout.
A readable PIPE means a signal occurred.
"""
try:
ready = select.select([self.PIPE[0]], [], [], 1.0)
if not ready[0]:
return
while os.read(self.PIPE[0], 1):
pass
except (select.error, OSError) as e:
# TODO: select.error is a subclass of OSError since Python 3.3.
error_number = getattr(e, 'errno', e.args[0])
if error_number not in [errno.EAGAIN, errno.EINTR]:
raise
except KeyboardInterrupt:
sys.exit()
def stop(self, graceful=True):
"""\
Stop workers
        :attr graceful: boolean, if True (the default) workers will be
        killed gracefully (i.e. trying to wait for the current connection)
"""
unlink = (
self.reexec_pid == self.master_pid == 0
and not self.systemd
and not self.cfg.reuse_port
)
sock.close_sockets(self.LISTENERS, unlink)
self.LISTENERS = []
sig = signal.SIGTERM
if not graceful:
sig = signal.SIGQUIT
limit = time.time() + self.cfg.graceful_timeout
# instruct the workers to exit
self.kill_workers(sig)
# wait until the graceful timeout
while self.WORKERS and time.time() < limit:
time.sleep(0.1)
self.kill_workers(signal.SIGKILL)
def reexec(self):
"""\
Relaunch the master and workers.
"""
if self.reexec_pid != 0:
self.log.warning("USR2 signal ignored. Child exists.")
return
if self.master_pid != 0:
self.log.warning("USR2 signal ignored. Parent exists.")
return
master_pid = os.getpid()
self.reexec_pid = os.fork()
if self.reexec_pid != 0:
return
self.cfg.pre_exec(self)
environ = self.cfg.env_orig.copy()
environ['GUNICORN_PID'] = str(master_pid)
if self.systemd:
environ['LISTEN_PID'] = str(os.getpid())
environ['LISTEN_FDS'] = str(len(self.LISTENERS))
else:
environ['GUNICORN_FD'] = ','.join(
str(l.fileno()) for l in self.LISTENERS)
os.chdir(self.START_CTX['cwd'])
# exec the process using the original environment
os.execvpe(self.START_CTX[0], self.START_CTX['args'], environ)
def reload(self):
old_address = self.cfg.address
# reset old environment
for k in self.cfg.env:
if k in self.cfg.env_orig:
# reset the key to the value it had before
# we launched gunicorn
os.environ[k] = self.cfg.env_orig[k]
else:
# delete the value set by gunicorn
try:
del os.environ[k]
except KeyError:
pass
# reload conf
self.app.reload()
self.setup(self.app)
# reopen log files
self.log.reopen_files()
# do we need to change listener ?
if old_address != self.cfg.address:
# close all listeners
for l in self.LISTENERS:
l.close()
# init new listeners
self.LISTENERS = sock.create_sockets(self.cfg, self.log)
listeners_str = ",".join([str(l) for l in self.LISTENERS])
self.log.info("Listening at: %s", listeners_str)
# do some actions on reload
self.cfg.on_reload(self)
# unlink pidfile
if self.pidfile is not None:
self.pidfile.unlink()
# create new pidfile
if self.cfg.pidfile is not None:
self.pidfile = Pidfile(self.cfg.pidfile)
self.pidfile.create(self.pid)
# set new proc_name
util._setproctitle("master [%s]" % self.proc_name)
# spawn new workers
for _ in range(self.cfg.workers):
self.spawn_worker()
# manage workers
self.manage_workers()
def murder_workers(self):
"""\
Kill unused/idle workers
"""
if not self.timeout:
return
workers = list(self.WORKERS.items())
for (pid, worker) in workers:
try:
if time.time() - worker.tmp.last_update() <= self.timeout:
continue
except (OSError, ValueError):
continue
if not worker.aborted:
self.log.critical("WORKER TIMEOUT (pid:%s)", pid)
worker.aborted = True
self.kill_worker(pid, signal.SIGABRT)
else:
self.kill_worker(pid, signal.SIGKILL)
def reap_workers(self):
"""\
Reap workers to avoid zombie processes
"""
try:
while True:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if self.reexec_pid == wpid:
self.reexec_pid = 0
else:
# A worker was terminated. If the termination reason was
# that it could not boot, we'll shut it down to avoid
# infinite start/stop cycles.
exitcode = status >> 8
if exitcode == self.WORKER_BOOT_ERROR:
reason = "Worker failed to boot."
raise HaltServer(reason, self.WORKER_BOOT_ERROR)
if exitcode == self.APP_LOAD_ERROR:
reason = "App failed to load."
raise HaltServer(reason, self.APP_LOAD_ERROR)
worker = self.WORKERS.pop(wpid, None)
if not worker:
continue
worker.tmp.close()
self.cfg.child_exit(self, worker)
except OSError as e:
if e.errno != errno.ECHILD:
raise
def manage_workers(self):
"""\
Maintain the number of workers by spawning or killing
as required.
"""
if len(self.WORKERS) < self.num_workers:
self.spawn_workers()
workers = self.WORKERS.items()
workers = sorted(workers, key=lambda w: w[1].age)
while len(workers) > self.num_workers:
(pid, _) = workers.pop(0)
self.kill_worker(pid, signal.SIGTERM)
active_worker_count = len(workers)
if self._last_logged_active_worker_count != active_worker_count:
self._last_logged_active_worker_count = active_worker_count
self.log.debug("{0} workers".format(active_worker_count),
extra={"metric": "gunicorn.workers",
"value": active_worker_count,
"mtype": "gauge"})
def spawn_worker(self):
self.worker_age += 1
worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS,
self.app, self.timeout / 2.0,
self.cfg, self.log)
self.cfg.pre_fork(self, worker)
pid = os.fork()
if pid != 0:
worker.pid = pid
self.WORKERS[pid] = worker
return pid
# Do not inherit the temporary files of other workers
for sibling in self.WORKERS.values():
sibling.tmp.close()
# Process Child
worker.pid = os.getpid()
try:
util._setproctitle("worker [%s]" % self.proc_name)
self.log.info("Booting worker with pid: %s", worker.pid)
self.cfg.post_fork(self, worker)
worker.init_process()
sys.exit(0)
except SystemExit:
raise
except AppImportError as e:
self.log.debug("Exception while loading the application",
exc_info=True)
print("%s" % e, file=sys.stderr)
sys.stderr.flush()
sys.exit(self.APP_LOAD_ERROR)
except Exception:
self.log.exception("Exception in worker process")
if not worker.booted:
sys.exit(self.WORKER_BOOT_ERROR)
sys.exit(-1)
finally:
self.log.info("Worker exiting (pid: %s)", worker.pid)
try:
worker.tmp.close()
self.cfg.worker_exit(self, worker)
except Exception:
self.log.warning("Exception during worker exit:\n%s",
traceback.format_exc())
def spawn_workers(self):
"""\
Spawn new workers as needed.
This is where a worker process leaves the main loop
of the master process.
"""
for _ in range(self.num_workers - len(self.WORKERS)):
self.spawn_worker()
time.sleep(0.1 * random.random())
def kill_workers(self, sig):
"""\
Kill all workers with the signal `sig`
:attr sig: `signal.SIG*` value
"""
worker_pids = list(self.WORKERS.keys())
for pid in worker_pids:
self.kill_worker(pid, sig)
def kill_worker(self, pid, sig):
"""\
Kill a worker
:attr pid: int, worker pid
:attr sig: `signal.SIG*` value
"""
try:
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH:
try:
worker = self.WORKERS.pop(pid)
worker.tmp.close()
self.cfg.worker_exit(self, worker)
return
except (KeyError, OSError):
return
raise
|
tests/conftest.py
|
hugovk/tldextract
| 1,200 |
130405
|
<filename>tests/conftest.py<gh_stars>1000+
"""py.test standard config file."""
import logging
import pytest
import tldextract.cache
@pytest.fixture(autouse=True)
def reset_log_level():
"""Automatically reset log level verbosity between tests. Generally want
test output the Unix way: silence is golden."""
tldextract.cache._DID_LOG_UNABLE_TO_CACHE = ( # pylint: disable=protected-access
False
)
logging.getLogger().setLevel(logging.WARN)
|
tests/test_timekline.py
|
rayshifu/easyquotation
| 3,630 |
130407
|
# coding:utf8
import unittest
from unittest import mock
import easyquotation
class TestTimeklineQuotation(unittest.TestCase):
MOCK_RESPONSE_DATA = [
(
"000001",
'min_data="\\n\\\ndate:180413\\n\\\n0930 11.64 29727\\n\\\n0931 11.65 52410\\n\\\n";',
)
]
def setUp(self):
self._obj = easyquotation.use("timekline")
@mock.patch(
"easyquotation.timekline.basequotation.BaseQuotation._fetch_stock_data"
)
def test_fetch_stock_data(self, mock_super_fetch):
test_cases = [
(["000001"], ["test_data"], [("000001", "test_data")]),
(
["000001", "000002"],
["test_data", None],
[("000001", "test_data")],
),
([], [], []),
]
for stock_list, resp_data, expected in test_cases:
mock_super_fetch.return_value = resp_data
res = self._obj._fetch_stock_data(stock_list)
self.assertListEqual(res, expected)
def test_format_response_data(self):
        expected = {
"000001": {
"date": "20180413",
"time_data": [
["0930", "11.64", "29727"],
["0931", "11.65", "52410"],
],
}
}
result = self._obj.format_response_data(self.MOCK_RESPONSE_DATA)
        self.assertDictEqual(result, expected)
if __name__ == "__main__":
unittest.main()
|
recipes/Python/576556_Generating_random_numbers_arbitrary/recipe-576556.py
|
tdiprima/code
| 2,023 |
130418
|
import pylab
import numpy
class GeneralRandom:
"""This class enables us to generate random numbers with an arbitrary
distribution."""
def __init__(self, x = pylab.arange(-1.0, 1.0, .01), p = None, Nrl = 1000):
"""Initialize the lookup table (with default values if necessary)
Inputs:
x = random number values
p = probability density profile at that point
Nrl = number of reverse look up values between 0 and 1"""
        if p is None:
p = pylab.exp(-10*x**2.0)
self.set_pdf(x, p, Nrl)
def set_pdf(self, x, p, Nrl = 1000):
"""Generate the lookup tables.
x is the value of the random variate
pdf is its probability density
cdf is the cumulative pdf
inversecdf is the inverse look up table
"""
self.x = x
self.pdf = p/p.sum() #normalize it
self.cdf = self.pdf.cumsum()
self.inversecdfbins = Nrl
self.Nrl = Nrl
y = pylab.arange(Nrl)/float(Nrl)
delta = 1.0/Nrl
self.inversecdf = pylab.zeros(Nrl)
self.inversecdf[0] = self.x[0]
cdf_idx = 0
        for n in range(1, self.inversecdfbins):
while self.cdf[cdf_idx] < y[n] and cdf_idx < Nrl:
cdf_idx += 1
self.inversecdf[n] = self.x[cdf_idx-1] + (self.x[cdf_idx] - self.x[cdf_idx-1]) * (y[n] - self.cdf[cdf_idx-1])/(self.cdf[cdf_idx] - self.cdf[cdf_idx-1])
if cdf_idx >= Nrl:
break
self.delta_inversecdf = pylab.concatenate((pylab.diff(self.inversecdf), [0]))
def random(self, N = 1000):
"""Give us N random numbers with the requested distribution"""
idx_f = numpy.random.uniform(size = N, high = self.Nrl-1)
idx = pylab.array([idx_f],'i')
y = self.inversecdf[idx] + (idx_f - idx)*self.delta_inversecdf[idx]
return y
def plot_pdf(self):
pylab.plot(self.x, self.pdf)
def self_test(self, N = 1000):
pylab.figure()
#The cdf
pylab.subplot(2,2,1)
pylab.plot(self.x, self.cdf)
#The inverse cdf
pylab.subplot(2,2,2)
y = pylab.arange(self.Nrl)/float(self.Nrl)
pylab.plot(y, self.inversecdf)
#The actual generated numbers
pylab.subplot(2,2,3)
y = self.random(N)
p1, edges = pylab.histogram(y, bins = 50,
range = (self.x.min(), self.x.max()),
normed = True, new = True)
x1 = 0.5*(edges[0:-1] + edges[1:])
pylab.plot(x1, p1/p1.max())
pylab.plot(self.x, self.pdf/self.pdf.max())
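# --- Usage sketch (added for illustration; not part of the original recipe) ---
# Draws samples from a custom triangular density via the inverse-CDF lookup table
# built by set_pdf(); the variable names below are hypothetical.
if __name__ == "__main__":
    xs = pylab.arange(-1.0, 1.0, .01)
    gr = GeneralRandom(x=xs, p=1.0 - abs(xs))  # p is normalized inside set_pdf
    samples = gr.random(N=5000)                # array of shape (1, 5000)
    pylab.hist(samples.flatten(), bins=50)     # should roughly match the triangle
    pylab.show()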
|
emukit/benchmarking/loop_benchmarking/benchmark_result.py
|
ndalchau/emukit
| 272 |
130453
|
from typing import List
import numpy as np
class BenchmarkResult:
def __init__(self, loop_names: List[str], n_repeats: int, metric_names: List[str]):
"""
:param loop_names: List of loop names
:param n_repeats: Number of random restarts in benchmarking
:param metric_names: List of metric names
"""
self.loop_names = loop_names
self.n_repeats = n_repeats
self.metric_names = metric_names
self._results = dict()
for loop_name in loop_names:
self._results[loop_name] = dict()
for metric_name in metric_names:
self._results[loop_name][metric_name] = []
for i in range(n_repeats):
self._results[loop_name][metric_name].append([])
def add_results(self, loop_name: str, i_repeat: int, metric_name: str, metric_values: np.ndarray) -> None:
"""
Add results for a specific loop, metric and repeat combination
:param loop_name: Name of loop
:param i_repeat: Index of repeat
:param metric_name: Name of metric
:param metric_values: Metric values to add
"""
self._results[loop_name][metric_name][i_repeat] = metric_values.flatten()
def extract_metric_as_array(self, loop_name: str, metric_name: str) -> np.ndarray:
"""
Returns results over all repeats and iterations for a specific metric and loop name pair
:param loop_name: Name of loop to return results for
:param metric_name: Name of metric to extract
:return: 2-d numpy array of shape (n_repeats x n_iterations)
"""
return np.array(self._results[loop_name][metric_name])
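# Usage sketch (added for illustration; not part of the original module). Results
# are stored as loop name -> metric name -> one list per repeat; the loop and
# metric names below are made up.
if __name__ == "__main__":
    result = BenchmarkResult(loop_names=["random", "bayes_opt"], n_repeats=1,
                             metric_names=["min_value"])
    result.add_results("bayes_opt", 0, "min_value", np.array([3.0, 1.5, 0.7]))
    # shape is (n_repeats, n_iterations), i.e. (1, 3) for this toy example
    print(result.extract_metric_as_array("bayes_opt", "min_value").shape)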
|
tensorflow/python/kernel_tests/batch_dataset_op_test.py
|
ryorda/tensorflow-viennacl
| 522 |
130515
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class BatchDatasetTest(test.TestCase):
def testBatchDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count) -> BatchDataset(batch_size).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
iterator = (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count).batch(batch_size).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([[None] + list(c.shape[1:]) for c in components],
[t.shape.as_list() for t in get_next])
with self.test_session() as sess:
# Batch of a finite input, where the batch_size divides the
# total number of elements.
sess.run(init_op, feed_dict={count: 28, batch_size: 14})
num_batches = (28 * 7) // 14
for i in range(num_batches):
result = sess.run(get_next)
for component, result_component in zip(components, result):
for j in range(14):
self.assertAllEqual(component[(i*14 + j) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Batch of a finite input, where the batch_size does not
# divide the total number of elements.
sess.run(init_op, feed_dict={count: 14, batch_size: 8})
# We expect (num_batches - 1) full-sized batches.
num_batches = int(math.ceil((14 * 7) / 8))
for i in range(num_batches - 1):
result = sess.run(get_next)
for component, result_component in zip(components, result):
for j in range(8):
self.assertAllEqual(component[(i*8 + j) % 7]**2,
result_component[j])
result = sess.run(get_next)
for component, result_component in zip(components, result):
for j in range((14 * 7) % 8):
self.assertAllEqual(component[((num_batches - 1)*8 + j) % 7]**2,
result_component[j])
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Batch of an empty input should fail straight away.
sess.run(init_op, feed_dict={count: 0, batch_size: 8})
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Empty batch should be an initialization time error.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={count: 14, batch_size: 0})
def testPaddedBatchDataset(self):
seq_lens = array_ops.placeholder(dtypes.int32, shape=[None])
padded_shape = array_ops.placeholder(dtypes.int64, shape=[1])
iterator = (dataset_ops.Dataset.from_tensor_slices(seq_lens)
.map(lambda x: array_ops.fill([x], x)).padded_batch(
4,
padded_shapes=padded_shape).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Test with random sequence lengths, and max padding.
random_seq_lens = np.random.randint(20, size=(32,)).astype(np.int32)
sess.run(init_op, feed_dict={padded_shape: [-1],
seq_lens: random_seq_lens})
for i in range(8):
result = sess.run(get_next)
padded_len = np.max(result)
self.assertEqual((4, padded_len), result.shape)
for j in range(4):
seq_len = random_seq_lens[(i*4)+j]
self.assertAllEqual(result[j, :seq_len], [seq_len] * seq_len)
self.assertAllEqual(result[j, seq_len:], [0] * (padded_len - seq_len))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test with random sequence lengths, and constant padding.
sess.run(init_op, feed_dict={padded_shape: [25],
seq_lens: random_seq_lens})
for i in range(8):
result = sess.run(get_next)
self.assertEqual((4, 25), result.shape)
for j in range(4):
seq_len = random_seq_lens[(i*4)+j]
self.assertAllEqual(result[j, :seq_len], [seq_len] * seq_len)
self.assertAllEqual(result[j, seq_len:], [0] * (25 - seq_len))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test correct handling of empty tensors.
sess.run(init_op, feed_dict={padded_shape: [-1],
seq_lens: [0, 0, 0, 0]})
result = sess.run(get_next)
self.assertAllEqual([[], [], [], []], result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test error handling with constant sequence lengths, and
# too-short padding.
sess.run(init_op, feed_dict={padded_shape: [5],
seq_lens: [6, 5, 5, 5]})
with self.assertRaises(errors.DataLossError):
result = sess.run(get_next)
def testPaddedBatchDatasetNonDefaultPadding(self):
seq_lens = array_ops.placeholder(dtypes.int32, shape=[None])
padded_shape = array_ops.placeholder(dtypes.int64, shape=[1])
def fill_tuple(x):
filled = array_ops.fill([x], x)
return (filled, string_ops.as_string(filled))
iterator = (dataset_ops.Dataset.from_tensor_slices(seq_lens).map(fill_tuple)
.padded_batch(
4,
padded_shapes=(padded_shape, padded_shape),
padding_values=(-1, "<end>")).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Test with random sequence lengths, and max padding.
random_seq_lens = np.random.randint(20, size=(32,)).astype(np.int32)
sess.run(init_op, feed_dict={padded_shape: [-1],
seq_lens: random_seq_lens})
for i in range(8):
result = sess.run(get_next)
padded_len = np.max(result[0])
self.assertEqual((4, padded_len), result[0].shape)
self.assertEqual((4, padded_len), result[1].shape)
for j in range(4):
seq_len = random_seq_lens[(i*4)+j]
self.assertAllEqual(result[0][j, :seq_len], [seq_len] * seq_len)
self.assertAllEqual(result[0][j, seq_len:],
[-1] * (padded_len - seq_len))
self.assertAllEqual(result[1][j, :seq_len],
[compat.as_bytes(str(seq_len))] * seq_len)
self.assertAllEqual(result[1][j, seq_len:],
[b"<end>"] * (padded_len - seq_len))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPaddedBatchDatasetShapeSpecifications(self):
int_placeholder = array_ops.placeholder(dtypes.int32)
float_placeholder = array_ops.placeholder(dtypes.float32)
string_placeholder = array_ops.placeholder(dtypes.string)
input_dataset = dataset_ops.Dataset.from_tensors(
(int_placeholder, float_placeholder, string_placeholder))
# Test different ways of specifying the `padded_shapes` argument.
dynamic_padding_from_tensor_shapes = input_dataset.padded_batch(
32,
padded_shapes=(tensor_shape.TensorShape([None]),
tensor_shape.TensorShape([None, None]),
tensor_shape.TensorShape([37])))
dynamic_padding_from_lists = input_dataset.padded_batch(
32, padded_shapes=([None], [None, None], [37]))
dynamic_padding_from_lists_with_minus_one = input_dataset.padded_batch(
32, padded_shapes=([-1], [-1, -1], [37]))
dynamic_padding_from_tensors = input_dataset.padded_batch(
32,
padded_shapes=(constant_op.constant([-1], dtype=dtypes.int64),
constant_op.constant([-1, -1], dtype=dtypes.int64),
constant_op.constant([37], dtype=dtypes.int64)))
for dataset in [dynamic_padding_from_tensor_shapes,
dynamic_padding_from_lists,
dynamic_padding_from_lists_with_minus_one,
dynamic_padding_from_tensors]:
self.assertEqual([None, None], dataset.output_shapes[0].as_list())
self.assertEqual([None, None, None], dataset.output_shapes[1].as_list())
self.assertEqual([None, 37], dataset.output_shapes[2].as_list())
if __name__ == "__main__":
test.main()
|
venv/Lib/site-packages/nipype/utils/nipype_cmd.py
|
richung99/digitizePlots
| 585 |
130516
|
# -*- coding: utf-8 -*-
import os
import argparse
import inspect
import sys
from ..interfaces.base import Interface, InputMultiPath, traits
from ..interfaces.base.support import get_trait_desc
from .misc import str2bool
def listClasses(module=None):
if module:
__import__(module)
pkg = sys.modules[module]
print("Available Interfaces:")
for k, v in sorted(list(pkg.__dict__.items())):
if inspect.isclass(v) and issubclass(v, Interface):
print("\t%s" % k)
def add_options(parser=None, module=None, function=None):
interface = None
if parser and module and function:
__import__(module)
interface = getattr(sys.modules[module], function)()
inputs = interface.input_spec()
for name, spec in sorted(interface.inputs.traits(transient=None).items()):
desc = "\n".join(get_trait_desc(inputs, name, spec))[len(name) + 2 :]
args = {}
if spec.is_trait_type(traits.Bool):
args["action"] = "store_true"
if hasattr(spec, "mandatory") and spec.mandatory:
if spec.is_trait_type(InputMultiPath):
args["nargs"] = "+"
parser.add_argument(name, help=desc, **args)
else:
if spec.is_trait_type(InputMultiPath):
args["nargs"] = "*"
parser.add_argument("--%s" % name, dest=name, help=desc, **args)
return parser, interface
def run_instance(interface, options):
print("setting function inputs")
for input_name, _ in list(interface.inputs.items()):
if getattr(options, input_name) is not None:
value = getattr(options, input_name)
try:
setattr(interface.inputs, input_name, value)
except ValueError as e:
print("Error when setting the value of %s: '%s'" % (input_name, str(e)))
print(interface.inputs)
res = interface.run()
print(res.outputs)
def main(argv):
if len(argv) == 2 and not argv[1].startswith("-"):
listClasses(argv[1])
sys.exit(0)
parser = argparse.ArgumentParser(
description="Nipype interface runner", prog=argv[0]
)
parser.add_argument("module", type=str, help="Module name")
parser.add_argument("interface", type=str, help="Interface name")
parsed = parser.parse_args(args=argv[1:3])
_, prog = os.path.split(argv[0])
interface_parser = argparse.ArgumentParser(
description="Run %s" % parsed.interface, prog=" ".join([prog] + argv[1:3])
)
interface_parser, interface = add_options(
interface_parser, parsed.module, parsed.interface
)
args = interface_parser.parse_args(args=argv[3:])
run_instance(interface, args)
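# Illustrative invocations (derived from the argparse setup above; the module and
# interface names are examples only):
#   nipype_cmd nipype.interfaces.utility              # lists available Interfaces
#   nipype_cmd nipype.interfaces.utility Rename ...   # runs one interface with its inputs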
|
tests/models/data/horovod/__init__.py
|
neptune-ml/pytorch-lightning
| 15,666 |
130517
|
<filename>tests/models/data/horovod/__init__.py
# this is needed only for mypy==0.800 as it understands only packages
|
tests/test_redundant.py
|
pooya/disco
| 786 |
130531
|
from disco.core import result_iterator
from disco.error import DataError
from disco.test import TestCase, TestJob, FailedReply
class RedundantJob(TestJob):
@staticmethod
def map(e, params):
yield int(e), ''
@staticmethod
def reduce(iter, params):
yield sum(k for k, v in iter), ''
class RedundantTestCase(TestCase):
def serve(self, path):
if 'fail' in path:
raise FailedReply()
return '{0}\n'.format(int(path) * 10)
def runTest(self):
input = ['1', ['2_fail', '2_still_fail', '200'], '3', ['4_fail', '400']]
self.job = RedundantJob().run(input=self.test_server.urls(input))
self.assertResults(self.job, [(6040, '')])
class RedundantOutputTestCase(TestCase):
# This is a tricky test case now that comm.py tries really
# hard to access the url, which in this case doesn't exist
# (http://nonode). The test could take almost 10 minutes.
# We should have a way to lower the number of retries
# globally.
"""
def test_unavailable(self):
from disco.schemes import scheme_raw
results = list(result_iterator([['http://nonode', 'raw://hello']],
reader=scheme_raw.input_stream))
self.assertEquals(results, ['hello'])
"""
def test_corrupt(self):
def corrupt_reader(fd, size, url, params):
yield 'hello'
if 'corrupt' in url:
raise DataError("Corrupt!", url)
yield 'there'
self.assertAllEqual(result_iterator([['raw://corrupt'] * 9 +
['raw://decent']],
reader=corrupt_reader),
['hello', 'there'])
|
datasets/csqa_new/extract_submission.py
|
gogowhy/ENet_framework
| 244 |
130535
|
<reponame>gogowhy/ENet_framework
import json
with open("test_rand_split.jsonl", 'r') as fw:
for line in open("test_rand_split.jsonl", 'r').readlines():
data = json.loads(line.strip())
print(data["id"]+','+data["answerKey"])
|
superset/reports/logs/api.py
|
razzius/superset
| 18,621 |
130550
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from typing import Any, Dict, Optional
from flask import Response
from flask_appbuilder.api import expose, permission_name, protect, rison, safe
from flask_appbuilder.api.schemas import get_item_schema, get_list_schema
from flask_appbuilder.hooks import before_request
from flask_appbuilder.models.sqla.interface import SQLAInterface
from superset import is_feature_enabled
from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod
from superset.models.reports import ReportExecutionLog
from superset.reports.logs.schemas import openapi_spec_methods_override
from superset.views.base_api import BaseSupersetModelRestApi
logger = logging.getLogger(__name__)
class ReportExecutionLogRestApi(BaseSupersetModelRestApi):
datamodel = SQLAInterface(ReportExecutionLog)
@before_request
def ensure_alert_reports_enabled(self) -> Optional[Response]:
if not is_feature_enabled("ALERT_REPORTS"):
return self.response_404()
return None
include_route_methods = {RouteMethod.GET, RouteMethod.GET_LIST}
method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP
class_permission_name = "ReportSchedule"
resource_name = "report"
allow_browser_login = True
show_columns = [
"id",
"scheduled_dttm",
"end_dttm",
"start_dttm",
"value",
"value_row_json",
"state",
"error_message",
"uuid",
]
list_columns = [
"id",
"scheduled_dttm",
"end_dttm",
"start_dttm",
"value",
"value_row_json",
"state",
"error_message",
"uuid",
]
order_columns = [
"state",
"value",
"error_message",
"end_dttm",
"start_dttm",
"scheduled_dttm",
]
openapi_spec_tag = "Report Schedules"
openapi_spec_methods = openapi_spec_methods_override
@staticmethod
def _apply_layered_relation_to_rison( # pylint: disable=invalid-name
layer_id: int, rison_parameters: Dict[str, Any]
) -> None:
if "filters" not in rison_parameters:
rison_parameters["filters"] = []
rison_parameters["filters"].append(
{"col": "report_schedule", "opr": "rel_o_m", "value": layer_id}
)
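    # Illustrative example: for layer_id=42 and an empty query, this appends
    # {"col": "report_schedule", "opr": "rel_o_m", "value": 42} to
    # rison_parameters["filters"], scoping the generic list/item endpoints to a
    # single report schedule.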
@expose("/<int:pk>/log/", methods=["GET"])
@protect()
@safe
@permission_name("get")
@rison(get_list_schema)
def get_list( # pylint: disable=arguments-differ
self, pk: int, **kwargs: Any
) -> Response:
"""Get a list of report schedule logs
---
get:
description: >-
Get a list of report schedule logs
parameters:
- in: path
schema:
type: integer
description: The report schedule id for these logs
name: pk
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_list_schema'
responses:
200:
description: Items from logs
content:
application/json:
schema:
type: object
properties:
ids:
description: >-
A list of log ids
type: array
items:
type: string
count:
description: >-
The total record count on the backend
type: number
result:
description: >-
The result from the get list query
type: array
items:
$ref: '#/components/schemas/{{self.__class__.__name__}}.get_list' # pylint: disable=line-too-long
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
self._apply_layered_relation_to_rison(pk, kwargs["rison"])
return self.get_list_headless(**kwargs)
@expose("/<int:pk>/log/<int:log_id>", methods=["GET"])
@protect()
@safe
@permission_name("get")
@rison(get_item_schema)
def get( # pylint: disable=arguments-differ
self, pk: int, log_id: int, **kwargs: Any
) -> Response:
"""Get a report schedule log
---
get:
description: >-
Get a report schedule log
parameters:
- in: path
schema:
type: integer
name: pk
description: The report schedule pk for log
- in: path
schema:
type: integer
name: log_id
description: The log pk
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_item_schema'
responses:
200:
description: Item log
content:
application/json:
schema:
type: object
properties:
id:
description: The log id
type: string
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.get'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
self._apply_layered_relation_to_rison(pk, kwargs["rison"])
return self.get_headless(log_id, **kwargs)
|
convlab2/policy/hdsa/multiwoz/predictor.py
|
Malavikka/ConvLab-2
| 339 |
130551
|
<reponame>Malavikka/ConvLab-2<gh_stars>100-1000
import os
import zipfile
import torch
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.tokenization import BertTokenizer
from convlab2.policy.hdsa.multiwoz.transformer import Constants
from convlab2.util.file_util import cached_path
from convlab2.util.multiwoz.dbquery import Database
def examine(domain, slot):
if slot == "addr":
slot = 'address'
elif slot == "post":
slot = 'postcode'
elif slot == "ref":
slot = 'ref'
elif slot == "car":
slot = "type"
elif slot == 'dest':
slot = 'destination'
elif domain == 'train' and slot == 'id':
slot = 'trainid'
elif slot == 'leave':
slot = 'leaveat'
elif slot == 'arrive':
slot = 'arriveby'
elif slot == 'price':
slot = 'pricerange'
elif slot == 'depart':
slot = 'departure'
elif slot == 'name':
slot = 'name'
elif slot == 'type':
slot = 'type'
elif slot == 'area':
slot = 'area'
elif slot == 'parking':
slot = 'parking'
elif slot == 'internet':
slot = 'internet'
elif slot == 'stars':
slot = 'stars'
elif slot == 'food':
slot = 'food'
elif slot == 'phone':
slot = 'phone'
elif slot == 'day':
slot = 'day'
else:
slot = 'illegal'
return slot
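# Illustrative mapping: examine('train', 'arrive') -> 'arriveby',
# examine('hotel', 'addr') -> 'address'; slots not listed above map to 'illegal'.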
def truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
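# Example (illustrative): with max_length=8, a 6-token tokens_a and a 5-token
# tokens_b are trimmed to 4 tokens each -- the longer list is popped first.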
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, file, turn, guid, text_m, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
                Only needs to be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.file = file
self.turn = turn
self.guid = guid
self.text_m = text_m
self.text_a = text_a
self.text_b = text_b
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, file, turn, input_ids, input_mask, segment_ids, label_id):
self.file = file
self.turn = turn
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class HDSA_predictor():
def __init__(self, archive_file, model_file=None, use_cuda=False):
if not os.path.isfile(archive_file):
if not model_file:
raise Exception("No model for DA-predictor is specified!")
archive_file = cached_path(model_file)
model_dir = os.path.dirname(os.path.abspath(__file__))
if not os.path.exists(os.path.join(model_dir, 'checkpoints')):
archive = zipfile.ZipFile(archive_file, 'r')
archive.extractall(model_dir)
load_dir = os.path.join(model_dir, "checkpoints/predictor/save_step_23926")
self.db=Database()
if not os.path.exists(load_dir):
archive = zipfile.ZipFile('{}.zip'.format(load_dir), 'r')
archive.extractall(os.path.dirname(load_dir))
self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
self.max_seq_length = 256
self.domain = 'restaurant'
self.model = BertForSequenceClassification.from_pretrained(load_dir,
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(-1)), num_labels=44)
self.device = 'cuda' if use_cuda else 'cpu'
self.model.to(self.device)
def gen_example(self, state):
file = ''
turn = 0
guid = 'infer'
act = state['user_action']
for w in act:
d=w[1]
if Constants.domains.index(d.lower()) < 8:
self.domain = d.lower()
hierarchical_act_vecs = [0 for _ in range(44)] # fake target
meta = state['belief_state']
constraints = []
if self.domain != 'bus':
for slot in meta[self.domain]['semi']:
if meta[self.domain]['semi'][slot] != "":
constraints.append([slot, meta[self.domain]['semi'][slot]])
query_result = self.db.query(self.domain, constraints)
if not query_result:
kb = {'count':'0'}
src = "no information"
else:
kb = query_result[0]
kb['count'] = str(len(query_result))
src = []
for k, v in kb.items():
k = examine(self.domain, k.lower())
if k != 'illegal' and isinstance(v, str):
src.extend([k, 'is', v])
src = " ".join(src)
usr = state['history'][-1][-1]
sys = state['history'][-2][-1] if len(state['history']) > 1 else None
example = InputExample(file, turn, guid, src, usr, sys, hierarchical_act_vecs)
kb['domain'] = self.domain
return example, kb
def gen_feature(self, example):
tokens_a = self.tokenizer.tokenize(example.text_a)
tokens_b = self.tokenizer.tokenize(example.text_b)
tokens_m = self.tokenizer.tokenize(example.text_m)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
truncate_seq_pair(tokens_a, tokens_b, self.max_seq_length - 3)
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * (len(tokens_a) + 2)
assert len(tokens) == len(segment_ids)
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
if len(tokens) < self.max_seq_length:
if len(tokens_m) > self.max_seq_length - len(tokens) - 1:
tokens_m = tokens_m[:self.max_seq_length - len(tokens) - 1]
tokens += tokens_m + ['[SEP]']
segment_ids += [0] * (len(tokens_m) + 1)
input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
padding = [0] * (self.max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == self.max_seq_length
assert len(input_mask) == self.max_seq_length
assert len(segment_ids) == self.max_seq_length
feature = InputFeatures(file=example.file,
turn=example.turn,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=example.label)
return feature
def predict(self, state):
example, kb = self.gen_example(state)
feature = self.gen_feature(example)
input_ids = torch.tensor([feature.input_ids], dtype=torch.long).to(self.device)
input_masks = torch.tensor([feature.input_mask], dtype=torch.long).to(self.device)
segment_ids = torch.tensor([feature.segment_ids], dtype=torch.long).to(self.device)
with torch.no_grad():
logits = self.model(input_ids, segment_ids, input_masks, labels=None)
logits = torch.sigmoid(logits)
preds = (logits > 0.4).float()
preds_numpy = preds.cpu().nonzero().squeeze().numpy()
# for i in preds_numpy:
# if i < 10:
# print(Constants.domains[i], end=' ')
# elif i < 17:
# print(Constants.functions[i-10], end=' ')
# else:
# print(Constants.arguments[i-17], end=' ')
# print()
return preds, kb
|
examples/domain_generalization/re_identification/baseline.py
|
neka-nat/Transfer-Learning-Library
| 1,474 |
130556
|
<reponame>neka-nat/Transfer-Learning-Library
"""
@author: <NAME>
@contact: <EMAIL>
"""
import random
import time
import warnings
import sys
import argparse
import shutil
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
from torch.nn import DataParallel
import torch.backends.cudnn as cudnn
from torch.optim import Adam
from torch.utils.data import DataLoader
sys.path.append('../../..')
from common.vision.models.reid.loss import CrossEntropyLossWithLabelSmooth, SoftTripletLoss
from common.vision.models.reid.identifier import ReIdentifier
import common.vision.datasets.reid as datasets
from common.vision.datasets.reid.convert import convert_to_pytorch_dataset
from common.utils.scheduler import WarmupMultiStepLR
from common.utils.metric.reid import validate, visualize_ranked_results
from common.utils.data import ForeverDataIterator, RandomMultipleGallerySampler
from common.utils.metric import accuracy
from common.utils.meter import AverageMeter, ProgressMeter
from common.utils.logger import CompleteLogger
sys.path.append('.')
import utils
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args: argparse.Namespace):
logger = CompleteLogger(args.log, args.phase)
print(args)
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
cudnn.benchmark = True
# Data loading code
train_transform = utils.get_train_transform(args.height, args.width, args.train_resizing,
random_horizontal_flip=True,
random_color_jitter=False,
random_gray_scale=False)
val_transform = utils.get_val_transform(args.height, args.width)
print("train_transform: ", train_transform)
print("val_transform: ", val_transform)
working_dir = osp.dirname(osp.abspath(__file__))
root = osp.join(working_dir, args.root)
# source dataset
source_dataset = datasets.__dict__[args.source](root=osp.join(root, args.source.lower()))
sampler = RandomMultipleGallerySampler(source_dataset.train, args.num_instances)
train_loader = DataLoader(
convert_to_pytorch_dataset(source_dataset.train, root=source_dataset.images_dir, transform=train_transform),
batch_size=args.batch_size, num_workers=args.workers, sampler=sampler, pin_memory=True, drop_last=True)
train_iter = ForeverDataIterator(train_loader)
val_loader = DataLoader(
convert_to_pytorch_dataset(list(set(source_dataset.query) | set(source_dataset.gallery)),
root=source_dataset.images_dir,
transform=val_transform),
batch_size=args.batch_size, num_workers=args.workers, shuffle=False, pin_memory=True)
# target dataset
target_dataset = datasets.__dict__[args.target](root=osp.join(root, args.target.lower()))
test_loader = DataLoader(
convert_to_pytorch_dataset(list(set(target_dataset.query) | set(target_dataset.gallery)),
root=target_dataset.images_dir,
transform=val_transform),
batch_size=args.batch_size, num_workers=args.workers, shuffle=False, pin_memory=True)
# create model
num_classes = source_dataset.num_train_pids
backbone = utils.get_model(args.arch)
pool_layer = nn.Identity() if args.no_pool else None
model = ReIdentifier(backbone, num_classes, finetune=args.finetune, pool_layer=pool_layer).to(device)
model = DataParallel(model)
# define optimizer and learning rate scheduler
optimizer = Adam(model.module.get_parameters(base_lr=args.lr, rate=args.rate), args.lr,
weight_decay=args.weight_decay)
lr_scheduler = WarmupMultiStepLR(optimizer, args.milestones, gamma=0.1, warmup_factor=0.1,
warmup_steps=args.warmup_steps)
# resume from the best checkpoint
if args.phase != 'train':
checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
model.load_state_dict(checkpoint)
    # analyze the model
if args.phase == 'analysis':
# plot t-SNE
utils.visualize_tsne(source_loader=val_loader, target_loader=test_loader, model=model,
filename=osp.join(logger.visualize_directory, 'analysis', 'TSNE.pdf'), device=device)
# visualize ranked results
visualize_ranked_results(test_loader, model, target_dataset.query, target_dataset.gallery, device,
visualize_dir=logger.visualize_directory, width=args.width, height=args.height,
rerank=args.rerank)
return
if args.phase == 'test':
print("Test on source domain:")
validate(val_loader, model, source_dataset.query, source_dataset.gallery, device, cmc_flag=True,
rerank=args.rerank)
print("Test on target domain:")
validate(test_loader, model, target_dataset.query, target_dataset.gallery, device, cmc_flag=True,
rerank=args.rerank)
return
# define loss function
criterion_ce = CrossEntropyLossWithLabelSmooth(num_classes).to(device)
criterion_triplet = SoftTripletLoss(margin=args.margin).to(device)
# start training
best_val_mAP = 0.
best_test_mAP = 0.
for epoch in range(args.epochs):
# print learning rate
print(lr_scheduler.get_lr())
# train for one epoch
train(train_iter, model, criterion_ce, criterion_triplet, optimizer, epoch, args)
# update learning rate
lr_scheduler.step()
if (epoch + 1) % args.eval_step == 0 or (epoch == args.epochs - 1):
# evaluate on validation set
print("Validation on source domain...")
_, val_mAP = validate(val_loader, model, source_dataset.query, source_dataset.gallery, device,
cmc_flag=True)
# remember best mAP and save checkpoint
torch.save(model.state_dict(), logger.get_checkpoint_path('latest'))
if val_mAP > best_val_mAP:
shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
best_val_mAP = max(val_mAP, best_val_mAP)
# evaluate on test set
print("Test on target domain...")
_, test_mAP = validate(test_loader, model, target_dataset.query, target_dataset.gallery, device,
cmc_flag=True, rerank=args.rerank)
best_test_mAP = max(test_mAP, best_test_mAP)
# evaluate on test set
model.load_state_dict(torch.load(logger.get_checkpoint_path('best')))
print("Test on target domain:")
_, test_mAP = validate(test_loader, model, target_dataset.query, target_dataset.gallery, device,
cmc_flag=True, rerank=args.rerank)
print("test mAP on target = {}".format(test_mAP))
print("oracle mAP on target = {}".format(best_test_mAP))
logger.close()
def train(train_iter: ForeverDataIterator, model, criterion_ce: CrossEntropyLossWithLabelSmooth,
criterion_triplet: SoftTripletLoss, optimizer: Adam, epoch: int, args: argparse.Namespace):
batch_time = AverageMeter('Time', ':4.2f')
data_time = AverageMeter('Data', ':3.1f')
losses_ce = AverageMeter('CeLoss', ':3.2f')
losses_triplet = AverageMeter('TripletLoss', ':3.2f')
losses = AverageMeter('Loss', ':3.2f')
cls_accs = AverageMeter('Cls Acc', ':3.1f')
progress = ProgressMeter(
args.iters_per_epoch,
[batch_time, data_time, losses_ce, losses_triplet, losses, cls_accs],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i in range(args.iters_per_epoch):
x, _, labels, _ = next(train_iter)
x = x.to(device)
labels = labels.to(device)
# measure data loading time
data_time.update(time.time() - end)
# compute output
y, f = model(x)
# cross entropy loss
loss_ce = criterion_ce(y, labels)
# triplet loss
loss_triplet = criterion_triplet(f, f, labels)
loss = loss_ce + loss_triplet * args.trade_off
cls_acc = accuracy(y, labels)[0]
losses_ce.update(loss_ce.item(), x.size(0))
losses_triplet.update(loss_triplet.item(), x.size(0))
losses.update(loss.item(), x.size(0))
cls_accs.update(cls_acc.item(), x.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
if __name__ == '__main__':
dataset_names = sorted(
name for name in datasets.__dict__
if not name.startswith("__") and callable(datasets.__dict__[name])
)
parser = argparse.ArgumentParser(description="Baseline for Domain Generalization ReID")
# dataset parameters
parser.add_argument('root', metavar='DIR',
help='root path of dataset')
parser.add_argument('-s', '--source', type=str, help='source domain')
parser.add_argument('-t', '--target', type=str, help='target domain')
parser.add_argument('--train-resizing', type=str, default='default')
# model parameters
parser.add_argument('-a', '--arch', metavar='ARCH', default='reid_resnet50',
choices=utils.get_model_names(),
help='backbone architecture: ' +
' | '.join(utils.get_model_names()) +
' (default: reid_resnet50)')
parser.add_argument('--no-pool', action='store_true', help='no pool layer after the feature extractor.')
    parser.add_argument('--finetune', action='store_true', help='whether to use 10x smaller lr for backbone')
parser.add_argument('--rate', type=float, default=0.2)
# training parameters
parser.add_argument('--trade-off', type=float, default=1,
help='trade-off hyper parameter between cross entropy loss and triplet loss')
parser.add_argument('--margin', type=float, default=0.0, help='margin for the triplet loss with batch hard')
parser.add_argument('-j', '--workers', type=int, default=4)
parser.add_argument('-b', '--batch-size', type=int, default=16)
parser.add_argument('--height', type=int, default=256, help="input height")
parser.add_argument('--width', type=int, default=128, help="input width")
parser.add_argument('--num-instances', type=int, default=4,
help="each minibatch consist of "
"(batch_size // num_instances) identities, and "
"each identity has num_instances instances, "
"default: 4")
parser.add_argument('--lr', type=float, default=0.00035,
help="initial learning rate")
parser.add_argument('--weight-decay', type=float, default=5e-4)
parser.add_argument('--epochs', type=int, default=80)
    parser.add_argument('--warmup-steps', type=int, default=10, help='number of warm-up steps')
parser.add_argument('--milestones', nargs='+', type=int, default=[40, 70],
help='milestones for the learning rate decay')
parser.add_argument('--eval-step', type=int, default=40)
parser.add_argument('--iters-per-epoch', type=int, default=400)
parser.add_argument('--print-freq', type=int, default=40)
parser.add_argument('--seed', default=None, type=int, help='seed for initializing training.')
    parser.add_argument('--rerank', action='store_true', help="whether to use re-ranking during evaluation")
parser.add_argument("--log", type=str, default='baseline',
help="Where to save logs, checkpoints and debugging images.")
parser.add_argument("--phase", type=str, default='train', choices=['train', 'test', 'analysis'],
help="When phase is 'test', only test the model."
"When phase is 'analysis', only analysis the model.")
args = parser.parse_args()
main(args)
|
model/resnet34.py
|
FAHAI-1/tensorflow-cifar100
| 109 |
130561
|
<reponame>FAHAI-1/tensorflow-cifar100<filename>model/resnet34.py
import tensorflow as tf
def identity_block2d(input_tensor, kernel_size, filters, stage, block, is_training, reuse, kernel_initializer=tf.contrib.layers.variance_scaling_initializer()):
filters1, filters2, filters3 = filters
conv_name_2 = 'conv' + str(stage) + '_' + str(block) + '_3x3'
bn_name_2 = 'bn' + str(stage) + '_' + str(block) + '_3x3'
x = tf.layers.conv2d(input_tensor, filters2, kernel_size, use_bias=False, padding='SAME', kernel_initializer=kernel_initializer, name=conv_name_2, reuse=reuse)
x = tf.layers.batch_normalization(x, training=is_training, name=bn_name_2, reuse=reuse)
x = tf.nn.relu(x)
conv_name_3 = 'conv' + str(stage) + '_' + str(block) + '_1x1_increase'
bn_name_3 = 'bn' + str(stage) + '_' + str(block) + '_1x1_increase'
x = tf.layers.conv2d(x, filters3, (kernel_size, kernel_size), use_bias=False, padding='SAME', kernel_initializer=kernel_initializer, name=conv_name_3, reuse=reuse)
x = tf.layers.batch_normalization(x, training=is_training, name=bn_name_3, reuse=reuse)
x = tf.add(input_tensor, x)
x = tf.nn.relu(x)
return x
def conv_block_2d(input_tensor, kernel_size, filters, stage, block, is_training, reuse, strides=(2, 2), kernel_initializer=tf.contrib.layers.variance_scaling_initializer()):
filters1, filters2, filters3 = filters
conv_name_2 = 'conv' + str(stage) + '_' + str(block) + '_3x3'
bn_name_2 = 'bn' + str(stage) + '_' + str(block) + '_3x3'
x = tf.layers.conv2d(input_tensor, filters2, (kernel_size, kernel_size), use_bias=False, strides=strides, padding='SAME', kernel_initializer=kernel_initializer, name=conv_name_2, reuse=reuse)
x = tf.layers.batch_normalization(x, training=is_training, name=bn_name_2, reuse=reuse)
x = tf.nn.relu(x)
conv_name_3 = 'conv' + str(stage) + '_' + str(block) + '_1x1_increase'
bn_name_3 = 'bn' + str(stage) + '_' + str(block) + '_1x1_increase'
x = tf.layers.conv2d(x, filters3, (kernel_size, kernel_size), use_bias=False, padding='SAME', kernel_initializer=kernel_initializer, name=conv_name_3, reuse=reuse)
x = tf.layers.batch_normalization(x, training=is_training, name=bn_name_3, reuse=reuse)
conv_name_4 = 'conv' + str(stage) + '_' + str(block) + '_1x1_shortcut'
bn_name_4 = 'bn' + str(stage) + '_' + str(block) + '_1x1_shortcut'
shortcut = tf.layers.conv2d(input_tensor, filters3, (kernel_size, kernel_size), use_bias=False, strides=strides, padding='SAME', kernel_initializer=kernel_initializer, name=conv_name_4, reuse=reuse)
shortcut = tf.layers.batch_normalization(shortcut, training=is_training, name=bn_name_4, reuse=reuse)
x = tf.add(shortcut, x)
x = tf.nn.relu(x)
return x
def resnet18(input_tensor, is_training=True, pooling_and_fc=True, reuse=False, kernel_initializer=tf.contrib.layers.variance_scaling_initializer()):
x = tf.layers.conv2d(input_tensor, 64, (3,3), strides=(1,1), kernel_initializer=kernel_initializer, use_bias=False, padding='SAME', name='conv1_1/3x3_s1', reuse=reuse)
x = tf.layers.batch_normalization(x, training=is_training, name='bn1_1/3x3_s1', reuse=reuse)
x = tf.nn.relu(x)
x1 = identity_block2d(x, 3, [48, 64, 64], stage=2, block='1b', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x1 = identity_block2d(x1, 3, [48, 64, 64], stage=3, block='1c', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x2 = conv_block_2d(x1, 3, [96, 128, 128], stage=3, block='2a', strides=(2,2), is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x2 = identity_block2d(x2, 3, [96, 128, 128], stage=3, block='2b', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x3 = conv_block_2d(x2, 3, [128, 256, 256], stage=4, block='3a', strides=(2,2), is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x3 = identity_block2d(x3, 3, [128, 256, 256], stage=4, block='3b', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x4 = conv_block_2d(x3, 3, [256, 512, 512], stage=5, block='4a', strides=(2,2), is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x4 = identity_block2d(x4, 3, [256, 512, 512], stage=5, block='4b', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
# print('before gap: ', x4)
x4 = tf.reduce_mean(x4, [1,2])
# print('after gap: ', x4)
# flatten = tf.contrib.layers.flatten(x4)
prob = tf.layers.dense(x4, 100, reuse=reuse, kernel_initializer=tf.contrib.layers.xavier_initializer())
# prob = tf.layers.batch_normalization(prob, training=is_training, name='fbn', reuse=reuse)
# print('prob', prob)
return prob
def resnet34(input_tensor, is_training=True, pooling_and_fc=True, reuse=False, kernel_initializer=tf.contrib.layers.variance_scaling_initializer()):
x = tf.layers.conv2d(input_tensor, 64, (3,3), strides=(1,1), kernel_initializer=kernel_initializer, use_bias=False, padding='SAME', name='conv1_1/3x3_s1', reuse=reuse)
x = tf.layers.batch_normalization(x, training=is_training, name='bn1_1/3x3_s1', reuse=reuse)
x = tf.nn.relu(x)
x1 = identity_block2d(x, 3, [48, 64, 64], stage=1, block='1a', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x1 = identity_block2d(x1, 3, [48, 64, 64], stage=1, block='1b', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x1 = identity_block2d(x1, 3, [48, 64, 64], stage=1, block='1c', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x2 = conv_block_2d(x1, 3, [96, 128, 128], stage=2, block='2a', strides=(2,2), is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x2 = identity_block2d(x2, 3, [96, 128, 128], stage=2, block='2b', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x2 = identity_block2d(x2, 3, [96, 128, 128], stage=2, block='2c', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x2 = identity_block2d(x2, 3, [96, 128, 128], stage=2, block='2d', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x3 = conv_block_2d(x2, 3, [128, 256, 256], stage=3, block='3a', strides=(2,2), is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x3 = identity_block2d(x3, 3, [128, 256, 256], stage=3, block='3b', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x3 = identity_block2d(x3, 3, [128, 256, 256], stage=3, block='3c', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x3 = identity_block2d(x3, 3, [128, 256, 256], stage=3, block='3d', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x3 = identity_block2d(x3, 3, [128, 256, 256], stage=3, block='3e', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x3 = identity_block2d(x3, 3, [128, 256, 256], stage=3, block='3f', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x4 = conv_block_2d(x3, 3, [256, 512, 512], stage=4, block='4a', strides=(2,2), is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x4 = identity_block2d(x4, 3, [256, 512, 512], stage=4, block='4b', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
x4 = identity_block2d(x4, 3, [256, 512, 512], stage=4, block='4c', is_training=is_training, reuse=reuse, kernel_initializer=kernel_initializer)
# print('before gap: ', x4)
x4 = tf.reduce_mean(x4, [1,2])
# print('after gap: ', x4)
# flatten = tf.contrib.layers.flatten(x4)
prob = tf.layers.dense(x4, 100, reuse=reuse, kernel_initializer=tf.contrib.layers.xavier_initializer(), bias_initializer=tf.zeros_initializer())
# prob = tf.layers.batch_normalization(prob, training=is_training, name='fbn', reuse=reuse)
# print('prob', prob)
return prob
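# Minimal usage sketch, assuming TF 1.x graph mode (implied by tf.layers/tf.contrib)
# and 32x32x3 inputs; the 100-way dense layer above suggests CIFAR-100. Names are
# illustrative and numpy is assumed to be imported as np.
#   images = tf.placeholder(tf.float32, [None, 32, 32, 3])
#   logits = resnet18(images, is_training=True)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       batch_logits = sess.run(logits, feed_dict={images: np.zeros((2, 32, 32, 3))})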
data_managers/data_manager_homer_preparsed/data_manager/homer_genome_preparse.py | supernord/tools-iuc | 142 | 130586
<gh_stars>100-1000
#!/usr/bin/env python
# <NAME> for bowtie2
# Modified by <NAME> for homer
from __future__ import print_function
import json
import optparse
import os
import subprocess
import sys
DEFAULT_DATA_TABLE_NAME = "homer_preparse"
def get_id_name(params, dbkey, fasta_description=None):
# TODO: ensure sequence_id is unique and does not already appear in location file
sequence_id = params['param_dict']['sequence_id']
if not sequence_id:
sequence_id = dbkey
sequence_name = params['param_dict']['sequence_name']
if not sequence_name:
sequence_name = fasta_description
if not sequence_name:
sequence_name = dbkey
return sequence_id, sequence_name
def homer_preparse(data_manager_dict, fasta_filename, params, target_directory, dbkey, sequence_id,
sequence_name, size, mask, version,
data_table_name=DEFAULT_DATA_TABLE_NAME):
args = ['preparseGenome.pl', fasta_filename, '-size', str(size), '-preparsedDir', target_directory]
if mask:
args.append('-mask')
proc = subprocess.Popen(args=args, shell=False, cwd=target_directory)
return_code = proc.wait()
if return_code:
print("Error preparsing genome.", file=sys.stderr)
sys.exit(return_code)
mask_suffix = 'r' if mask else ''
mask_suffix_name = ' masked' if mask else ''
data_table_entry = dict(value=sequence_id + mask_suffix + '_' + str(size), dbkey=dbkey,
mask=str(mask), size=str(size), name=sequence_name + mask_suffix_name + ' (' + str(size) + 'bp)',
path=sequence_id + mask_suffix + '_' + str(size),
path_fasta=fasta_filename,
version=version)
_add_data_table_entry(data_manager_dict, data_table_name, data_table_entry)
def _add_data_table_entry(data_manager_dict, data_table_name, data_table_entry):
data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get(data_table_name, [])
data_manager_dict['data_tables'][data_table_name].append(data_table_entry)
return data_manager_dict
def main():
parser = optparse.OptionParser()
parser.add_option('-f', '--fasta_filename', dest='fasta_filename', action='store', type="string", default=None, help='fasta_filename')
parser.add_option('-d', '--fasta_dbkey', dest='fasta_dbkey', action='store', type="string", default=None, help='fasta_dbkey')
parser.add_option('-t', '--fasta_description', dest='fasta_description', action='store', type="string", default=None, help='fasta_description')
parser.add_option('-s', '--size', dest='size', action='store', type="int", default=200, help='fragment size')
parser.add_option('-m', '--mask', dest='mask', action='store_true', default=False, help='mask the lower case bases (repeats)')
parser.add_option('-n', '--data_table_name', dest='data_table_name', action='store', type="string", default=None, help='data_table_name')
parser.add_option('--index_version', dest='index_version', action='store', type="string", default=None, help='index version')
(options, args) = parser.parse_args()
filename = args[0]
with open(filename) as fh:
params = json.load(fh)
target_directory = params['output_data'][0]['extra_files_path']
os.mkdir(target_directory)
data_manager_dict = {}
dbkey = options.fasta_dbkey
if dbkey in [None, '', '?']:
raise Exception('"%s" is not a valid dbkey. You must specify a valid dbkey.' % (dbkey))
sequence_id, sequence_name = get_id_name(params, dbkey=dbkey, fasta_description=options.fasta_description)
# preparse the genome
homer_preparse(data_manager_dict, options.fasta_filename, params, target_directory, dbkey, sequence_id,
sequence_name, options.size, options.mask, options.index_version,
data_table_name=options.data_table_name or DEFAULT_DATA_TABLE_NAME)
# save info to json file
with open(filename, 'w') as fh:
json.dump(data_manager_dict, fh, sort_keys=True)
if __name__ == "__main__":
main()
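# Invocation sketch (Galaxy normally drives this data manager; flag names follow the
# options defined above, while the genome path, dbkey, version and trailing params
# JSON path are illustrative values):
#   python homer_genome_preparse.py -f genome.fa -d hg38 -s 200 -m --index_version 4.11 galaxy_params.json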
samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/BasicLinearTransforms.py | thisisgopalmandal/opencv | 56,632 | 130592
<reponame>thisisgopalmandal/opencv
from __future__ import print_function
from builtins import input
import cv2 as cv
import numpy as np
import argparse
# Read image given by user
## [basic-linear-transform-load]
parser = argparse.ArgumentParser(description='Code for Changing the contrast and brightness of an image! tutorial.')
parser.add_argument('--input', help='Path to input image.', default='lena.jpg')
args = parser.parse_args()
image = cv.imread(cv.samples.findFile(args.input))
if image is None:
print('Could not open or find the image: ', args.input)
exit(0)
## [basic-linear-transform-load]
## [basic-linear-transform-output]
new_image = np.zeros(image.shape, image.dtype)
## [basic-linear-transform-output]
## [basic-linear-transform-parameters]
alpha = 1.0 # Simple contrast control
beta = 0 # Simple brightness control
# Initialize values
print(' Basic Linear Transforms ')
print('-------------------------')
try:
alpha = float(input('* Enter the alpha value [1.0-3.0]: '))
beta = int(input('* Enter the beta value [0-100]: '))
except ValueError:
print('Error, not a number')
## [basic-linear-transform-parameters]
# Do the operation new_image(i,j) = alpha*image(i,j) + beta
# Instead of these 'for' loops we could have used simply:
# new_image = cv.convertScaleAbs(image, alpha=alpha, beta=beta)
# but we wanted to show you how to access the pixels :)
## [basic-linear-transform-operation]
for y in range(image.shape[0]):
for x in range(image.shape[1]):
for c in range(image.shape[2]):
new_image[y,x,c] = np.clip(alpha*image[y,x,c] + beta, 0, 255)
## [basic-linear-transform-operation]
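# A vectorized equivalent of the per-pixel loop above (same clipped alpha/beta transform,
# no Python-level loops); kept as a commented sketch since the explicit loop is the point
# of this tutorial:
#   new_image = np.clip(alpha * image.astype(np.float64) + beta, 0, 255).astype(np.uint8)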
## [basic-linear-transform-display]
# Show stuff
cv.imshow('Original Image', image)
cv.imshow('New Image', new_image)
# Wait until user press some key
cv.waitKey()
## [basic-linear-transform-display]
pyairctrl/http_client.py | isvogor-foi/py-air-control | 203 | 130600
<filename>pyairctrl/http_client.py
"""HTTP Client."""
# pylint: disable=invalid-name, missing-class-docstring, missing-function-docstring
import base64
import binascii
import configparser
import json
import os
import random
import socket
import urllib.request
import xml.etree.ElementTree as ET
from collections import OrderedDict
from Cryptodome.Cipher import AES
from Cryptodome.Util.Padding import pad, unpad
G = int(
"<KEY>",
16,
)
P = int(
"B10B8F96A080E01DDE92DE5EAE5D54EC52C99FBCFB06A3C69A6A9DCA52D23B616073E28675A23D189838EF1E2EE652C013ECB4AEA906112324975C3CD49B83BFACCBDD7D90C4BD7098488E9C219A73724EFFD6FAE5644738FAA31A4FF55BCCC0A151AF5F0DC8B4BD45BF37DF365C1A65E68CFDA76D4DA708DF1FB2BC2E4A4371",
16,
)
def aes_decrypt(data, key):
iv = bytes(16)
cipher = AES.new(key, AES.MODE_CBC, iv)
return cipher.decrypt(data)
def encrypt(values, key):
# prepend two filler bytes ("AA") to the JSON body; the protocol ignores the first two bytes
data = "AA" + json.dumps(values)
data = pad(bytearray(data, "ascii"), 16, style="pkcs7")
iv = bytes(16)
cipher = AES.new(key, AES.MODE_CBC, iv)
data_enc = cipher.encrypt(data)
return base64.b64encode(data_enc)
def decrypt(data, key):
payload = base64.b64decode(data)
data = aes_decrypt(payload, key)
# response starts with 2 random bytes, exclude them
response = unpad(data, 16, style="pkcs7")[2:]
return response.decode("ascii")
class HTTPAirClient:
@staticmethod
def ssdp(timeout=1, repeats=3):
addr = "172.16.58.3"
port = 1900
msg = "\r\n".join(
[
"M-SEARCH * HTTP/1.1",
"HOST: {}:{}".format(addr, port),
"ST: urn:philips-com:device:DiProduct:1",
"MX: 1",
'MAN: "ssdp:discover"',
"",
"",
]
).encode("ascii")
urls = {}
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if hasattr(socket, "SO_REUSEPORT"):
# SO_REUSEPORT is not supported on some systems
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
s.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 20)
s.settimeout(timeout)
for i in range(repeats):
s.sendto(msg, (addr, port))
try:
while True:
data, (ip, _) = s.recvfrom(1024)
url = next(
(
x
for x in data.decode("ascii").splitlines()
if x.startswith("LOCATION: ")
),
None,
)
urls.update({ip: url[10:]})
except socket.timeout:
pass
if len(urls):
break
resp = []
for ip in urls.keys():
with urllib.request.urlopen(urls[ip]) as response:
xml = ET.fromstring(response.read())
resp.append({"ip": ip})
ns = {"urn": "urn:schemas-upnp-org:device-1-0"}
for d in xml.findall("urn:device", ns):
for t in ["modelName", "modelNumber", "friendlyName"]:
resp[-1].update({t: d.find("urn:" + t, ns).text})
return resp
def __init__(self, host, debug=False):
self._host = host
self._session_key = None
self._debug = debug
self.load_key()
def _get_key(self):
if self._debug:
print("Exchanging secret key with the device ...")
url = "http://{}/di/v1/products/0/security".format(self._host)
a = random.getrandbits(256)
A = pow(G, a, P)
data = json.dumps({"diffie": format(A, "x")})
data_enc = data.encode("ascii")
req = urllib.request.Request(url=url, data=data_enc, method="PUT")
with urllib.request.urlopen(req) as response:
resp = response.read().decode("ascii")
dh = json.loads(resp)
key = dh["key"]
B = int(dh["hellman"], 16)
s = pow(B, a, P)
s_bytes = s.to_bytes(128, byteorder="big")[:16]
session_key = aes_decrypt(bytes.fromhex(key), s_bytes)
self._session_key = session_key[:16]
self._save_key()
def _save_key(self):
config = configparser.ConfigParser()
fpath = os.path.expanduser("~/.pyairctrl")
config.read(fpath)
if "keys" not in config.sections():
config["keys"] = {}
hex_key = binascii.hexlify(self._session_key).decode("ascii")
config["keys"][self._host] = hex_key
if self._debug:
print("Saving session_key {} to {}".format(hex_key, fpath))
with open(fpath, "w") as f:
config.write(f)
def load_key(self):
fpath = os.path.expanduser("~/.pyairctrl")
if os.path.isfile(fpath):
config = configparser.ConfigParser()
config.read(fpath)
if "keys" in config and self._host in config["keys"]:
hex_key = config["keys"][self._host]
self._session_key = bytes.fromhex(hex_key)
self._check_key()
else:
self._get_key()
else:
self._get_key()
return self._session_key
def _check_key(self):
url = "http://{}/di/v1/products/1/air".format(self._host)
self._get(url)
def set_values(self, values):
body = encrypt(values, self._session_key)
url = "http://{}/di/v1/products/1/air".format(self._host)
req = urllib.request.Request(url=url, data=body, method="PUT")
with urllib.request.urlopen(req) as response:
resp = response.read()
resp = decrypt(resp.decode("ascii"), self._session_key)
status = json.loads(resp)
return status
def set_wifi(self, ssid, pwd):
values = {}
if ssid:
values["ssid"] = ssid
if pwd:
values["password"] = <PASSWORD>
body = encrypt(values, self._session_key)
url = "http://{}/di/v1/products/0/wifi".format(self._host)
req = urllib.request.Request(url=url, data=body, method="PUT")
with urllib.request.urlopen(req) as response:
resp = response.read()
resp = decrypt(resp.decode("ascii"), self._session_key)
wifi = json.loads(resp)
return wifi
def _get_once(self, url):
with urllib.request.urlopen(url) as response:
resp = response.read()
resp = decrypt(resp.decode("ascii"), self._session_key)
return json.loads(resp, object_pairs_hook=OrderedDict)
def _get(self, url):
try:
return self._get_once(url)
except Exception as e:
if self._debug:
print("GET error: {}".format(str(e)))
print("Will retry after getting a new key ...")
self._get_key()
return self._get_once(url)
def get_status(self, debug=False):
url = "http://{}/di/v1/products/1/air".format(self._host)
status = self._get(url)
return status
def get_wifi(self):
url = "http://{}/di/v1/products/0/wifi".format(self._host)
wifi = self._get(url)
return wifi
def get_firmware(self):
url = "http://{}/di/v1/products/0/firmware".format(self._host)
firmware = self._get(url)
return firmware
def get_filters(self):
url = "http://{}/di/v1/products/1/fltsts".format(self._host)
filters = self._get(url)
return filters
def pair(self, client_id, client_secret):
values = {}
values["Pair"] = ["FI-AIR-AND", client_id, client_secret]
body = encrypt(values, self._session_key)
url = "http://{}/di/v1/products/0/pairing".format(self._host)
req = urllib.request.Request(url=url, data=body, method="PUT")
with urllib.request.urlopen(req) as response:
resp = response.read()
resp = decrypt(resp.decode("ascii"), self._session_key)
resp = json.loads(resp)
return resp
examples/multiline_textbox.py | pzahemszky/guizero | 320 | 130603
from guizero import App, TextBox, PushButton, Text
def show():
output.value = textbox.value
app = App()
textbox = TextBox(app, multiline=True, height=10, width=50, scrollbar=True)
textbox.value = "hello\ngoodbye\nno way\nthis is a very long stream of text, very long indeed, the best long line of text, its super bigly and very long, I dont think it could possibly be any better particularly as it was created by someone who is super good at creating long lines of text."
button = PushButton(app, text="Print", command=show)
output = Text(app)
app.display()
tests/torch/test_extensions_build.py | GreenWaves-Technologies/nncf | 136 | 130619
<filename>tests/torch/test_extensions_build.py
import os
import subprocess
import pytest
import pathlib
import shutil
import torch
from tests.common.helpers import TEST_ROOT
from tests.torch.helpers import Command
EXTENSIONS_BUILD_FILENAME = 'extensions_build_checks.py'
@pytest.mark.parametrize("venv_type, package_type,install_type",
[('venv', 'develop', 'GPU')])
def test_force_cuda_build(tmp_venv_with_nncf, install_type, tmp_path, package_type):
'''
Check that CUDA Extensions weren't initially built and \
then with TORCH_CUDA_ARCH_LIST were forced to be built
'''
cuda_home = os.environ.get('CUDA_HOME') or os.environ.get('CUDA_PATH')
if cuda_home is None:
try:
nvcc = subprocess.check_output(['which', 'nvcc'])
cuda_home = os.path.dirname(os.path.dirname(nvcc))
except subprocess.CalledProcessError:
if not cuda_home:
cuda_home = '/usr/local/cuda'
if not os.path.exists(cuda_home):
cuda_home = None
if not cuda_home and not torch.cuda.is_available():
pytest.skip('There is no CUDA on the machine. The test will be skipped')
venv_path = tmp_venv_with_nncf
torch_build_dir = tmp_path / 'extensions'
export_env_variables = "export CUDA_VISIBLE_DEVICES='' && export TORCH_EXTENSIONS_DIR={}".format(torch_build_dir)
python_executable_with_venv = ". {0}/bin/activate && {1} && {0}/bin/python".format(venv_path, export_env_variables)
run_path = tmp_path / 'run'
shutil.copy(TEST_ROOT / 'torch' / EXTENSIONS_BUILD_FILENAME, run_path)
torch_ext_dir = pathlib.Path(torch_build_dir)
assert not torch_ext_dir.exists()
mode = 'cpu'
command = Command("{} {}/extensions_build_checks.py {}".format(python_executable_with_venv, run_path, mode),
path=run_path)
command.run()
version_command = Command('{} -c "import torch; print(torch.__version__)"'.format(python_executable_with_venv),
path=run_path)
version_command.run()
torch_version = version_command.output[0].replace('\n', '')
cpu_ext_dir = (torch_ext_dir / 'nncf' / 'quantized_functions_cpu' / torch_version)
assert cpu_ext_dir.exists()
cpu_ext_so = (cpu_ext_dir / 'quantized_functions_cpu.so' )
assert cpu_ext_so.exists()
cuda_ext_dir = (torch_ext_dir / 'nncf'/ 'quantized_functions_cuda' / torch_version)
assert not cuda_ext_dir.exists()
cuda_ext_so = (cuda_ext_dir / 'quantized_functions_cuda.so')
assert not cuda_ext_so.exists()
cpu_ext_dir = (torch_ext_dir / 'nncf' / 'binarized_functions_cpu' / torch_version)
assert cpu_ext_dir.exists()
cpu_ext_so = (cpu_ext_dir / 'binarized_functions_cpu.so')
assert cpu_ext_so.exists()
cuda_ext_dir = (torch_ext_dir / 'nncf' / 'binarized_functions_cuda' / torch_version)
assert not cuda_ext_dir.exists()
cuda_ext_so = (cuda_ext_dir / 'binarized_functions_cuda.so')
assert not cuda_ext_so.exists()
mode = 'cuda'
command = Command("{} {}/extensions_build_checks.py {}".format(python_executable_with_venv, run_path, mode),
path=run_path)
command.run()
cuda_ext_dir = (torch_ext_dir / 'nncf' / 'quantized_functions_cuda' / torch_version)
assert cuda_ext_dir.exists()
cuda_ext_so = (cuda_ext_dir / 'quantized_functions_cuda.so')
assert cuda_ext_so.exists()
cuda_ext_dir = (torch_ext_dir / 'nncf' / 'binarized_functions_cuda' / torch_version)
assert cuda_ext_dir.exists()
cuda_ext_so = (cuda_ext_dir / 'binarized_functions_cuda.so')
assert cuda_ext_so.exists()
models/yolo_nano.py | RealNewNoob/yolo_nano | 295 | 130623
<reponame>RealNewNoob/yolo_nano<gh_stars>100-1000
import torch
import torch.nn as nn
import torch.nn.functional as F
from .basic_layers import conv1x1, conv3x3, EP, PEP, FCA, YOLOLayer
from utils.stats import build_targets, to_cpu, non_max_suppression
class YOLONano(nn.Module):
def __init__(self, num_classes, image_size):
super(YOLONano, self).__init__()
self.num_classes = num_classes
self.image_size = image_size
self.num_anchors = 3
self.yolo_channels = (self.num_classes + 5) * self.num_anchors
anchors52 = [[10,13], [16,30], [33,23]] # 52x52
anchors26 = [[30,61], [62,45], [59,119]] # 26x26
anchors13 = [[116,90], [156,198], [373,326]] # 13x13
# image: 416x416x3
self.conv1 = conv3x3(3, 12, stride=1) # output: 416x416x12
self.conv2 = conv3x3(12, 24, stride=2) # output: 208x208x24
self.pep1 = PEP(24, 24, 7, stride=1) # output: 208x208x24
self.ep1 = EP(24, 70, stride=2) # output: 104x104x70
self.pep2 = PEP(70, 70, 25, stride=1) # output: 104x104x70
self.pep3 = PEP(70, 70, 24, stride=1) # output: 104x104x70
self.ep2 = EP(70, 150, stride=2) # output: 52x52x150
self.pep4 = PEP(150, 150, 56, stride=1) # output: 52x52x150
self.conv3 = conv1x1(150, 150, stride=1) # output: 52x52x150
self.fca1 = FCA(150, 8) # output: 52x52x150
self.pep5 = PEP(150, 150, 73, stride=1) # output: 52x52x150
self.pep6 = PEP(150, 150, 71, stride=1) # output: 52x52x150
self.pep7 = PEP(150, 150, 75, stride=1) # output: 52x52x150
self.ep3 = EP(150, 325, stride=2) # output: 26x26x325
self.pep8 = PEP(325, 325, 132, stride=1) # output: 26x26x325
self.pep9 = PEP(325, 325, 124, stride=1) # output: 26x26x325
self.pep10 = PEP(325, 325, 141, stride=1) # output: 26x26x325
self.pep11 = PEP(325, 325, 140, stride=1) # output: 26x26x325
self.pep12 = PEP(325, 325, 137, stride=1) # output: 26x26x325
self.pep13 = PEP(325, 325, 135, stride=1) # output: 26x26x325
self.pep14 = PEP(325, 325, 133, stride=1) # output: 26x26x325
self.pep15 = PEP(325, 325, 140, stride=1) # output: 26x26x325
self.ep4 = EP(325, 545, stride=2) # output: 13x13x545
self.pep16 = PEP(545, 545, 276, stride=1) # output: 13x13x545
self.conv4 = conv1x1(545, 230, stride=1) # output: 13x13x230
self.ep5 = EP(230, 489, stride=1) # output: 13x13x489
self.pep17 = PEP(489, 469, 213, stride=1) # output: 13x13x469
self.conv5 = conv1x1(469, 189, stride=1) # output: 13x13x189
self.conv6 = conv1x1(189, 105, stride=1) # output: 13x13x105
# upsampling conv6 to 26x26x105
# concatenating [conv6, pep15] -> pep18 (26x26x430)
self.pep18 = PEP(430, 325, 113, stride=1) # output: 26x26x325
self.pep19 = PEP(325, 207, 99, stride=1) # output: 26x26x207
self.conv7 = conv1x1(207, 98, stride=1) # output: 26x26x98
self.conv8 = conv1x1(98, 47, stride=1) # output: 26x26x47
# upsampling conv8 to 52x52x47
# concatenating [conv8, pep7] -> pep20 (52x52x197)
self.pep20 = PEP(197, 122, 58, stride=1) # output: 52x52x122
self.pep21 = PEP(122, 87, 52, stride=1) # output: 52x52x87
self.pep22 = PEP(87, 93, 47, stride=1) # output: 52x52x93
self.conv9 = conv1x1(93, self.yolo_channels, stride=1, bn=False) # output: 52x52x yolo_channels
self.yolo_layer52 = YOLOLayer(anchors52, num_classes, img_dim=image_size)
# conv7 -> ep6
self.ep6 = EP(98, 183, stride=1) # output: 26x26x183
self.conv10 = conv1x1(183, self.yolo_channels, stride=1, bn=False) # output: 26x26x yolo_channels
self.yolo_layer26 = YOLOLayer(anchors26, num_classes, img_dim=image_size)
# conv5 -> ep7
self.ep7 = EP(189, 462, stride=1) # output: 13x13x462
self.conv11 = conv1x1(462, self.yolo_channels, stride=1, bn=False) # output: 13x13x yolo_channels
self.yolo_layer13 = YOLOLayer(anchors13, num_classes, img_dim=image_size)
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight = nn.init.xavier_normal_(m.weight, gain=0.02)
elif isinstance(m, nn.BatchNorm2d):
nn.init.normal_(m.weight.data, 1.0, 0.02)
m.bias.data.zero_()
def forward(self, x, targets=None):
loss = 0
yolo_outputs = []
image_size = x.size(2)
out = self.conv1(x)
out = self.conv2(out)
out = self.pep1(out)
out = self.ep1(out)
out = self.pep2(out)
out = self.pep3(out)
out = self.ep2(out)
out = self.pep4(out)
out = self.conv3(out)
out = self.fca1(out)
out = self.pep5(out)
out = self.pep6(out)
out_pep7 = self.pep7(out)
out = self.ep3(out_pep7)
out = self.pep8(out)
out = self.pep9(out)
out = self.pep10(out)
out = self.pep11(out)
out = self.pep12(out)
out = self.pep13(out)
out = self.pep14(out)
out_pep15 = self.pep15(out)
out = self.ep4(out_pep15)
out = self.pep16(out)
out = self.conv4(out)
out = self.ep5(out)
out = self.pep17(out)
out_conv5 = self.conv5(out)
out = F.interpolate(self.conv6(out_conv5), scale_factor=2)
out = torch.cat([out, out_pep15], dim=1)
out = self.pep18(out)
out = self.pep19(out)
out_conv7 = self.conv7(out)
out = F.interpolate(self.conv8(out_conv7), scale_factor=2)
out = torch.cat([out, out_pep7], dim=1)
out = self.pep20(out)
out = self.pep21(out)
out = self.pep22(out)
out_conv9 = self.conv9(out)
temp, layer_loss = self.yolo_layer52(out_conv9, targets, image_size)
loss += layer_loss
yolo_outputs.append(temp)
out = self.ep6(out_conv7)
out_conv10 = self.conv10(out)
temp, layer_loss = self.yolo_layer26(out_conv10, targets, image_size)
loss += layer_loss
yolo_outputs.append(temp)
out = self.ep7(out_conv5)
out_conv11 = self.conv11(out)
temp, layer_loss = self.yolo_layer13(out_conv11, targets, image_size)
loss += layer_loss
yolo_outputs.append(temp)
yolo_outputs = to_cpu(torch.cat(yolo_outputs, 1))
return yolo_outputs if targets is None else (loss, yolo_outputs)
def name(self):
return "YoloNano"
tests/test_core.py | victorvasil93/flask-ask | 2,028 | 130634
# -*- coding: utf-8 -*-
import unittest
from aniso8601.timezone import UTCOffset, build_utcoffset
from flask_ask.core import Ask
from datetime import datetime, timedelta
from mock import patch, MagicMock
import json
class FakeRequest(object):
""" Fake out a Flask request for testing purposes for now """
headers = {'Signaturecertchainurl': None, 'Signature': None}
def __init__(self, data):
self.data = json.dumps(data)
class TestCoreRoutines(unittest.TestCase):
""" Tests for core Flask Ask functionality """
def setUp(self):
self.mock_app = MagicMock()
self.mock_app.debug = True
self.mock_app.config = {'ASK_VERIFY_TIMESTAMP_DEBUG': False}
# XXX: this mess implies we should think about tidying up Ask._alexa_request
self.patch_current_app = patch('flask_ask.core.current_app', new=self.mock_app)
self.patch_load_cert = patch('flask_ask.core.verifier.load_certificate')
self.patch_verify_sig = patch('flask_ask.core.verifier.verify_signature')
self.patch_current_app.start()
self.patch_load_cert.start()
self.patch_verify_sig.start()
@patch('flask_ask.core.flask_request',
new=FakeRequest({'request': {'timestamp': 1234},
'session': {'application': {'applicationId': 1}}}))
def test_alexa_request_parsing(self):
ask = Ask()
ask._alexa_request()
def test_parse_timestamp(self):
utc = build_utcoffset('UTC', timedelta(hours=0))
result = Ask._parse_timestamp('2017-07-08T07:38:00Z')
self.assertEqual(datetime(2017, 7, 8, 7, 38, 0, 0, utc), result)
result = Ask._parse_timestamp(1234567890)
self.assertEqual(datetime(2009, 2, 13, 23, 31, 30), result)
with self.assertRaises(ValueError):
Ask._parse_timestamp(None)
def test_tries_parsing_on_valueerror(self):
max_timestamp = 253402300800
# should cause a ValueError normally
with self.assertRaises(ValueError):
datetime.utcfromtimestamp(max_timestamp)
# should safely parse, assuming scale change needed
# note: this assert looks odd, but Py2 handles the parsing
# differently, resulting in a differing timestamp
# due to more granularity of microseconds
result = Ask._parse_timestamp(max_timestamp)
self.assertEqual(datetime(1978, 1, 11, 21, 31, 40).timetuple()[0:6],
result.timetuple()[0:6])
with self.assertRaises(ValueError):
# still raise an error if too large
Ask._parse_timestamp(max_timestamp * 1000)
def tearDown(self):
self.patch_current_app.stop()
self.patch_load_cert.stop()
self.patch_verify_sig.stop()
exampleapp/dinosaurs/apps.py | jupiterFierce31/A-simple-example-of-a-Django-REST-app-Angular2 | 163 | 130647
<reponame>jupiterFierce31/A-simple-example-of-a-Django-REST-app-Angular2<filename>exampleapp/dinosaurs/apps.py
from django.apps import AppConfig
class DinosaursConfig(AppConfig):
name = 'dinosaurs'
section-04-research-and-development/preprocessors.py | karanvijaygit/DMLM | 477 | 130658
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class TemporalVariableTransformer(BaseEstimator, TransformerMixin):
# Temporal elapsed time transformer
def __init__(self, variables, reference_variable):
if not isinstance(variables, list):
raise ValueError('variables should be a list')
self.variables = variables
self.reference_variable = reference_variable
def fit(self, X, y=None):
# we need this step to fit the sklearn pipeline
return self
def transform(self, X):
# so that we do not over-write the original dataframe
X = X.copy()
for feature in self.variables:
X[feature] = X[self.reference_variable] - X[feature]
return X
# categorical variable mapper
class Mapper(BaseEstimator, TransformerMixin):
def __init__(self, variables, mappings):
if not isinstance(variables, list):
raise ValueError('variables should be a list')
self.variables = variables
self.mappings = mappings
def fit(self, X, y=None):
# we need the fit statement to accommodate the sklearn pipeline
return self
def transform(self, X):
X = X.copy()
for feature in self.variables:
X[feature] = X[feature].map(self.mappings)
return X
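# Minimal usage sketch (hypothetical column names and mapping):
#   df = pd.DataFrame({"YrSold": [2008, 2010], "YearBuilt": [2000, 1995], "Qual": ["Fa", "Gd"]})
#   df = TemporalVariableTransformer(["YearBuilt"], "YrSold").fit_transform(df)  # years elapsed
#   df = Mapper(["Qual"], {"Fa": 0, "Gd": 1}).fit_transform(df)                  # ordinal encoding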
tests/pyconverter-test/cases/array_index_type.py | jaydeetay/pxt | 977 | 130704
<reponame>jaydeetay/pxt
foo = [7]
i = foo[0]
testNamespace.numberArgument(i)
floss/logging_.py | mandiant/flare-floss | 145 | 130715
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved.
import logging
from enum import Enum
TRACE = logging.DEBUG - 1
setattr(logging, "TRACE", TRACE)
logging.addLevelName(TRACE, "TRACE")
class DebugLevel(int, Enum):
NONE = 0
DEFAULT = 1
TRACE = 2
SUPERTRACE = 3
GREY = "\x1b[38;21m"
CYAN = "\x1b[36;21m"
MAUVE = "\x1b[34;21m"
YELLOW = "\x1b[33;21m"
RED = "\x1b[31;21m"
BOLD_RED = "\x1b[31;1m"
RESET = "\x1b[0m"
def make_format(color):
return f"{color}%(levelname)s{RESET}: %(name)s: %(message)s"
FORMATS = {
logging.TRACE: make_format(MAUVE), # type: ignore
logging.DEBUG: make_format(GREY),
logging.INFO: make_format(CYAN),
logging.WARNING: make_format(YELLOW),
logging.ERROR: make_format(RED),
logging.CRITICAL: make_format(BOLD_RED),
}
FORMATTERS = {level: logging.Formatter(FORMATS[level]) for level in FORMATS.keys()}
class ColorFormatter(logging.Formatter):
"""
Logging Formatter to add colors and count warning / errors
via: https://stackoverflow.com/a/56944256/87207
"""
def format(self, record):
return FORMATTERS[record.levelno].format(record)
class LoggerWithTrace(logging.getLoggerClass()): # type: ignore
def trace(self, msg, *args, **kwargs):
self.log(TRACE, msg, *args, **kwargs)
logging.setLoggerClass(LoggerWithTrace)
def getLogger(name) -> LoggerWithTrace:
"""
a logging constructor that guarantees that the TRACE level is available.
use this just like `logging.getLogger`.
because we patch stdlib logging upon import of this module (side-effect),
and we can't be sure how callers order their imports,
then we want to provide a way to ensure that callers can access TRACE consistently.
if callers use `floss.logging_.getLogger()` instead of `logging.getLogger()`,
then they'll be guaranteed to have access to TRACE.
"""
return logging.getLogger(name) # type: ignore
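# Usage sketch: wire up the color formatter and emit a TRACE record (logger name illustrative):
#   handler = logging.StreamHandler()
#   handler.setFormatter(ColorFormatter())
#   log = getLogger("floss.example")
#   log.addHandler(handler)
#   log.setLevel(TRACE)
#   log.trace("very verbose detail")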
pororo/tasks/semantic_role_labeling.py | jayten42/pororo | 1,137 | 130758
"""Semantic Role Labeling related modeling class"""
from copy import deepcopy
from typing import List, Optional
from pororo.tasks.utils.base import PororoFactoryBase, PororoSimpleBase
class PororoSrlFactory(PororoFactoryBase):
"""
Conduct semantic role labeling
Korean (`charbert.base.ko.srl`)
- dataset: UCorpus
- metric: TBU
- ref: http://nlplab.ulsan.ac.kr/doku.php?id=start
Args:
sent: (str) sentence to be semantic role labeled
Returns:
List[List[Tuple[str, str]]]: (token, semantic role) pairs for each predicate in the sentence
Examples:
>>> srl = Pororo(task="srl", lang="ko")
>>> srl("카터는 역삼에서 카카오브레인으로 출근한다.")
[[('카터는', 'AGT'), ('역삼에서', 'LOC'), ('카카오브레인으로', 'GOL'), ('출근한다.', 'PREDICATE')]]
>>> srl("피고인은 거제에서 400만 원 상당의 순금목걸이를 피해자로부터 강취하였다.")
[[('피고인은', 'AGT'), ('거제에서', '-'), ('400만', '-'), ('원', '-'), ('상당의', '-'), ('순금목걸이를', 'THM'), ('피해자로부터', 'SRC'), ('강취하였다.', 'PREDICATE')]]
"""
def __init__(self, task: str, lang: str, model: Optional[str]):
super().__init__(task, lang, model)
@staticmethod
def get_available_langs():
return ["ko"]
@staticmethod
def get_available_models():
return {"ko": ["charbert.base.ko.srl"]}
def load(self, device: str):
"""
Load user-selected task-specific model
Args:
device (str): device information
Returns:
object: User-selected task-specific model
"""
if "charbert" in self.config.n_model:
from pororo.models.brainbert import RobertaLabelModel
from pororo.tasks import PororoPosFactory
model = RobertaLabelModel.load_model(
f"bert/{self.config.n_model}",
self.config.lang,
).eval().to(device)
tagger = PororoPosFactory(
task="pos",
model="mecab-ko",
lang=self.config.lang,
).load(device)
return PororoBertSRL(model, tagger, self.config)
class PororoBertSRL(PororoSimpleBase):
def __init__(self, model, tagger, config):
super().__init__(config)
self._tagger = tagger
self._model = model
self._verbs = ["VV", "VA", "XSV", "XSA", "VCN"]
def _split_list(self, lst: List, separator: str):
"""
Split list using separator
Args:
lst (list): PoS tagger pair list
separator (str): separator token
Returns:
list: list of sub-lists split on the separator token
"""
res = []
tmp = []
for elem in lst:
if elem[0] == separator:
res.append(tmp)
tmp = []
continue
tmp.append(elem)
res.append(tmp)
return res
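# Behaviour sketch: _split_list([["a", "T"], [" ", "S"], ["b", "T"]], " ")
# returns [[["a", "T"]], [["b", "T"]]] -- the sub-lists between separator tokens.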
def _preprocess(self, sent: str) -> str:
"""
Preprocess semantic role labeling input to specify predicate
Args:
sent (str): input sentence
Returns:
str: preprocessed input
"""
words = self._split_list([list(tag) for tag in self._tagger(sent)], " ")
vs = []
for i, word in enumerate(words):
for morph in word:
if morph[1] in self._verbs:
vs.append(i)
break
sents = []
for v in vs:
morphs = deepcopy(words)
morphs[v][0][0] = f"★{morphs[v][0][0]}"
sent, seg = str(), str()
for elems in morphs:
for pair in elems:
morph, tag = pair
tag = f"{tag} "
if morph == " ":
sent += "▁ "
seg += tag
continue
chars = [c for c in morph]
sent += f"{' '.join(chars)} "
seg += tag * len(chars)
sent += "▁ "
seg += "SPACE "
sents.append((sent.strip(), seg.strip()))
return sents
def _postprocess(self, result: List, origin: str):
"""
Postprocess semantic role labeling model inference result
Args:
result (List): inferenced semantic roles
origin (str): original query string
Returns:
List[Tuple]: postprocessed result
"""
tokens = origin.split()
fin = []
for res in result:
res = self._split_list(res, "▁")
tmp = []
for i, token in enumerate(tokens):
if "★" in res[i][0][0]:
tmp.append((token, "PREDICATE"))
continue
tmp.append((token, res[i][0][1]))
fin.append(tmp)
return fin
def predict(self, sent: str, **kwargs):
"""
Conduct semantic role labeling
Args:
sent: (str) sentence to be parsed dependency
Returns:
List[Tuple[int, str, int, str]]: token index, token label, token head and its relation
"""
preproc = self._preprocess(sent)
if not preproc:
return "There is NO predicate to be labeled"
res = []
for p in preproc:
res.append(self._model.predict_srl(p[0], p[1]))
return self._postprocess(res, sent)
quantities/constants/electron.py | 502E532E/python-quantities | 105 | 130765
"""
"""
from ._utils import _cd
from ..unitquantity import UnitConstant
e = elementary_charge = UnitConstant(
'elementary_charge',
_cd('elementary charge'),
symbol='e'
)
elementary_charge_over_h = UnitConstant(
'elementary_charge_over_h',
_cd('elementary charge over h'),
symbol='e/h'
)
Faraday_constant = UnitConstant(
'Faraday_constant',
_cd('Faraday constant'),
symbol='F'
)
#F_star = Faraday_constant_for_conventional_electric_current = UnitConstant(
# _cd('Faraday constant for conventional electric current') what is a unit of C_90?
r_e = classical_electron_radius = UnitConstant(
'classical_electron_radius',
_cd('classical electron radius'),
symbol='r_e',
u_symbol='rₑ'
)
m_e = electron_mass = UnitConstant(
'electron_mass',
_cd('electron mass'),
symbol='m_e',
u_symbol='mₑ'
)
lambda_C = Compton_wavelength = UnitConstant(
'Compton_wavelength',
_cd('Compton wavelength'),
symbol='lambda_C',
u_symbol='λ_C'
)
Compton_wavelength_over_2_pi = UnitConstant(
'Compton_wavelength_over_2_pi',
_cd('Compton wavelength over 2 pi'),
symbol='lambdabar_C',
u_symbol='ƛ_C'
)
electron_charge_to_mass_quotient = UnitConstant(
'electron_charge_to_mass_quotient',
_cd('electron charge to mass quotient'),
symbol='(-e/m_e)',
u_symbol='(-e/mₑ)'
)
g_e = electron_g_factor = UnitConstant(
'electron_g_factor',
_cd('electron g factor'),
symbol='g_e',
u_symbol='gₑ'
)
gamma_e = electron_gyromagnetic_ratio = UnitConstant(
'electron_gyromagnetic_ratio',
_cd('electron gyromagnetic ratio'),
symbol='gamma_e',
u_symbol='γₑ'
)
electron_gyromagnetic_ratio_over_2_pi = UnitConstant(
'electron_gyromagnetic_ratio_over_2_pi',
_cd('electron gyromagnetic ratio over 2 pi'),
symbol='gamma_e/(2*pi)',
u_symbol='γₑ/(2·π)'
)
mu_e = electron_magnetic_moment = UnitConstant(
'electron_magnetic_moment',
_cd('electron magnetic moment'),
symbol='mu_e',
u_symbol='μₑ'
)
a_e = electron_magnetic_moment_anomaly = UnitConstant(
'electron_magnetic_moment_anomaly',
_cd('electron magnetic moment anomaly'),
symbol='a_e',
u_symbol='aₑ'
)
eV = electron_volt = UnitConstant(
'electron_volt',
_cd('electron volt'),
symbol='eV'
)
sigma_e = Thomson_cross_section = UnitConstant(
'Thomson_cross_section',
_cd('Thomson cross section'),
symbol='sigma_e',
u_symbol='σₑ'
)
mu_B = Bohr_magneton = UnitConstant(
'Bohr_magneton',
_cd('Bohr magneton'),
symbol='mu_B',
u_symbol='μ_B'
)
Bohr_magneton_in_Hz_per_T = UnitConstant(
'Bohr_magneton_in_Hz_per_T',
_cd('Bohr magneton in Hz/T')
)
Bohr_magneton_in_inverse_meters_per_tesla = UnitConstant(
'Bohr_magneton_in_inverse_meters_per_tesla',
_cd('Bohr magneton in inverse meters per tesla')
)
Bohr_magneton_in_K_per_T = UnitConstant(
'Bohr_magneton_in_K_per_T',
_cd('Bohr magneton in K/T')
)
electron_mass_energy_equivalent = UnitConstant(
'electron_mass_energy_equivalent',
_cd('electron mass energy equivalent'),
symbol='(m_e*c**2)',
u_symbol='(mₑ·c²)'
)
electron_mass_energy_equivalent_in_MeV = UnitConstant(
'electron_mass_energy_equivalent_in_MeV',
_cd('electron mass energy equivalent in MeV')
)
electron_mass_in_u = UnitConstant(
'electron_mass_in_u',
_cd('electron mass in u')
)
electron_molar_mass = UnitConstant(
'electron_molar_mass',
_cd('electron molar mass'),
symbol='M_e',
u_symbol='Mₑ'
)
electron_deuteron_mass_ratio = UnitConstant(
'electron_deuteron_mass_ratio',
_cd('electron-deuteron mass ratio'),
symbol='(m_e/m_d)',
u_symbol='(mₑ/m_d)'
)
electron_muon_mass_ratio = UnitConstant(
'electron_muon_mass_ratio',
_cd('electron-muon mass ratio'),
symbol='(m_e/m_mu)',
u_symbol='(mₑ/m_μ)'
)
electron_neutron_mass_ratio = UnitConstant(
'electron_neutron_mass_ratio',
_cd('electron-neutron mass ratio'),
symbol='(m_e/m_n)',
u_symbol='(mₑ/m_n)'
)
electron_proton_mass_ratio = UnitConstant(
'electron_proton_mass_ratio',
_cd('electron-proton mass ratio'),
symbol='(m_e/m_p)',
u_symbol='(mₑ/m_p)'
)
electron_tau_mass_ratio = UnitConstant(
'electron_tau_mass_ratio',
_cd('electron-tau mass ratio'),
symbol='(m_e/m_tau)',
u_symbol='(mₑ/m_τ)'
)
electron_to_alpha_particle_mass_ratio = UnitConstant(
'electron_to_alpha_particle_mass_ratio',
_cd('electron to alpha particle mass ratio'),
symbol='(m_e/m_alpha)',
u_symbol='(mₑ/m_α)'
)
electron_deuteron_magnetic_moment_ratio = UnitConstant(
'electron_deuteron_magnetic_moment_ratio',
_cd('electron-deuteron magnetic moment ratio'),
symbol='(mu_e/mu_d)',
u_symbol='(μₑ/μ_d)'
)
electron_magnetic_moment_to_Bohr_magneton_ratio = UnitConstant(
'electron_magnetic_moment_to_Bohr_magneton_ratio',
_cd('electron magnetic moment to Bohr magneton ratio'),
symbol='(mu_e/mu_B)',
u_symbol='(μₑ/μ_B)'
)
electron_magnetic_moment_to_nuclear_magneton_ratio = UnitConstant(
'electron_magnetic_moment_to_nuclear_magneton_ratio',
_cd('electron magnetic moment to nuclear magneton ratio'),
symbol='(mu_e/mu_N)',
u_symbol='(μₑ/μ_N)'
)
electron_muon_magnetic_moment_ratio = UnitConstant(
'electron_muon_magnetic_moment_ratio',
_cd('electron-muon magnetic moment ratio'),
symbol='(mu_e/mu_mu)',
u_symbol='(μₑ/μ_μ)'
)
electron_neutron_magnetic_moment_ratio = UnitConstant(
'electron_neutron_magnetic_moment_ratio',
_cd('electron-neutron magnetic moment ratio'),
symbol='(mu_e/mu_n)',
u_symbol='(μₑ/μ_n)'
)
electron_proton_magnetic_moment_ratio = UnitConstant(
'electron_proton_magnetic_moment_ratio',
_cd('electron-proton magnetic moment ratio'),
symbol='(mu_e/mu_p)',
u_symbol='(μₑ/μ_p)'
)
electron_to_shielded_helion_magnetic_moment_ratio = UnitConstant(
'electron_to_shielded_helion_magnetic_moment_ratio',
_cd('electron to shielded helion magnetic moment ratio'),
symbol='(mu_e/muprime_h)',
u_symbol='(μₑ/μ′_h)'
)
electron_to_shielded_proton_magnetic_moment_ratio = UnitConstant(
'electron_to_shielded_proton_magnetic_moment_ratio',
_cd('electron to shielded proton magnetic moment ratio'),
symbol='(mu_e/muprime_p)',
u_symbol='(μₑ/μ′_p)'
)
electron_volt_atomic_mass_unit_relationship = UnitConstant(
'electron_volt_atomic_mass_unit_relationship',
_cd('electron volt-atomic mass unit relationship')
)
electron_volt_hartree_relationship = UnitConstant(
'electron_volt_hartree_relationship',
_cd('electron volt-hartree relationship')
)
electron_volt_hertz_relationship = UnitConstant(
'electron_volt_hertz_relationship',
_cd('electron volt-hertz relationship')
)
electron_volt_inverse_meter_relationship = UnitConstant(
'electron_volt_inverse_meter_relationship',
_cd('electron volt-inverse meter relationship')
)
electron_volt_joule_relationship = UnitConstant(
'electron_volt_joule_relationship',
_cd('electron volt-joule relationship')
)
electron_volt_kelvin_relationship = UnitConstant(
'electron_volt_kelvin_relationship',
_cd('electron volt-kelvin relationship')
)
electron_volt_kilogram_relationship = UnitConstant(
'electron_volt_kilogram_relationship',
_cd('electron volt-kilogram relationship')
)
hertz_electron_volt_relationship = UnitConstant(
'hertz_electron_volt_relationship',
_cd('hertz-electron volt relationship')
)
inverse_meter_electron_volt_relationship = UnitConstant(
'inverse_meter_electron_volt_relationship',
_cd('inverse meter-electron volt relationship')
)
joule_electron_volt_relationship = UnitConstant(
'joule_electron_volt_relationship',
_cd('joule-electron volt relationship')
)
kelvin_electron_volt_relationship = UnitConstant(
'kelvin_electron_volt_relationship',
_cd('kelvin-electron volt relationship')
)
kilogram_electron_volt_relationship = UnitConstant(
'kilogram_electron_volt_relationship',
_cd('kilogram-electron volt relationship')
)
del UnitConstant, _cd
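# Usage sketch (assumes the package re-exports these constants; import path per this file):
#   from quantities.constants.electron import electron_mass, electron_volt
#   print(electron_mass.rescale('kg'))  # CODATA electron mass in kilograms
#   print(electron_volt.rescale('J'))   # 1 eV expressed in joules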
desktop/core/ext-py/phoenixdb-1.1.0/phoenixdb/sqlalchemy_phoenix.py | rohankumardubey/hue | 5,079 | 130776
<filename>desktop/core/ext-py/phoenixdb-1.1.0/phoenixdb/sqlalchemy_phoenix.py
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import phoenixdb
from sqlalchemy import types
from sqlalchemy.engine.default import DefaultDialect, DefaultExecutionContext
from sqlalchemy.exc import CompileError
from sqlalchemy.sql.compiler import DDLCompiler
from sqlalchemy.types import BIGINT, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT, INTEGER, NUMERIC,\
SMALLINT, TIME, TIMESTAMP, VARBINARY, VARCHAR
if sys.version_info.major == 3:
from urllib.parse import urlunsplit, SplitResult, urlencode
else:
from urllib import urlencode
from urlparse import urlunsplit, SplitResult
class PhoenixDDLCompiler(DDLCompiler):
def visit_primary_key_constraint(self, constraint):
if constraint.name is None:
raise CompileError("Can't create primary key without a name.")
return DDLCompiler.visit_primary_key_constraint(self, constraint)
AUTOCOMMIT_REGEXP = re.compile(
r"\s*(?:UPDATE|UPSERT|CREATE|DELETE|DROP|ALTER)", re.I | re.UNICODE
)
class PhoenixExecutionContext(DefaultExecutionContext):
def should_autocommit_text(self, statement):
return AUTOCOMMIT_REGEXP.match(statement)
class PhoenixDialect(DefaultDialect):
'''Phoenix dialect
dialect:: phoenix
:name: Phoenix
note::
The Phoenix dialect for SQLAlchemy is incomplete. It implements the functions required by Hue
for basic operation, but little else.
Connecting
----------
The connection URL has the format of phoenix://host:port
This format does not allow for specifying the http scheme, or the URL path that the server uses.
Setting tls=True sets the server URL scheme to https.
If the path arg is set, it is used as the path of the server URL.
The phoenix-specific authentication options can be set via the standard connect_args argument.
Connecting to an unsecure server::
create_engine('phoenix://localhost:8765')
Connecting to a secure server via SPNEGO (after kinit)::
create_engine('phoenix://localhost:8765', tls=True, connect_args={'authentication': 'SPNEGO'})
Connecting to a secure server via Knox::
create_engine('phoenix://localhost:8765', tls=True, path='/gateway/avatica/',\
connect_args={'authentication':'BASIC', 'avatica_user':'user', 'avatica_password':'password'})
'''
name = "phoenix"
driver = "phoenixdb"
ddl_compiler = PhoenixDDLCompiler
execution_ctx_cls = PhoenixExecutionContext
def __init__(self, tls=False, path='/', **opts):
'''
:param tls:
If True, then use https for connecting, otherwise use http
:param path:
The path component of the connection URL
'''
# There is no way to pass these via the SqlAlchemy url object
self.tls = tls
self.path = path
super(PhoenixDialect, self).__init__(**opts)
@classmethod
def dbapi(cls):
return phoenixdb
def create_connect_args(self, url):
connect_args = dict()
if url.username is not None:
connect_args['user'] = url.username
if url.password is not None:
connect_args['password'] = url.password
phoenix_url = urlunsplit(SplitResult(
scheme='https' if self.tls else 'http',
netloc='{}:{}'.format(url.host, 8765 if url.port is None else url.port),
path=self.path,
query=urlencode(url.query),
fragment='',
))
return [phoenix_url], connect_args
def has_table(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
return bool(connection.connect().connection.meta().get_tables(
tableNamePattern=table_name,
schemaPattern=schema,
typeList=('TABLE', 'SYSTEM_TABLE')))
def get_schema_names(self, connection, **kw):
schemas = connection.connect().connection.meta().get_schemas()
schema_names = [schema['TABLE_SCHEM'] for schema in schemas]
# Phoenix won't return the default schema if there aren't any tables in it
if '' not in schema_names:
schema_names.insert(0, '')
return schema_names
def get_table_names(self, connection, schema=None, order_by=None, **kw):
'''order_by is ignored'''
if schema is None:
schema = ''
tables = connection.connect().connection.meta().get_tables(
schemaPattern=schema, typeList=('TABLE', 'SYSTEM TABLE'))
return [table['TABLE_NAME'] for table in tables]
def get_view_names(self, connection, schema=None, **kw):
if schema is None:
schema = ''
views = connection.connect().connection.meta().get_tables(schemaPattern=schema,
typeList=('VIEW',))
return [view['TABLE_NAME'] for view in views]
def get_columns(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
raw = connection.connect().connection.meta().get_columns(
schemaPattern=schema, tableNamePattern=table_name)
return [self._map_column(row) for row in raw]
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
raw = connection.connect().connection.meta().get_primary_keys(
schema=schema, table=table_name)
cooked = {
'constrained_columns': []
}
if raw:
cooked['name'] = raw[0]['PK_NAME']
for row in raw:
cooked['constrained_columns'].insert(row['KEY_SEQ'] - 1, row['COLUMN_NAME'])
return cooked
def get_indexes(self, connection, table_name, schema=None, **kw):
if schema is None:
schema = ''
raw = connection.connect().connection.meta().get_index_info(schema=schema, table=table_name)
# We know that Phoenix returns the rows ordered by INDEX_NAME and ORDINAL_POSITION
cooked = []
current = None
for row in raw:
if current is None or row['INDEX_NAME'] != current['name']:
current = {
'name': row['INDEX_NAME'],
'unique': not row['NON_UNIQUE'],
'column_names': [],
}
cooked.append(current)
# Phoenix returns the column names in its internal representation here
# Remove the default CF prefix
canonical_name = row['COLUMN_NAME']
if canonical_name.startswith('0:'):
canonical_name = canonical_name[len('0:'):]
if canonical_name.startswith(':'):
canonical_name = canonical_name[len(':'):]
current['column_names'].append(canonical_name)
return cooked
def get_foreign_keys(self, conn, table_name, schema=None, **kw):
'''Foreign keys are a foreign concept to Phoenix,
and SqlAlchemy cannot parse the DB schema if it's not implemented '''
return []
def _map_column(self, raw):
cooked = {}
cooked['name'] = raw['COLUMN_NAME']
cooked['type'] = COLUMN_DATA_TYPE[raw['TYPE_ID']]
cooked['nullable'] = bool(raw['IS_NULLABLE'])
cooked['autoincrement'] = bool(raw['IS_AUTOINCREMENT'])
cooked['comment'] = raw['REMARKS']
cooked['default'] = None  # Not apparent how to get this from the metadata
return cooked
class TINYINT(types.Integer):
__visit_name__ = "SMALLINT"
class UNSIGNED_TINYINT(types.Integer):
__visit_name__ = "SMALLINT"
class UNSIGNED_INTEGER(types.Integer):
__visit_name__ = "INTEGER"
class DOUBLE(types.FLOAT):
__visit_name__ = "FLOAT"
class UNSIGNED_DOUBLE(types.FLOAT):
__visit_name__ = "FLOAT"
class UNSIGNED_FLOAT(types.FLOAT):
__visit_name__ = "FLOAT"
class UNSIGNED_LONG(types.BIGINT):
__visit_name__ = "BIGINT"
class UNSIGNED_TIME(types.TIME):
__visit_name__ = "TIME"
class UNSIGNED_DATE(types.DATE):
__visit_name__ = "DATE"
class UNSIGNED_TIMESTAMP(types.TIMESTAMP):
__visit_name__ = "TIMESTAMP"
class ROWID (types.String):
__visit_name__ = "VARCHAR"
COLUMN_DATA_TYPE = {
-6: TINYINT,
-5: BIGINT,
-3: VARBINARY,
1: CHAR,
2: NUMERIC,
3: DECIMAL,
4: INTEGER,
5: SMALLINT,
6: FLOAT,
8: DOUBLE,
9: UNSIGNED_INTEGER,
10: UNSIGNED_LONG,
11: UNSIGNED_TINYINT,
12: VARCHAR,
13: ROWID,
14: UNSIGNED_FLOAT,
15: UNSIGNED_DOUBLE,
16: BOOLEAN,
18: UNSIGNED_TIME,
19: UNSIGNED_DATE,
20: UNSIGNED_TIMESTAMP,
91: DATE,
92: TIME,
93: TIMESTAMP
}
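# Usage sketch: one way to make the phoenix:// scheme resolvable without a packaging entry
# point (registry.register is standard SQLAlchemy; the module path below is this file's own):
#   from sqlalchemy import create_engine
#   from sqlalchemy.dialects import registry
#   registry.register("phoenix", "phoenixdb.sqlalchemy_phoenix", "PhoenixDialect")
#   engine = create_engine("phoenix://localhost:8765")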
venv/Lib/site-packages/statsmodels/tsa/statespace/tests/test_simulation_smoothing.py | EkremBayar/bayar | 6,931 | 130785
"""
Tests for simulation smoothing
Author: <NAME>
License: Simplified-BSD
"""
import os
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pandas as pd
from statsmodels import datasets
from statsmodels.tsa.statespace import mlemodel, sarimax, structural
from statsmodels.tsa.statespace.simulation_smoother import (
SIMULATION_STATE, SIMULATION_DISTURBANCE, SIMULATION_ALL)
current_path = os.path.dirname(os.path.abspath(__file__))
class MultivariateVARKnown(object):
"""
Tests for simulation smoothing values in a couple of special cases of
variates. Both computed values and KFAS values are used for comparison
against the simulation smoother output.
"""
@classmethod
def setup_class(cls, missing=None, test_against_KFAS=True,
*args, **kwargs):
cls.test_against_KFAS = test_against_KFAS
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
freq='QS')
obs = np.log(dta[['realgdp', 'realcons', 'realinv']]).diff().iloc[1:]
if missing == 'all':
obs.iloc[0:50, :] = np.nan
elif missing == 'partial':
obs.iloc[0:50, 0] = np.nan
elif missing == 'mixed':
obs.iloc[0:50, 0] = np.nan
obs.iloc[19:70, 1] = np.nan
obs.iloc[39:90, 2] = np.nan
obs.iloc[119:130, 0] = np.nan
obs.iloc[119:130, 2] = np.nan
obs.iloc[-10:, :] = np.nan
if test_against_KFAS:
obs = obs.iloc[:9]
# Create the model
mod = mlemodel.MLEModel(obs, k_states=3, k_posdef=3, **kwargs)
mod['design'] = np.eye(3)
mod['obs_cov'] = np.array([
[0.0000640649, 0., 0.],
[0., 0.0000572802, 0.],
[0., 0., 0.0017088585]])
mod['transition'] = np.array([
[-0.1119908792, 0.8441841604, 0.0238725303],
[0.2629347724, 0.4996718412, -0.0173023305],
[-3.2192369082, 4.1536028244, 0.4514379215]])
mod['selection'] = np.eye(3)
mod['state_cov'] = np.array([
[0.0000640649, 0.0000388496, 0.0002148769],
[0.0000388496, 0.0000572802, 0.000001555],
[0.0002148769, 0.000001555, 0.0017088585]])
mod.initialize_approximate_diffuse(1e6)
mod.ssm.filter_univariate = True
cls.model = mod
cls.results = mod.smooth([], return_ssm=True)
cls.sim = cls.model.simulation_smoother()
def test_loglike(self):
assert_allclose(np.sum(self.results.llf_obs), self.true_llf)
def test_simulate_0(self):
n = 10
# Test with all inputs as zeros
measurement_shocks = np.zeros((n, self.model.k_endog))
state_shocks = np.zeros((n, self.model.ssm.k_posdef))
initial_state = np.zeros(self.model.k_states)
obs, states = self.model.ssm.simulate(
nsimulations=n, measurement_shocks=measurement_shocks,
state_shocks=state_shocks, initial_state=initial_state)
assert_allclose(obs, np.zeros((n, self.model.k_endog)))
assert_allclose(states, np.zeros((n, self.model.k_states)))
def test_simulate_1(self):
n = 10
# Test with np.arange / 10 measurement shocks only
measurement_shocks = np.reshape(
np.arange(n * self.model.k_endog) / 10.,
(n, self.model.k_endog))
state_shocks = np.zeros((n, self.model.ssm.k_posdef))
initial_state = np.zeros(self.model.k_states)
obs, states = self.model.ssm.simulate(
nsimulations=n, measurement_shocks=measurement_shocks,
state_shocks=state_shocks, initial_state=initial_state)
assert_allclose(obs, np.reshape(
np.arange(n * self.model.k_endog) / 10.,
(n, self.model.k_endog)))
assert_allclose(states, np.zeros((n, self.model.k_states)))
def test_simulate_2(self):
n = 10
Z = self.model['design']
T = self.model['transition']
# Test with non-zero state shocks and initial state
measurement_shocks = np.zeros((n, self.model.k_endog))
state_shocks = np.ones((n, self.model.ssm.k_posdef))
initial_state = np.ones(self.model.k_states) * 2.5
obs, states = self.model.ssm.simulate(
nsimulations=n, measurement_shocks=measurement_shocks,
state_shocks=state_shocks, initial_state=initial_state)
desired_obs = np.zeros((n, self.model.k_endog))
desired_state = np.zeros((n, self.model.k_states))
desired_state[0] = initial_state
desired_obs[0] = np.dot(Z, initial_state)
for i in range(1, n):
desired_state[i] = np.dot(T, desired_state[i-1]) + state_shocks[i]
desired_obs[i] = np.dot(Z, desired_state[i])
assert_allclose(obs, desired_obs)
assert_allclose(states, desired_state)
def test_simulation_smoothing_0(self):
# Simulation smoothing when setting all variates to zeros
# In this case:
# - unconditional disturbances are zero, because they are simply
# transformed to have the appropriate variance matrix, but keep the
# same mean - of zero
# - generated states are zeros, because initial state is
# zeros and all state disturbances are zeros
# - generated observations are zeros, because states are zeros and all
# measurement disturbances are zeros
# - The simulated state is equal to the smoothed state from the
# original model, because
# simulated state = (generated state - smoothed generated state +
# smoothed state)
# and here generated state = smoothed generated state = 0
# - The simulated measurement disturbance is equal to the smoothed
# measurement disturbance for very similar reasons, because
# simulated measurement disturbance = (
# generated measurement disturbance -
# smoothed generated measurement disturbance +
# smoothed measurement disturbance)
# and here generated measurement disturbance and
# smoothed generated measurement disturbance are zero.
# - The simulated state disturbance is equal to the smoothed
# state disturbance for exactly the same reason as above.
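# In short: simulated = generated - smoothed(generated) + smoothed(original); with
# all-zero variates, generated == smoothed(generated) == 0, so each simulated quantity
# collapses to its smoothed counterpart from the original model.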
sim = self.sim
Z = self.model['design']
n_disturbance_variates = (
(self.model.k_endog + self.model.ssm.k_posdef) * self.model.nobs)
# Test against known quantities (see above for description)
sim.simulate(disturbance_variates=np.zeros(n_disturbance_variates),
initial_state_variates=np.zeros(self.model.k_states))
assert_allclose(sim.generated_measurement_disturbance, 0)
assert_allclose(sim.generated_state_disturbance, 0)
assert_allclose(sim.generated_state, 0)
assert_allclose(sim.generated_obs, 0)
assert_allclose(sim.simulated_state, self.results.smoothed_state)
if not self.model.ssm.filter_collapsed:
assert_allclose(sim.simulated_measurement_disturbance,
self.results.smoothed_measurement_disturbance)
assert_allclose(sim.simulated_state_disturbance,
self.results.smoothed_state_disturbance)
# Test against R package KFAS values
if self.test_against_KFAS:
path = os.path.join(current_path, 'results',
'results_simulation_smoothing0.csv')
true = pd.read_csv(path)
assert_allclose(sim.simulated_state,
true[['state1', 'state2', 'state3']].T,
atol=1e-7)
assert_allclose(sim.simulated_measurement_disturbance,
true[['eps1', 'eps2', 'eps3']].T,
atol=1e-7)
assert_allclose(sim.simulated_state_disturbance,
true[['eta1', 'eta2', 'eta3']].T,
atol=1e-7)
signals = np.zeros((3, self.model.nobs))
for t in range(self.model.nobs):
signals[:, t] = np.dot(Z, sim.simulated_state[:, t])
assert_allclose(signals, true[['signal1', 'signal2', 'signal3']].T,
atol=1e-7)
def test_simulation_smoothing_1(self):
# Test with measurement disturbance as np.arange / 10., all other
# disturbances are zeros
sim = self.sim
Z = self.model['design']
# Construct the variates
measurement_disturbance_variates = np.reshape(
np.arange(self.model.nobs * self.model.k_endog) / 10.,
(self.model.nobs, self.model.k_endog))
disturbance_variates = np.r_[
measurement_disturbance_variates.ravel(),
np.zeros(self.model.nobs * self.model.ssm.k_posdef)]
# Compute some additional known quantities
generated_measurement_disturbance = np.zeros(
measurement_disturbance_variates.shape)
chol = np.linalg.cholesky(self.model['obs_cov'])
for t in range(self.model.nobs):
generated_measurement_disturbance[t] = np.dot(
chol, measurement_disturbance_variates[t])
generated_model = mlemodel.MLEModel(
generated_measurement_disturbance, k_states=self.model.k_states,
k_posdef=self.model.ssm.k_posdef)
for name in ['design', 'obs_cov', 'transition',
'selection', 'state_cov']:
generated_model[name] = self.model[name]
generated_model.initialize_approximate_diffuse(1e6)
generated_model.ssm.filter_univariate = True
generated_res = generated_model.ssm.smooth()
simulated_state = (
0 - generated_res.smoothed_state + self.results.smoothed_state)
if not self.model.ssm.filter_collapsed:
simulated_measurement_disturbance = (
generated_measurement_disturbance.T -
generated_res.smoothed_measurement_disturbance +
self.results.smoothed_measurement_disturbance)
simulated_state_disturbance = (
0 - generated_res.smoothed_state_disturbance +
self.results.smoothed_state_disturbance)
# Test against known values
sim.simulate(disturbance_variates=disturbance_variates,
initial_state_variates=np.zeros(self.model.k_states))
assert_allclose(sim.generated_measurement_disturbance,
generated_measurement_disturbance)
assert_allclose(sim.generated_state_disturbance, 0)
assert_allclose(sim.generated_state, 0)
assert_allclose(sim.generated_obs,
generated_measurement_disturbance.T)
assert_allclose(sim.simulated_state, simulated_state)
if not self.model.ssm.filter_collapsed:
assert_allclose(sim.simulated_measurement_disturbance,
simulated_measurement_disturbance)
assert_allclose(sim.simulated_state_disturbance,
simulated_state_disturbance)
# Test against R package KFAS values
if self.test_against_KFAS:
path = os.path.join(current_path, 'results',
'results_simulation_smoothing1.csv')
true = pd.read_csv(path)
assert_allclose(sim.simulated_state,
true[['state1', 'state2', 'state3']].T,
atol=1e-7)
assert_allclose(sim.simulated_measurement_disturbance,
true[['eps1', 'eps2', 'eps3']].T,
atol=1e-7)
assert_allclose(sim.simulated_state_disturbance,
true[['eta1', 'eta2', 'eta3']].T,
atol=1e-7)
signals = np.zeros((3, self.model.nobs))
for t in range(self.model.nobs):
signals[:, t] = np.dot(Z, sim.simulated_state[:, t])
assert_allclose(signals, true[['signal1', 'signal2', 'signal3']].T,
atol=1e-7)
def test_simulation_smoothing_2(self):
# Test with measurement and state disturbances as np.arange / 10.,
# initial state variates are zeros.
sim = self.sim
Z = self.model['design']
T = self.model['transition']
# Construct the variates
measurement_disturbance_variates = np.reshape(
np.arange(self.model.nobs * self.model.k_endog) / 10.,
(self.model.nobs, self.model.k_endog))
state_disturbance_variates = np.reshape(
np.arange(self.model.nobs * self.model.ssm.k_posdef) / 10.,
(self.model.nobs, self.model.ssm.k_posdef))
disturbance_variates = np.r_[
measurement_disturbance_variates.ravel(),
state_disturbance_variates.ravel()]
initial_state_variates = np.zeros(self.model.k_states)
# Compute some additional known quantities
generated_measurement_disturbance = np.zeros(
measurement_disturbance_variates.shape)
chol = np.linalg.cholesky(self.model['obs_cov'])
for t in range(self.model.nobs):
generated_measurement_disturbance[t] = np.dot(
chol, measurement_disturbance_variates[t])
generated_state_disturbance = np.zeros(
state_disturbance_variates.shape)
chol = np.linalg.cholesky(self.model['state_cov'])
for t in range(self.model.nobs):
generated_state_disturbance[t] = np.dot(
chol, state_disturbance_variates[t])
generated_obs = np.zeros((self.model.k_endog, self.model.nobs))
generated_state = np.zeros((self.model.k_states, self.model.nobs+1))
chol = np.linalg.cholesky(self.results.initial_state_cov)
generated_state[:, 0] = (
self.results.initial_state + np.dot(chol, initial_state_variates))
for t in range(self.model.nobs):
generated_state[:, t+1] = (np.dot(T, generated_state[:, t]) +
generated_state_disturbance.T[:, t])
generated_obs[:, t] = (np.dot(Z, generated_state[:, t]) +
generated_measurement_disturbance.T[:, t])
generated_model = mlemodel.MLEModel(
generated_obs.T, k_states=self.model.k_states,
k_posdef=self.model.ssm.k_posdef)
for name in ['design', 'obs_cov', 'transition',
'selection', 'state_cov']:
generated_model[name] = self.model[name]
generated_model.initialize_approximate_diffuse(1e6)
generated_model.ssm.filter_univariate = True
generated_res = generated_model.ssm.smooth()
simulated_state = (
generated_state[:, :-1] - generated_res.smoothed_state +
self.results.smoothed_state)
if not self.model.ssm.filter_collapsed:
simulated_measurement_disturbance = (
generated_measurement_disturbance.T -
generated_res.smoothed_measurement_disturbance +
self.results.smoothed_measurement_disturbance)
simulated_state_disturbance = (
generated_state_disturbance.T -
generated_res.smoothed_state_disturbance +
self.results.smoothed_state_disturbance)
# Test against known values
sim.simulate(disturbance_variates=disturbance_variates,
initial_state_variates=np.zeros(self.model.k_states))
assert_allclose(sim.generated_measurement_disturbance,
generated_measurement_disturbance)
assert_allclose(sim.generated_state_disturbance,
generated_state_disturbance)
assert_allclose(sim.generated_state, generated_state)
assert_allclose(sim.generated_obs, generated_obs)
assert_allclose(sim.simulated_state, simulated_state)
if not self.model.ssm.filter_collapsed:
assert_allclose(sim.simulated_measurement_disturbance.T,
simulated_measurement_disturbance.T)
assert_allclose(sim.simulated_state_disturbance,
simulated_state_disturbance)
# Test against R package KFAS values
if self.test_against_KFAS:
path = os.path.join(current_path, 'results',
'results_simulation_smoothing2.csv')
true = pd.read_csv(path)
assert_allclose(sim.simulated_state.T,
true[['state1', 'state2', 'state3']],
atol=1e-7)
assert_allclose(sim.simulated_measurement_disturbance,
true[['eps1', 'eps2', 'eps3']].T,
atol=1e-7)
assert_allclose(sim.simulated_state_disturbance,
true[['eta1', 'eta2', 'eta3']].T,
atol=1e-7)
signals = np.zeros((3, self.model.nobs))
for t in range(self.model.nobs):
signals[:, t] = np.dot(Z, sim.simulated_state[:, t])
assert_allclose(signals, true[['signal1', 'signal2', 'signal3']].T,
atol=1e-7)
class TestMultivariateVARKnown(MultivariateVARKnown):
@classmethod
def setup_class(cls, *args, **kwargs):
super(TestMultivariateVARKnown, cls).setup_class()
cls.true_llf = 39.01246166
class TestMultivariateVARKnownMissingAll(MultivariateVARKnown):
"""
Notes
-----
    Cannot test against KFAS because it behaves differently for missing
    entries. When an entry is missing, KFAS does not draw a simulation
smoothed value for that entry, whereas we draw from the unconditional
distribution. It appears there is nothing to definitively recommend one
approach over the other, but it makes it difficult to line up the variates
correctly in order to replicate results.
"""
@classmethod
def setup_class(cls, *args, **kwargs):
super(TestMultivariateVARKnownMissingAll, cls).setup_class(
missing='all', test_against_KFAS=False)
cls.true_llf = 1305.739288
class TestMultivariateVARKnownMissingPartial(MultivariateVARKnown):
@classmethod
def setup_class(cls, *args, **kwargs):
super(TestMultivariateVARKnownMissingPartial, cls).setup_class(
missing='partial', test_against_KFAS=False)
cls.true_llf = 1518.449598
class TestMultivariateVARKnownMissingMixed(MultivariateVARKnown):
@classmethod
def setup_class(cls, *args, **kwargs):
super(TestMultivariateVARKnownMissingMixed, cls).setup_class(
missing='mixed', test_against_KFAS=False)
cls.true_llf = 1117.265303
class TestDFM(TestMultivariateVARKnown):
test_against_KFAS = False
@classmethod
def setup_class(cls, which='none', *args, **kwargs):
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
freq='QS')
levels = dta[['realgdp', 'realcons', 'realinv']]
obs = np.log(levels).diff().iloc[1:] * 400
if which == 'all':
obs.iloc[:50, :] = np.nan
obs.iloc[119:130, :] = np.nan
elif which == 'partial':
obs.iloc[0:50, 0] = np.nan
obs.iloc[119:130, 0] = np.nan
elif which == 'mixed':
obs.iloc[0:50, 0] = np.nan
obs.iloc[19:70, 1] = np.nan
obs.iloc[39:90, 2] = np.nan
obs.iloc[119:130, 0] = np.nan
obs.iloc[119:130, 2] = np.nan
# Create the model with typical state space
mod = mlemodel.MLEModel(obs, k_states=2, k_posdef=2, **kwargs)
mod['design'] = np.array([[-32.47143586, 17.33779024],
[-7.40264169, 1.69279859],
[-209.04702853, 125.2879374]])
mod['obs_cov'] = np.diag(
np.array([0.0622668, 1.95666886, 58.37473642]))
mod['transition'] = np.array([[0.29935707, 0.33289005],
[-0.7639868, 1.2844237]])
mod['selection'] = np.eye(2)
mod['state_cov'] = np.array([[1.2, -0.25],
[-0.25, 1.1]])
mod.initialize_approximate_diffuse(1e6)
mod.ssm.filter_univariate = True
mod.ssm.filter_collapsed = True
cls.model = mod
cls.results = mod.smooth([], return_ssm=True)
cls.sim = cls.model.simulation_smoother()
def test_loglike(self):
pass
class MultivariateVAR(object):
"""
More generic tests for simulation smoothing; use actual N(0,1) variates
"""
@classmethod
def setup_class(cls, missing='none', *args, **kwargs):
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
freq='QS')
obs = np.log(dta[['realgdp', 'realcons', 'realinv']]).diff().iloc[1:]
if missing == 'all':
obs.iloc[0:50, :] = np.nan
elif missing == 'partial':
obs.iloc[0:50, 0] = np.nan
elif missing == 'mixed':
obs.iloc[0:50, 0] = np.nan
obs.iloc[19:70, 1] = np.nan
obs.iloc[39:90, 2] = np.nan
obs.iloc[119:130, 0] = np.nan
obs.iloc[119:130, 2] = np.nan
obs.iloc[-10:, :] = np.nan
# Create the model
mod = mlemodel.MLEModel(obs, k_states=3, k_posdef=3, **kwargs)
mod['design'] = np.eye(3)
mod['obs_cov'] = np.array([
[0.0000640649, 0., 0.],
[0., 0.0000572802, 0.],
[0., 0., 0.0017088585]])
mod['transition'] = np.array([
[-0.1119908792, 0.8441841604, 0.0238725303],
[0.2629347724, 0.4996718412, -0.0173023305],
[-3.2192369082, 4.1536028244, 0.4514379215]])
mod['selection'] = np.eye(3)
mod['state_cov'] = np.array([
[0.0000640649, 0.0000388496, 0.0002148769],
[0.0000388496, 0.0000572802, 0.000001555],
[0.0002148769, 0.000001555, 0.0017088585]])
mod.initialize_approximate_diffuse(1e6)
mod.ssm.filter_univariate = True
cls.model = mod
cls.results = mod.smooth([], return_ssm=True)
cls.sim = cls.model.simulation_smoother()
def test_loglike(self):
assert_allclose(np.sum(self.results.llf_obs), self.true_llf)
def test_simulation_smoothing(self):
sim = self.sim
Z = self.model['design']
# Simulate with known variates
sim.simulate(disturbance_variates=self.variates[:-3],
initial_state_variates=self.variates[-3:])
# Test against R package KFAS values
assert_allclose(sim.simulated_state.T,
self.true[['state1', 'state2', 'state3']],
atol=1e-7)
assert_allclose(sim.simulated_measurement_disturbance,
self.true[['eps1', 'eps2', 'eps3']].T,
atol=1e-7)
assert_allclose(sim.simulated_state_disturbance,
self.true[['eta1', 'eta2', 'eta3']].T,
atol=1e-7)
signals = np.zeros((3, self.model.nobs))
for t in range(self.model.nobs):
signals[:, t] = np.dot(Z, sim.simulated_state[:, t])
assert_allclose(signals,
self.true[['signal1', 'signal2', 'signal3']].T,
atol=1e-7)
class TestMultivariateVAR(MultivariateVAR):
@classmethod
def setup_class(cls):
super(TestMultivariateVAR, cls).setup_class()
path = os.path.join(current_path, 'results',
'results_simulation_smoothing3_variates.csv')
cls.variates = pd.read_csv(path).values.squeeze()
path = os.path.join(current_path, 'results',
'results_simulation_smoothing3.csv')
cls.true = pd.read_csv(path)
cls.true_llf = 1695.34872
def test_misc():
# Create the model and simulation smoother
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
freq='QS')
obs = np.log(dta[['realgdp', 'realcons', 'realinv']]).diff().iloc[1:]
mod = sarimax.SARIMAX(obs['realgdp'], order=(1, 0, 0))
mod['design', 0, 0] = 0.
mod['obs_cov', 0, 0] = 1.
mod.update(np.r_[1., 1.])
sim = mod.simulation_smoother()
# Test that the simulation smoother is drawing variates correctly
np.random.seed(1234)
n_disturbance_variates = mod.nobs * (mod.k_endog + mod.k_states)
variates = np.random.normal(size=n_disturbance_variates)
np.random.seed(1234)
sim.simulate()
assert_allclose(sim.generated_measurement_disturbance[:, 0],
variates[:mod.nobs])
assert_allclose(sim.generated_state_disturbance[:, 0],
variates[mod.nobs:])
# Test that we can change the options of the simulations smoother
assert_equal(sim.simulation_output, mod.ssm.smoother_output)
sim.simulation_output = 0
assert_equal(sim.simulation_output, 0)
sim.simulate_state = True
assert_equal(sim.simulation_output, SIMULATION_STATE)
sim.simulate_state = False
assert_equal(sim.simulation_output, 0)
sim.simulate_disturbance = True
assert_equal(sim.simulation_output, SIMULATION_DISTURBANCE)
sim.simulate_disturbance = False
assert_equal(sim.simulation_output, 0)
sim.simulate_all = True
assert_equal(sim.simulation_output, SIMULATION_ALL)
sim.simulate_all = False
assert_equal(sim.simulation_output, 0)
def test_simulation_smoothing_obs_intercept():
nobs = 10
intercept = 100
endog = np.ones(nobs) * intercept
mod = structural.UnobservedComponents(endog, 'rwalk', exog=np.ones(nobs))
mod.update([1, intercept])
sim = mod.simulation_smoother()
sim.simulate(disturbance_variates=np.zeros(mod.nobs * 2),
initial_state_variates=np.zeros(1))
assert_equal(sim.simulated_state[0], 0)
def test_simulation_smoothing_state_intercept():
nobs = 10
intercept = 100
endog = np.ones(nobs) * intercept
mod = sarimax.SARIMAX(endog, order=(0, 0, 0), trend='c',
measurement_error=True)
mod.initialize_known([100], [[0]])
mod.update([intercept, 1., 1.])
sim = mod.simulation_smoother()
sim.simulate(disturbance_variates=np.zeros(mod.nobs * 2),
initial_state_variates=np.zeros(1))
assert_equal(sim.simulated_state[0], intercept)
def test_simulation_smoothing_state_intercept_diffuse():
nobs = 10
intercept = 100
endog = np.ones(nobs) * intercept
# Test without missing values
mod = sarimax.SARIMAX(endog, order=(0, 0, 0), trend='c',
measurement_error=True,
initialization='diffuse')
mod.update([intercept, 1., 1.])
sim = mod.simulation_smoother()
sim.simulate(disturbance_variates=np.zeros(mod.nobs * 2),
initial_state_variates=np.zeros(1))
assert_equal(sim.simulated_state[0], intercept)
# Test with missing values
endog[5] = np.nan
mod = sarimax.SARIMAX(endog, order=(0, 0, 0), trend='c',
measurement_error=True,
initialization='diffuse')
mod.update([intercept, 1., 1.])
sim = mod.simulation_smoother()
sim.simulate(disturbance_variates=np.zeros(mod.nobs * 2),
initial_state_variates=np.zeros(1))
assert_equal(sim.simulated_state[0], intercept)
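# The usage pattern exercised throughout this module (sketch, mirroring
# test_misc above; `params` stands for whatever parameter vector the model
# expects):
#
#   mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
#   mod.update(params)
#   sim = mod.simulation_smoother()
#   sim.simulate()                  # or pass disturbance_variates explicitly
#   draw = sim.simulated_state      # one draw from the smoothing distribution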
|
models/tf_Cifar_OC_NN_Models.py
|
chihyunsong/oc-nn
| 203 |
130790
|
<filename>models/tf_Cifar_OC_NN_Models.py
# USAGE
# python test_network.py --model dog_not_dog.model --image images/examples/dog_01.png
# import the necessary packages
import numpy as np
import tensorflow as tf
from keras import backend as K
import time
## Declare the scoring functions
g = lambda x : 1/(1 + tf.exp(-x))
#g = lambda x : x # Linear
def nnScore(X, w, V, g):
# print "X",X.shape
# print "w",w[0].shape
# print "v",V[0].shape
return tf.matmul(g((tf.matmul(X, w))), V)
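# The score above is g(X w) V, i.e. a single-hidden-layer network with
# activation g and a linear output layer, evaluated for every row of X.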
def relu(x):
y = x
y[y < 0] = 0
return y
import csv
from itertools import izip_longest
import matplotlib.pyplot as plt
def write_decisionScores2Csv(path, filename, positiveScores, negativeScores):
newfilePath = path+filename
print "Writing file to ", path+filename
poslist = positiveScores.tolist()
neglist = negativeScores.tolist()
# rows = zip(poslist, neglist)
d = [poslist, neglist]
export_data = izip_longest(*d, fillvalue='')
with open(newfilePath, 'w') as myfile:
wr = csv.writer(myfile)
wr.writerow(("Normal", "Anomaly"))
wr.writerows(export_data)
myfile.close()
return
decision_scorePath = "/Users/raghav/Documents/Uni/oc-nn/Decision_Scores/cifar/"
def tf_OneClass_NN_linear(data_train,data_test,nu):
tf.reset_default_graph()
RANDOM_SEED = 42
tf.set_random_seed(RANDOM_SEED)
train_X = data_train
# Layer's sizes
    x_size = train_X.shape[1]  # Number of input features
print "Input Shape:",x_size
h_size = 16 # Number of hidden nodes
    y_size = 1  # Single output score
D = x_size
K = h_size
theta = np.random.normal(0, 1, K + K*D + 1)
rvalue = np.random.normal(0,1,(len(train_X),y_size))
# nu = 0.1
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape,mean=0, stddev=1)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
X = tf.cast(X, tf.float32)
w_1 = tf.cast(w_1, tf.float32)
w_2 = tf.cast(w_2, tf.float32)
h = (tf.matmul(X, w_1)) #
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
g = lambda x : x
def nnScore(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
V = tf.cast(V, tf.float32)
return tf.matmul(g((tf.matmul(X, w))), V)
def relu1(x):
y = x
y = tf.nn.relu(x)
return y
def relu(x):
with sess.as_default():
x = x.eval()
y = x
y[y< 0] = 0
# y = tf.nn.relu(x)
return y
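    # ocnn_obj below is the one-class NN objective minimised in this script:
    #   0.5 * ||w||^2 + 0.5 * ||V||^2
    #     + (1 / nu) * mean(max(0, r - nnScore(X, w, V, g))) - r
    # w and V are updated by the optimizer, while r is fed in and re-set each
    # epoch to the nu-quantile of the training scores (see the np.percentile
    # update in the training loop further down).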
def ocnn_obj(theta, X, nu, w1, w2, g,r):
w = w1
V = w2
X = tf.cast(X, tf.float32)
w = tf.cast(w1, tf.float32)
V = tf.cast(w2, tf.float32)
term1 = 0.5 * tf.reduce_sum(w**2)
term2 = 0.5 * tf.reduce_sum(V**2)
term3 = 1/nu * tf.reduce_mean(tf.nn.relu(r - nnScore(X, w, V, g)))
term4 = -r
return term1 + term2 + term3 + term4
# For testing the algorithm
test_X = data_test
# Symbols
X = tf.placeholder("float32", shape=[None, x_size])
r = tf.get_variable("r", dtype=tf.float32,shape=(),trainable=False)
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
# Forward propagation
# yhat = forwardprop(X, w_1, w_2)
# predict = tf.argmax(yhat, axis=1)
# Backward propagation
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
cost = ocnn_obj(theta, X, nu, w_1, w_2, g,r)
updates = tf.train.AdamOptimizer(0.05).minimize(cost)
# Run optimization routine after initialization
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
rvalue = 0.1
start_time = time.time()
for epoch in range(100):
# Train with each example
sess.run(updates, feed_dict={X: train_X,r:rvalue})
rvalue = nnScore(train_X, w_1, w_2, g)
with sess.as_default():
rvalue = rvalue.eval()
rvalue = np.percentile(rvalue,q=100*nu)
print("Epoch = %d, r = %f"
% (epoch + 1,rvalue))
trainTime = time.time() - start_time
### Get the optimized weights here
start_time = time.time()
train = nnScore(train_X, w_1, w_2, g)
test = nnScore(test_X, w_1, w_2, g)
testTime = time.time() - start_time
with sess.as_default():
arrayTrain = train.eval()
arrayTest = test.eval()
# rstar = r.eval()
rstar =rvalue
sess.close()
print "Session Closed!!!"
pos_decisionScore = arrayTrain-rstar
pos_decisionScore[pos_decisionScore < 0] = 0
neg_decisionScore = arrayTest-rstar
print "&&&&&&&&&&&&"
print pos_decisionScore
print neg_decisionScore
# write_decisionScores2Csv(decision_scorePath, "OneClass_NN_linear.csv", pos_decisionScore, neg_decisionScore)
return [pos_decisionScore, neg_decisionScore,trainTime,testTime]
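# Usage sketch (hypothetical feature matrices; `extract_features` and nu=0.04
# are illustrative, not part of this script):
#
#   X_train = extract_features(normal_images)   # shape (n_train, d)
#   X_test = extract_features(unseen_images)    # shape (n_test, d)
#   pos, neg, train_time, test_time = tf_OneClass_NN_linear(X_train, X_test, 0.04)
#
# The returned decision scores are nnScore(x) - r; values >= 0 are treated as
# normal and negative values flag anomalies (pos has its negative entries
# clipped to zero above).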
def tf_OneClass_NN_sigmoid(data_train,data_test,nu):
tf.reset_default_graph()
sess = tf.Session()
train_X = data_train
RANDOM_SEED = 42
tf.set_random_seed(RANDOM_SEED)
# Layer's sizes
    x_size = train_X.shape[1]  # Number of input features
print "Input Shape:", x_size
h_size = 16 # Number of hidden nodes
    y_size = 1  # Single output score
D = x_size
K = h_size
theta = np.random.normal(0, 1, K + K*D + 1)
rvalue = np.random.normal(0,1,(len(train_X),y_size))
# nu = 0.1
import math
def plotNNFilter(units):
filters = 3
fig = plt.figure(1, figsize=(20, 20))
n_columns = 6
n_rows = math.ceil(filters / n_columns) + 1
for i in range(filters):
plt.subplot(n_rows, n_columns, i + 1)
plt.title('Filter ' + str(i))
plt.imshow(units[0, :, :, i], interpolation="nearest", cmap="gray")
plt.savefig('/Users/raghav/Documents/Uni/oc-nn/models/representation_sigmoid_dog.png')
# def getActivations(layer, stimuli):
# units = sess.run(layer, feed_dict={x: np.reshape(stimuli, [1, 784], order='F'), keep_prob: 1.0})
# plotNNFilter(units)
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape,mean=0, stddev=0.00001)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
X = tf.cast(X, tf.float32)
w_1 = tf.cast(w_1, tf.float32)
w_2 = tf.cast(w_2, tf.float32)
h = tf.nn.sigmoid(tf.matmul(X, w_1)) # The \sigma function
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
g = lambda x : 1/(1 + tf.exp(-x))
def nnScore(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
V = tf.cast(V, tf.float32)
return tf.matmul(g((tf.matmul(X, w))), V)
def data_rep(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
return g((tf.matmul(X, w)))
def relu(x):
y = tf.nn.relu(x)
return y
def ocnn_obj(theta, X, nu, w1, w2, g,r):
w = w1
V = w2
X = tf.cast(X, tf.float32)
w = tf.cast(w1, tf.float32)
V = tf.cast(w2, tf.float32)
term1 = 0.5 * tf.reduce_sum(w**2)
term2 = 0.5 * tf.reduce_sum(V**2)
term3 = 1/nu * tf.reduce_mean(relu(r - nnScore(X, w, V, g)))
term4 = -r
return term1 + term2 + term3 + term4
# For testing the algorithm
test_X = data_test
# Symbols
X = tf.placeholder("float32", shape=[None, x_size])
r = tf.get_variable("r", dtype=tf.float32,shape=(),trainable=False)
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
# Forward propagation
yhat = forwardprop(X, w_1, w_2)
predict = tf.argmax(yhat, axis=1)
# Backward propagation
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
cost = ocnn_obj(theta, X, nu, w_1, w_2, g,r)
updates = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
# Run SGD
init = tf.global_variables_initializer()
sess.run(init)
rvalue = 0.1
start_time = time.time()
for epoch in range(100):
# Train with each example
units = sess.run(updates, feed_dict={X: train_X,r:rvalue})
# plotNNFilter(units)
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
rvalue = nnScore(train_X, w1, w2, g)
with sess.as_default():
rvalue = rvalue.eval()
rvalue = np.percentile(rvalue,q=100*nu)
print("Epoch = %d, r = %f"
% (epoch + 1,rvalue))
trainTime = time.time() - start_time
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
start_time = time.time()
train = nnScore(train_X, w1, w2, g)
test = nnScore(test_X, w1, w2, g)
train_rep = data_rep(train_X, w1, w2, g)
test_rep = data_rep(test_X, w1, w2, g)
testTime = time.time() - start_time
with sess.as_default():
arrayTrain = train.eval()
arrayTest = test.eval()
arraytrain_rep =train_rep.eval()
arraytest_rep= test_rep.eval()
# rstar = r.eval()
rstar =rvalue
sess.close()
print "Session Closed!!!"
print "Saving Hidden layer weights w1 for cifar.. data"
import scipy.io as sio
sio.savemat('/Users/raghav/Documents/Uni/oc-nn/models/w1.mat', {'data': arraytrain_rep})
sio.savemat('/Users/raghav/Documents/Uni/oc-nn/models/w2.mat', {'data': arraytest_rep})
pos_decisionScore = arrayTrain-rstar
pos_decisionScore[pos_decisionScore< 0] = 0 ## Clip all the negative values to zero
neg_decisionScore = arrayTest-rstar
# write_decisionScores2Csv(decision_scorePath, "OneClass_NN_sigmoid.csv", pos_decisionScore, neg_decisionScore)
return [pos_decisionScore, neg_decisionScore,trainTime,testTime]
def tf_OneClass_NN_relu(data_train,data_test,nu):
tf.reset_default_graph()
sess = tf.Session()
train_X = data_train
RANDOM_SEED = 42
tf.set_random_seed(RANDOM_SEED)
# Layer's sizes
    x_size = train_X.shape[1]  # Number of input features
print "Input Shape:", x_size
h_size = 16 # Number of hidden nodes
    y_size = 1  # Single output score
D = x_size
K = h_size
theta = np.random.normal(0, 1, K + K*D + 1)
rvalue = np.random.normal(0,1,(len(train_X),y_size))
# nu = 0.1
def init_weights(shape):
""" Weight initialization """
weights = tf.random_normal(shape,mean=0, stddev=0.00001)
return tf.Variable(weights)
def forwardprop(X, w_1, w_2):
"""
Forward-propagation.
IMPORTANT: yhat is not softmax since TensorFlow's softmax_cross_entropy_with_logits() does that internally.
"""
X = tf.cast(X, tf.float32)
w_1 = tf.cast(w_1, tf.float32)
w_2 = tf.cast(w_2, tf.float32)
h = tf.nn.sigmoid(tf.matmul(X, w_1)) # The \sigma function
yhat = tf.matmul(h, w_2) # The \varphi function
return yhat
g = lambda x : relu(x)
def nnScore(X, w, V, g):
X = tf.cast(X, tf.float32)
w = tf.cast(w, tf.float32)
V = tf.cast(V, tf.float32)
return tf.matmul(g((tf.matmul(X, w))), V)
def relu(x):
y = tf.nn.relu(x)
return y
def ocnn_obj(theta, X, nu, w1, w2, g,r):
w = w1
V = w2
X = tf.cast(X, tf.float32)
w = tf.cast(w1, tf.float32)
V = tf.cast(w2, tf.float32)
term1 = 0.5 * tf.reduce_sum(w**2)
term2 = 0.5 * tf.reduce_sum(V**2)
term3 = 1/nu * tf.reduce_mean(relu(r - nnScore(X, w, V, g)))
term4 = -r
return term1 + term2 + term3 + term4
# For testing the algorithm
test_X = data_test
# Symbols
X = tf.placeholder("float32", shape=[None, x_size])
r = tf.get_variable("r", dtype=tf.float32,shape=(),trainable=False)
# Weight initializations
w_1 = init_weights((x_size, h_size))
w_2 = init_weights((h_size, y_size))
# Forward propagation
yhat = forwardprop(X, w_1, w_2)
predict = tf.argmax(yhat, axis=1)
# Backward propagation
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=yhat))
cost = ocnn_obj(theta, X, nu, w_1, w_2, g,r)
updates = tf.train.GradientDescentOptimizer(0.0001).minimize(cost)
# Run SGD
init = tf.global_variables_initializer()
sess.run(init)
rvalue = 0.1
for epoch in range(100):
# Train with each example
sess.run(updates, feed_dict={X: train_X,r:rvalue})
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
rvalue = nnScore(train_X, w1, w2, g)
with sess.as_default():
rvalue = rvalue.eval()
rvalue = np.percentile(rvalue,q=100*nu)
print("Epoch = %d, r = %f"
% (epoch + 1,rvalue))
with sess.as_default():
w1 = w_1.eval()
w2 = w_2.eval()
train = nnScore(train_X, w1, w2, g)
test = nnScore(test_X, w1, w2, g)
with sess.as_default():
arrayTrain = train.eval()
arrayTest = test.eval()
# rstar = r.eval()
rstar =rvalue
sess.close()
print "Session Closed!!!"
pos_decisionScore = arrayTrain-rstar
pos_decisionScore[pos_decisionScore< 0] = 0 ## Clip all the negative values to zero
neg_decisionScore = arrayTest-rstar
return [pos_decisionScore,neg_decisionScore]
|
ext/bookstructures.py
|
kawaken/typescript-guide
| 456 |
130796
|
<filename>ext/bookstructures.py
from docutils import nodes
from docutils.parsers.rst import Directive
from sphinx.locale import _
def frontmatter(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return [nodes.raw(
'',
r"""
\include{tobiraura}
\frontmatter
\setcounter{page}{3}
""",
format='latex')]
def mainmatter(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return [nodes.raw(
'',
r"""
\withintoctrue
\tableofcontents
\withintocfalse
\mainmatter
""",
format='latex')]
def backmatter(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return [nodes.raw(
'',
r"""
\backmatter
""",
format='latex')]
def appendix(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return [nodes.raw(
'',
r"""
\appendix
""",
format='latex')]
def setup(app):
app.add_directive('frontmatter', frontmatter, 1, (0, 0, 0))
app.add_directive('mainmatter', mainmatter, 1, (0, 0, 0))
app.add_directive('appendix', appendix, 1, (0, 0, 0))
app.add_directive('backmatter', backmatter, 1, (0, 0, 0))
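# Usage sketch: in the reST sources the directives registered above are used
# as plain markers, e.g.
#
#   .. frontmatter::
#
#   .. mainmatter::
#
#   .. appendix::
#
#   .. backmatter::
#
# Each directive only emits the raw LaTeX shown in its function, so it shapes
# the LaTeX/PDF build and should be ignored by builders that do not render
# raw latex nodes.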
|
pommerman/forward_model.py
|
aarunsrinivas/pommerman
| 725 |
130807
|
'''Module to manage and advance the game state'''
from collections import defaultdict
import numpy as np
from . import constants
from . import characters
from . import utility
class ForwardModel(object):
"""Class for helping with the [forward] modeling of the game state."""
def run(self,
num_times,
board,
agents,
bombs,
items,
flames,
is_partially_observable,
agent_view_size,
action_space,
training_agent=None,
is_communicative=False):
"""Run the forward model.
Args:
num_times: The number of times to run it for. This is a maximum and
it will stop early if we reach a done.
board: The board state to run it from.
agents: The agents to use to run it.
bombs: The starting bombs.
items: The starting items.
flames: The starting flames.
is_partially_observable: Whether the board is partially observable or
not. Only applies to TeamRadio.
agent_view_size: If it's partially observable, then the size of the
square that the agent can view.
action_space: The actions that each agent can take.
training_agent: The training agent to pass to done.
is_communicative: Whether the action depends on communication
observations as well.
Returns:
steps: The list of step results, which are each a dict of "obs",
"next_obs", "reward", "action".
board: Updated board.
agents: Updated agents, same models though.
bombs: Updated bombs.
items: Updated items.
flames: Updated flames.
done: Whether we completed the game in these steps.
info: The result of the game if it's completed.
"""
steps = []
        for _ in range(num_times):
obs = self.get_observations(
board, agents, bombs, is_partially_observable, agent_view_size)
actions = self.act(
agents, obs, action_space, is_communicative=is_communicative)
board, agents, bombs, items, flames = self.step(
actions, board, agents, bombs, items, flames)
next_obs = self.get_observations(
board, agents, bombs, is_partially_observable, agent_view_size)
reward = self.get_rewards(agents, game_type, step_count, max_steps)
done = self.get_done(agents, game_type, step_count, max_steps,
training_agent)
            info = self.get_info(done, reward, game_type, agents)
steps.append({
"obs": obs,
"next_obs": next_obs,
"reward": reward,
"actions": actions,
})
if done:
# Callback to let the agents know that the game has ended.
for agent in agents:
agent.episode_end(reward[agent.agent_id])
break
return steps, board, agents, bombs, items, flames, done, info
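    # Sketch of a rollout with this helper (the board/agents/bombs/... objects
    # are whatever the environment currently holds; the values shown are
    # illustrative only):
    #
    #   fm = ForwardModel()
    #   steps, board, agents, bombs, items, flames, done, info = fm.run(
    #       num_times=10, board=board, agents=agents, bombs=bombs, items=items,
    #       flames=flames, is_partially_observable=False, agent_view_size=4,
    #       action_space=action_space)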
@staticmethod
def act(agents, obs, action_space, is_communicative=False):
"""Returns actions for each agent in this list.
Args:
agents: A list of agent objects.
obs: A list of matching observations per agent.
action_space: The action space for the environment using this model.
is_communicative: Whether the action depends on communication
observations as well.
Returns a list of actions.
"""
def act_ex_communication(agent):
'''Handles agent's move without communication'''
if agent.is_alive:
return agent.act(obs[agent.agent_id], action_space=action_space)
else:
return constants.Action.Stop.value
def act_with_communication(agent):
'''Handles agent's move with communication'''
if agent.is_alive:
action = agent.act(
obs[agent.agent_id], action_space=action_space)
if type(action) == int:
action = [action] + [0, 0]
assert (type(action) == list)
return action
else:
return [constants.Action.Stop.value, 0, 0]
ret = []
for agent in agents:
if is_communicative:
ret.append(act_with_communication(agent))
else:
ret.append(act_ex_communication(agent))
return ret
@staticmethod
def step(actions,
curr_board,
curr_agents,
curr_bombs,
curr_items,
curr_flames,
max_blast_strength=10):
board_size = len(curr_board)
# Tick the flames. Replace any dead ones with passages. If there is an
# item there, then reveal that item.
flames = []
for flame in curr_flames:
position = flame.position
if flame.is_dead():
item_value = curr_items.get(position)
if item_value:
del curr_items[position]
else:
item_value = constants.Item.Passage.value
curr_board[position] = item_value
else:
flame.tick()
flames.append(flame)
curr_flames = flames
# Redraw all current flames
# Multiple flames may share a position and the map should contain
# a flame until all flames are dead to avoid issues with bomb
# movements and explosions.
for flame in curr_flames:
curr_board[flame.position] = constants.Item.Flames.value
# Step the living agents and moving bombs.
# If two agents try to go to the same spot, they should bounce back to
# their previous spots. This is complicated with one example being when
# there are three agents all in a row. If the one in the middle tries
# to go to the left and bounces with the one on the left, and then the
# one on the right tried to go to the middle one's position, she should
# also bounce. A way of doing this is to gather all the new positions
# before taking any actions. Then, if there are disputes, correct those
# disputes iteratively.
        # Additionally, if two agents try to switch spots by moving into each
        # other's position, both are reverted to their previous positions.
# Figure out desired next position for alive agents
alive_agents = [agent for agent in curr_agents if agent.is_alive]
desired_agent_positions = [agent.position for agent in alive_agents]
for num_agent, agent in enumerate(alive_agents):
position = agent.position
# We change the curr_board here as a safeguard. We will later
# update the agent's new position.
curr_board[position] = constants.Item.Passage.value
action = actions[agent.agent_id]
if action == constants.Action.Stop.value:
pass
elif action == constants.Action.Bomb.value:
position = agent.position
if not utility.position_is_bomb(curr_bombs, position):
bomb = agent.maybe_lay_bomb()
if bomb:
curr_bombs.append(bomb)
elif utility.is_valid_direction(curr_board, position, action):
desired_agent_positions[num_agent] = agent.get_next_position(
action)
# Gather desired next positions for moving bombs. Handle kicks later.
desired_bomb_positions = [bomb.position for bomb in curr_bombs]
for num_bomb, bomb in enumerate(curr_bombs):
curr_board[bomb.position] = constants.Item.Passage.value
if bomb.is_moving():
desired_position = utility.get_next_position(
bomb.position, bomb.moving_direction)
if utility.position_on_board(curr_board, desired_position) \
and not utility.position_is_powerup(curr_board, desired_position) \
and not utility.position_is_wall(curr_board, desired_position):
desired_bomb_positions[num_bomb] = desired_position
# Position switches:
# Agent <-> Agent => revert both to previous position.
# Bomb <-> Bomb => revert both to previous position.
# Agent <-> Bomb => revert Bomb to previous position.
crossings = {}
def crossing(current, desired):
            '''Returns a canonical key for the border crossed between two adjacent positions'''
current_x, current_y = current
desired_x, desired_y = desired
if current_x != desired_x:
assert current_y == desired_y
return ('X', min(current_x, desired_x), current_y)
assert current_x == desired_x
return ('Y', current_x, min(current_y, desired_y))
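        # Note that crossing((2, 3), (2, 4)) and crossing((2, 4), (2, 3)) both
        # return ('Y', 2, 3), so two entities swapping adjacent cells collide
        # on the same border key below.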
for num_agent, agent in enumerate(alive_agents):
if desired_agent_positions[num_agent] != agent.position:
desired_position = desired_agent_positions[num_agent]
border = crossing(agent.position, desired_position)
if border in crossings:
# Crossed another agent - revert both to prior positions.
desired_agent_positions[num_agent] = agent.position
num_agent2, _ = crossings[border]
desired_agent_positions[num_agent2] = alive_agents[
num_agent2].position
else:
crossings[border] = (num_agent, True)
for num_bomb, bomb in enumerate(curr_bombs):
if desired_bomb_positions[num_bomb] != bomb.position:
desired_position = desired_bomb_positions[num_bomb]
border = crossing(bomb.position, desired_position)
if border in crossings:
# Crossed - revert to prior position.
desired_bomb_positions[num_bomb] = bomb.position
num, is_agent = crossings[border]
if not is_agent:
# Crossed bomb - revert that to prior position as well.
desired_bomb_positions[num] = curr_bombs[num].position
else:
crossings[border] = (num_bomb, False)
# Deal with multiple agents or multiple bomb collisions on desired next
# position by resetting desired position to current position for
# everyone involved in the collision.
agent_occupancy = defaultdict(int)
bomb_occupancy = defaultdict(int)
for desired_position in desired_agent_positions:
agent_occupancy[desired_position] += 1
for desired_position in desired_bomb_positions:
bomb_occupancy[desired_position] += 1
# Resolve >=2 agents or >=2 bombs trying to occupy the same space.
change = True
while change:
change = False
for num_agent, agent in enumerate(alive_agents):
desired_position = desired_agent_positions[num_agent]
curr_position = agent.position
# Either another agent is going to this position or more than
# one bomb is going to this position. In both scenarios, revert
# to the original position.
if desired_position != curr_position and \
(agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] > 1):
desired_agent_positions[num_agent] = curr_position
agent_occupancy[curr_position] += 1
change = True
for num_bomb, bomb in enumerate(curr_bombs):
desired_position = desired_bomb_positions[num_bomb]
curr_position = bomb.position
if desired_position != curr_position and \
(bomb_occupancy[desired_position] > 1 or agent_occupancy[desired_position] > 1):
desired_bomb_positions[num_bomb] = curr_position
bomb_occupancy[curr_position] += 1
change = True
# Handle kicks.
agent_indexed_by_kicked_bomb = {}
kicked_bomb_indexed_by_agent = {}
delayed_bomb_updates = []
delayed_agent_updates = []
# Loop through all bombs to see if they need a good kicking or cause
# collisions with an agent.
for num_bomb, bomb in enumerate(curr_bombs):
desired_position = desired_bomb_positions[num_bomb]
if agent_occupancy[desired_position] == 0:
# There was never an agent around to kick or collide.
continue
agent_list = [
(num_agent, agent) for (num_agent, agent) in enumerate(alive_agents) \
if desired_position == desired_agent_positions[num_agent]]
if not agent_list:
# Agents moved from collision.
continue
# The agent_list should contain a single element at this point.
assert (len(agent_list) == 1)
num_agent, agent = agent_list[0]
if desired_position == agent.position:
# Agent did not move
if desired_position != bomb.position:
# Bomb moved, but agent did not. The bomb should revert
# and stop.
delayed_bomb_updates.append((num_bomb, bomb.position))
continue
# NOTE: At this point, we have that the agent in question tried to
# move into this position.
if not agent.can_kick:
# If we move the agent at this point, then we risk having two
# agents on a square in future iterations of the loop. So we
# push this change to the next stage instead.
delayed_bomb_updates.append((num_bomb, bomb.position))
delayed_agent_updates.append((num_agent, agent.position))
continue
            # Agent moved and can kick - see if the target position for the kick is free
direction = constants.Action(actions[agent.agent_id])
target_position = utility.get_next_position(desired_position,
direction)
if utility.position_on_board(curr_board, target_position) and \
agent_occupancy[target_position] == 0 and \
bomb_occupancy[target_position] == 0 and \
not utility.position_is_powerup(curr_board, target_position) and \
not utility.position_is_wall(curr_board, target_position):
                # It is OK to update the bomb's desired location since we won't
                # iterate over it again here, but we cannot update bomb_occupancy
                # for the target position and need to check it again later.
                # However, we do need to set the bomb count on the current
                # position to zero so that the agent can stay on this position.
bomb_occupancy[desired_position] = 0
delayed_bomb_updates.append((num_bomb, target_position))
agent_indexed_by_kicked_bomb[num_bomb] = num_agent
kicked_bomb_indexed_by_agent[num_agent] = num_bomb
bomb.moving_direction = direction
            # Bombs may still collide later, in which case we need to revert both the bomb and the agent.
else:
delayed_bomb_updates.append((num_bomb, bomb.position))
delayed_agent_updates.append((num_agent, agent.position))
for (num_bomb, bomb_position) in delayed_bomb_updates:
desired_bomb_positions[num_bomb] = bomb_position
bomb_occupancy[bomb_position] += 1
change = True
for (num_agent, agent_position) in delayed_agent_updates:
desired_agent_positions[num_agent] = agent_position
agent_occupancy[agent_position] += 1
change = True
while change:
change = False
for num_agent, agent in enumerate(alive_agents):
desired_position = desired_agent_positions[num_agent]
curr_position = agent.position
# Agents and bombs can only share a square if they are both in their
# original position (Agent dropped bomb and has not moved)
if desired_position != curr_position and \
(agent_occupancy[desired_position] > 1 or bomb_occupancy[desired_position] != 0):
# Late collisions resulting from failed kicks force this agent to stay at the
# original position. Check if this agent successfully kicked a bomb above and undo
# the kick.
if num_agent in kicked_bomb_indexed_by_agent:
num_bomb = kicked_bomb_indexed_by_agent[num_agent]
bomb = curr_bombs[num_bomb]
desired_bomb_positions[num_bomb] = bomb.position
bomb_occupancy[bomb.position] += 1
del agent_indexed_by_kicked_bomb[num_bomb]
del kicked_bomb_indexed_by_agent[num_agent]
desired_agent_positions[num_agent] = curr_position
agent_occupancy[curr_position] += 1
change = True
for num_bomb, bomb in enumerate(curr_bombs):
desired_position = desired_bomb_positions[num_bomb]
curr_position = bomb.position
# This bomb may be a boomerang, i.e. it was kicked back to the
# original location it moved from. If it is blocked now, it
# can't be kicked and the agent needs to move back to stay
# consistent with other movements.
if desired_position == curr_position and num_bomb not in agent_indexed_by_kicked_bomb:
continue
bomb_occupancy_ = bomb_occupancy[desired_position]
agent_occupancy_ = agent_occupancy[desired_position]
# Agents and bombs can only share a square if they are both in their
# original position (Agent dropped bomb and has not moved)
if bomb_occupancy_ > 1 or agent_occupancy_ != 0:
desired_bomb_positions[num_bomb] = curr_position
bomb_occupancy[curr_position] += 1
num_agent = agent_indexed_by_kicked_bomb.get(num_bomb)
if num_agent is not None:
agent = alive_agents[num_agent]
desired_agent_positions[num_agent] = agent.position
agent_occupancy[agent.position] += 1
del kicked_bomb_indexed_by_agent[num_agent]
del agent_indexed_by_kicked_bomb[num_bomb]
change = True
for num_bomb, bomb in enumerate(curr_bombs):
if desired_bomb_positions[num_bomb] == bomb.position and \
not num_bomb in agent_indexed_by_kicked_bomb:
# Bomb was not kicked this turn and its desired position is its
# current location. Stop it just in case it was moving before.
bomb.stop()
else:
# Move bomb to the new position.
# NOTE: We already set the moving direction up above.
bomb.position = desired_bomb_positions[num_bomb]
for num_agent, agent in enumerate(alive_agents):
if desired_agent_positions[num_agent] != agent.position:
agent.move(actions[agent.agent_id])
if utility.position_is_powerup(curr_board, agent.position):
agent.pick_up(
constants.Item(curr_board[agent.position]),
max_blast_strength=max_blast_strength)
# Explode bombs.
exploded_map = np.zeros_like(curr_board)
has_new_explosions = False
for bomb in curr_bombs:
bomb.tick()
if bomb.exploded():
has_new_explosions = True
elif curr_board[bomb.position] == constants.Item.Flames.value:
bomb.fire()
has_new_explosions = True
# Chain the explosions.
while has_new_explosions:
next_bombs = []
has_new_explosions = False
for bomb in curr_bombs:
if not bomb.exploded():
next_bombs.append(bomb)
continue
bomb.bomber.incr_ammo()
for _, indices in bomb.explode().items():
for r, c in indices:
if not all(
[r >= 0, c >= 0, r < board_size, c < board_size]):
break
if curr_board[r][c] == constants.Item.Rigid.value:
break
exploded_map[r][c] = 1
if curr_board[r][c] == constants.Item.Wood.value:
break
curr_bombs = next_bombs
for bomb in curr_bombs:
if bomb.in_range(exploded_map):
bomb.fire()
has_new_explosions = True
# Update the board's bombs.
for bomb in curr_bombs:
curr_board[bomb.position] = constants.Item.Bomb.value
# Update the board's flames.
flame_positions = np.where(exploded_map == 1)
for row, col in zip(flame_positions[0], flame_positions[1]):
curr_flames.append(characters.Flame((row, col)))
for flame in curr_flames:
curr_board[flame.position] = constants.Item.Flames.value
# Kill agents on flames. Otherwise, update position on curr_board.
for agent in alive_agents:
if curr_board[agent.position] == constants.Item.Flames.value:
agent.die()
else:
curr_board[agent.position] = utility.agent_value(agent.agent_id)
return curr_board, curr_agents, curr_bombs, curr_items, curr_flames
def get_observations(self, curr_board, agents, bombs, flames,
is_partially_observable, agent_view_size,
game_type, game_env):
"""Gets the observations as an np.array of the visible squares.
The agent gets to choose whether it wants to keep the fogged part in
memory.
"""
board_size = len(curr_board)
def make_bomb_maps(position):
            ''' Makes arrays of the blast strength, life and moving direction of bombs visible from a given position '''
blast_strengths = np.zeros((board_size, board_size))
life = np.zeros((board_size, board_size))
moving_direction = np.zeros((board_size, board_size))
for bomb in bombs:
x, y = bomb.position
if not is_partially_observable \
or in_view_range(position, x, y):
blast_strengths[(x, y)] = bomb.blast_strength
life[(x, y)] = bomb.life
if bomb.moving_direction is not None:
moving_direction[(x, y)] = bomb.moving_direction.value
return blast_strengths, life, moving_direction
def make_flame_map(position):
            ''' Makes an array of the flame life visible from a given position '''
life = np.zeros((board_size, board_size))
for flame in flames:
x, y = flame.position
if not is_partially_observable \
or in_view_range(position, x, y):
# +1 needed because flame removal check is done
# before flame is ticked down, i.e. flame life
# in environment is 2 -> 1 -> 0 -> dead
life[(x, y)] = flame.life + 1
return life
def in_view_range(position, v_row, v_col):
            '''Checks whether a tile is within the agent's viewing area'''
row, col = position
return all([
row >= v_row - agent_view_size, row <= v_row + agent_view_size,
col >= v_col - agent_view_size, col <= v_col + agent_view_size
])
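        # For example, with agent_view_size == 4 an agent at (5, 5) can see
        # tile (8, 2) (both offsets are within 4) but not tile (10, 5).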
attrs = [
'position', 'blast_strength', 'can_kick', 'teammate', 'ammo',
'enemies'
]
alive_agents = [
utility.agent_value(agent.agent_id)
for agent in agents
if agent.is_alive
]
observations = []
for agent in agents:
agent_obs = {'alive': alive_agents}
board = curr_board.copy()
if is_partially_observable:
for row in range(board_size):
for col in range(board_size):
if not in_view_range(agent.position, row, col):
board[row, col] = constants.Item.Fog.value
agent_obs['board'] = board
bomb_blast_strengths, bomb_life, bomb_moving_direction = make_bomb_maps(agent.position)
agent_obs['bomb_blast_strength'] = bomb_blast_strengths
agent_obs['bomb_life'] = bomb_life
agent_obs['bomb_moving_direction'] = bomb_moving_direction
flame_life = make_flame_map(agent.position)
agent_obs['flame_life'] = flame_life
agent_obs['game_type'] = game_type.value
agent_obs['game_env'] = game_env
for attr in attrs:
assert hasattr(agent, attr)
agent_obs[attr] = getattr(agent, attr)
observations.append(agent_obs)
return observations
@staticmethod
def get_done(agents, step_count, max_steps, game_type, training_agent):
alive = [agent for agent in agents if agent.is_alive]
alive_ids = sorted([agent.agent_id for agent in alive])
if step_count >= max_steps:
return True
elif game_type == constants.GameType.FFA or game_type == constants.GameType.OneVsOne:
if training_agent is not None and training_agent not in alive_ids:
return True
return len(alive) <= 1
elif any([
len(alive_ids) <= 1,
alive_ids == [0, 2],
alive_ids == [1, 3],
]):
return True
return False
@staticmethod
def get_info(done, rewards, game_type, agents):
if game_type == constants.GameType.FFA or game_type == constants.GameType.OneVsOne:
alive = [agent for agent in agents if agent.is_alive]
if done:
if len(alive) != 1:
# Either we have more than 1 alive (reached max steps) or
# we have 0 alive (last agents died at the same time).
return {
'result': constants.Result.Tie,
}
else:
return {
'result': constants.Result.Win,
'winners': [num for num, reward in enumerate(rewards) \
if reward == 1]
}
else:
return {
'result': constants.Result.Incomplete,
}
elif done:
# We are playing a team game.
if rewards == [-1] * 4:
return {
'result': constants.Result.Tie,
}
else:
return {
'result': constants.Result.Win,
'winners': [num for num, reward in enumerate(rewards) \
if reward == 1],
}
else:
return {
'result': constants.Result.Incomplete,
}
@staticmethod
def get_rewards(agents, game_type, step_count, max_steps):
def any_lst_equal(lst, values):
            '''Checks whether lst equals any of the lists in values'''
return any([lst == v for v in values])
alive_agents = [num for num, agent in enumerate(agents) \
if agent.is_alive]
if game_type == constants.GameType.FFA:
if len(alive_agents) == 1:
# An agent won. Give them +1, others -1.
return [2 * int(agent.is_alive) - 1 for agent in agents]
elif step_count >= max_steps:
# Game is over from time. Everyone gets -1.
return [-1] * 4
else:
# Game running: 0 for alive, -1 for dead.
return [int(agent.is_alive) - 1 for agent in agents]
elif game_type == constants.GameType.OneVsOne:
if len(alive_agents) == 1:
# An agent won. Give them +1, the other -1.
return [2 * int(agent.is_alive) - 1 for agent in agents]
elif step_count >= max_steps:
# Game is over from time. Everyone gets -1.
return [-1] * 2
else:
# Game running
return [0, 0]
else:
# We are playing a team game.
if any_lst_equal(alive_agents, [[0, 2], [0], [2]]):
# Team [0, 2] wins.
return [1, -1, 1, -1]
elif any_lst_equal(alive_agents, [[1, 3], [1], [3]]):
# Team [1, 3] wins.
return [-1, 1, -1, 1]
elif step_count >= max_steps:
# Game is over by max_steps. All agents tie.
return [-1] * 4
elif len(alive_agents) == 0:
# Everyone's dead. All agents tie.
return [-1] * 4
else:
# No team has yet won or lost.
return [0] * 4
|
tests/snippets.py
|
pierogis/imageio-ffmpeg
| 127 |
130811
|
<reponame>pierogis/imageio-ffmpeg<gh_stars>100-1000
"""
Snippets of code that are hard to bring under test, but that can be
used to manually test the behavior of imageio-ffmpeg in certain
use-cases. Some may depend on imageio.
"""
# %% Write a series of large frames
# In earlier versions of imageio-ffmpeg, the ffmpeg process was given a timeout
# to complete, but this timeout must be longer for longer movies. The default
# is now to wait for ffmpeg.
import os
import numpy as np
import imageio_ffmpeg
ims = [
np.random.uniform(0, 255, size=(1000, 1000, 3)).astype(np.uint8) for i in range(10)
]
filename = os.path.expanduser("~/Desktop/foo.mp4")
w = imageio_ffmpeg.write_frames(filename, (1000, 1000), ffmpeg_timeout=0)
w.send(None)
for i in range(200):
w.send(ims[i % 10])
print(i)
w.close()
# %% Behavior of KeyboardInterrupt / Ctrl+C
import os
import imageio_ffmpeg
filename = os.path.expanduser("~/.imageio/images/cockatoo.mp4")
reader = imageio_ffmpeg.read_frames(filename)
meta = reader.__next__()
try:
input("Do a manual KeyboardInterrupt now [Ctrl]+[c]")
# Note: Raising an error with code won't trigger the original error.
except BaseException as err:
print(err)
print("out1", len(reader.__next__()))
print("out2", len(reader.__next__()))
print("closing")
reader.close()
print("closed")
|
axelrod/tests/strategies/test_gambler.py
|
nandhinianandj/Axelrod
| 596 |
130818
|
<filename>axelrod/tests/strategies/test_gambler.py
"""Test for the Gambler strategy. Most tests come from the LookerUp test suite.
"""
import copy
import unittest
import axelrod as axl
from axelrod.load_data_ import load_pso_tables
from axelrod.strategies.lookerup import create_lookup_table_keys
from .test_evolvable_player import PartialClass, TestEvolvablePlayer
from .test_lookerup import convert_original_to_current
from .test_player import TestPlayer
tables = load_pso_tables("pso_gambler.csv", directory="data")
C, D = axl.Action.C, axl.Action.D
random = axl.RandomGenerator()
class TestGambler(TestPlayer):
name = "Gambler"
player = axl.Gambler
expected_classifier = {
"memory_depth": 1,
"stochastic": True,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
expected_class_classifier = copy.copy(expected_classifier)
def test_strategy(self):
tft_table = {((), (D,), ()): 0, ((), (C,), ()): 1}
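        # The key layout follows LookerUp: (own recent plays, opponent's
        # recent plays, opponent's opening plays), and each value is the
        # probability of cooperating; with the opponent's last play as the
        # only input, this table reproduces Tit For Tat deterministically.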
self.versus_test(
axl.Alternator(),
expected_actions=[(C, C)] + [(C, D), (D, C)] * 5,
init_kwargs={"lookup_dict": tft_table},
)
def test_stochastic_values(self):
stochastic_lookup = {((), (), ()): 0.3}
expected_actions = [(C, C), (D, C), (D, C), (C, C), (D, C)]
self.versus_test(
axl.Cooperator(),
expected_actions=expected_actions,
init_kwargs={"lookup_dict": stochastic_lookup},
seed=80,
)
class TestPSOGamblerMem1(TestPlayer):
name = "PSO Gambler Mem1"
player = axl.PSOGamblerMem1
expected_classifier = {
"memory_depth": 1,
"stochastic": True,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
expected_class_classifier = copy.copy(expected_classifier)
def test_new_data(self):
original_data = {
("", "C", "C"): 1.0,
("", "C", "D"): 0.52173487,
("", "D", "C"): 0.0,
("", "D", "D"): 0.12050939,
}
converted_original = convert_original_to_current(original_data)
self.assertEqual(self.player().lookup_dict, converted_original)
def test_strategy(self):
vs_cooperator = [(C, C)] * 5
self.versus_test(axl.Cooperator(), expected_actions=vs_cooperator)
def test_defects_forever_with_correct_conditions(self):
opponent_actions = [D, D] + [C] * 10
expected = [(C, D), (C, D), (D, C)] + [(D, C)] * 9
self.versus_test(
axl.MockPlayer(actions=opponent_actions),
expected_actions=expected,
seed=1,
)
class TestPSOGambler1_1_1(TestPlayer):
name = "PSO Gambler 1_1_1"
player = axl.PSOGambler1_1_1
expected_classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_new_data(self):
original_data = {
("C", "C", "C"): 1.0,
("C", "C", "D"): 0.12304797,
("C", "D", "C"): 0.0,
("C", "D", "D"): 0.13581423,
("D", "C", "C"): 1.0,
("D", "C", "D"): 0.57740178,
("D", "D", "C"): 0.0,
("D", "D", "D"): 0.11886807,
}
converted_original = convert_original_to_current(original_data)
self.assertEqual(self.player().lookup_dict, converted_original)
def test_cooperate_forever(self):
opponent = [D] * 3 + [C] * 10
expected = [(C, D), (D, D), (D, D)] + [(C, C)] * 10
self.versus_test(
axl.MockPlayer(opponent), expected_actions=expected, seed=4
)
def test_defect_forever(self):
opponent_actions = [C] + [D] + [C] * 10
expected = [(C, C), (C, D)] + [(D, C)] * 10
self.versus_test(
axl.MockPlayer(opponent_actions), expected_actions=expected, seed=2
)
def test_defect_forever2(self):
opponent_actions = [D] + [C] * 10
expected = [(C, D)] + [(D, C)] * 10
self.versus_test(
axl.MockPlayer(opponent_actions), expected_actions=expected, seed=4
)
class TestPSOGambler2_2_2(TestPlayer):
name = "PSO Gambler 2_2_2"
player = axl.PSOGambler2_2_2
expected_classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_new_data(self):
original_data = {
("CC", "CC", "CC"): 1.0,
("CC", "CC", "CD"): 1.0,
("CC", "CC", "DC"): 0.0,
("CC", "CC", "DD"): 0.02126434,
("CC", "CD", "CC"): 0.0,
("CC", "CD", "CD"): 1.0,
("CC", "CD", "DC"): 1.0,
("CC", "CD", "DD"): 0.0,
("CC", "DC", "CC"): 0.0,
("CC", "DC", "CD"): 0.0,
("CC", "DC", "DC"): 0.0,
("CC", "DC", "DD"): 0.0,
("CC", "DD", "CC"): 0.0,
("CC", "DD", "CD"): 0.0,
("CC", "DD", "DC"): 0.0,
("CC", "DD", "DD"): 1.0,
("CD", "CC", "CC"): 1.0,
("CD", "CC", "CD"): 0.95280465,
("CD", "CC", "DC"): 0.80897541,
("CD", "CC", "DD"): 0.0,
("CD", "CD", "CC"): 0.0,
("CD", "CD", "CD"): 0.0,
("CD", "CD", "DC"): 0.0,
("CD", "CD", "DD"): 0.65147565,
("CD", "DC", "CC"): 0.15412392,
("CD", "DC", "CD"): 0.24922166,
("CD", "DC", "DC"): 0.0,
("CD", "DC", "DD"): 0.0,
("CD", "DD", "CC"): 0.0,
("CD", "DD", "CD"): 0.0,
("CD", "DD", "DC"): 0.0,
("CD", "DD", "DD"): 0.24523149,
("DC", "CC", "CC"): 1.0,
("DC", "CC", "CD"): 0.0,
("DC", "CC", "DC"): 0.0,
("DC", "CC", "DD"): 0.43278586,
("DC", "CD", "CC"): 1.0,
("DC", "CD", "CD"): 0.0,
("DC", "CD", "DC"): 0.23563137,
("DC", "CD", "DD"): 1.0,
("DC", "DC", "CC"): 1.0,
("DC", "DC", "CD"): 1.0,
("DC", "DC", "DC"): 0.00227615,
("DC", "DC", "DD"): 0.0,
("DC", "DD", "CC"): 0.0,
("DC", "DD", "CD"): 0.0,
("DC", "DD", "DC"): 0.0,
("DC", "DD", "DD"): 1.0,
("DD", "CC", "CC"): 0.0,
("DD", "CC", "CD"): 0.0,
("DD", "CC", "DC"): 0.0,
("DD", "CC", "DD"): 0.0,
("DD", "CD", "CC"): 0.15140743,
("DD", "CD", "CD"): 0.0,
("DD", "CD", "DC"): 0.0,
("DD", "CD", "DD"): 0.0,
("DD", "DC", "CC"): 0.0,
("DD", "DC", "CD"): 0.0,
("DD", "DC", "DC"): 0.0,
("DD", "DC", "DD"): 1.0,
("DD", "DD", "CC"): 0.0,
("DD", "DD", "CD"): 1.0,
("DD", "DD", "DC"): 0.77344942,
("DD", "DD", "DD"): 0.0,
}
converted_original = convert_original_to_current(original_data)
self.assertEqual(self.player().lookup_dict, converted_original)
def test_vs_defector(self):
expected = [(C, D), (C, D)] + [(D, D)] * 10
self.versus_test(axl.Defector(), expected_actions=expected)
def test_vs_cooperator(self):
expected = [(C, C)] * 10
self.versus_test(axl.Cooperator(), expected_actions=expected)
def test_vs_alternator(self):
expected = [(C, C), (C, D), (C, C), (D, D), (D, C), (D, D), (D, C)]
self.versus_test(axl.Alternator(), expected_actions=expected, seed=20)
def test_vs_DCDDC(self):
opponent_actions = [D, C, D, D, C]
expected = [
(C, D),
(C, C),
(D, D),
(D, D),
(C, C),
(D, D),
(D, C),
(D, D),
(D, D),
(C, C),
]
self.versus_test(
axl.MockPlayer(actions=opponent_actions),
expected_actions=expected,
seed=2,
)
def test_vs_DCDDC2(self):
opponent_actions = [D, C, D, D, C]
expected = [
(C, D),
(C, C),
(D, D),
(D, D),
(C, C),
(D, D),
(D, C),
(D, D),
(D, D),
(C, C),
]
expected[5] = (C, D)
self.versus_test(
axl.MockPlayer(actions=opponent_actions),
expected_actions=expected,
seed=531,
)
class TestPSOGambler2_2_2_Noise05(TestPlayer):
name = "PSO Gambler 2_2_2 Noise 05"
player = axl.PSOGambler2_2_2_Noise05
expected_classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_new_data(self):
original_data = {
("CC", "CC", "CC"): 1.0,
("CC", "CC", "CD"): 0.0,
("CC", "CC", "DC"): 1.0,
("CC", "CC", "DD"): 0.63548102,
("CC", "CD", "CC"): 1.0,
("CC", "CD", "CD"): 1.0,
("CC", "CD", "DC"): 1.0,
("CC", "CD", "DD"): 0.0,
("CC", "DC", "CC"): 0.0,
("CC", "DC", "CD"): 1.0,
("CC", "DC", "DC"): 0.0,
("CC", "DC", "DD"): 0.0,
("CC", "DD", "CC"): 1.0,
("CC", "DD", "CD"): 0.0,
("CC", "DD", "DC"): 0.0,
("CC", "DD", "DD"): 0.0,
("CD", "CC", "CC"): 1.0,
("CD", "CC", "CD"): 1.0,
("CD", "CC", "DC"): 0.0,
("CD", "CC", "DD"): 0.0,
("CD", "CD", "CC"): 0.0,
("CD", "CD", "CD"): 0.13863175,
("CD", "CD", "DC"): 1.0,
("CD", "CD", "DD"): 0.7724137,
("CD", "DC", "CC"): 0.0,
("CD", "DC", "CD"): 1.0,
("CD", "DC", "DC"): 0.0,
("CD", "DC", "DD"): 0.07127653,
("CD", "DD", "CC"): 0.0,
("CD", "DD", "CD"): 1.0,
("CD", "DD", "DC"): 0.28124022,
("CD", "DD", "DD"): 0.0,
("DC", "CC", "CC"): 0.0,
("DC", "CC", "CD"): 0.98603825,
("DC", "CC", "DC"): 0.0,
("DC", "CC", "DD"): 0.0,
("DC", "CD", "CC"): 1.0,
("DC", "CD", "CD"): 0.06434619,
("DC", "CD", "DC"): 1.0,
("DC", "CD", "DD"): 1.0,
("DC", "DC", "CC"): 1.0,
("DC", "DC", "CD"): 0.50999729,
("DC", "DC", "DC"): 0.00524508,
("DC", "DC", "DD"): 1.0,
("DC", "DD", "CC"): 1.0,
("DC", "DD", "CD"): 1.0,
("DC", "DD", "DC"): 1.0,
("DC", "DD", "DD"): 1.0,
("DD", "CC", "CC"): 0.0,
("DD", "CC", "CD"): 1.0,
("DD", "CC", "DC"): 0.16240799,
("DD", "CC", "DD"): 0.0,
("DD", "CD", "CC"): 0.0,
("DD", "CD", "CD"): 1.0,
("DD", "CD", "DC"): 1.0,
("DD", "CD", "DD"): 0.0,
("DD", "DC", "CC"): 0.0,
("DD", "DC", "CD"): 1.0,
("DD", "DC", "DC"): 0.87463905,
("DD", "DC", "DD"): 0.0,
("DD", "DD", "CC"): 0.0,
("DD", "DD", "CD"): 1.0,
("DD", "DD", "DC"): 0.0,
("DD", "DD", "DD"): 0.0,
}
converted_original = convert_original_to_current(original_data)
self.assertEqual(self.player().lookup_dict, converted_original)
def test_vs_defector(self):
expected = [(C, D), (C, D)] + [(D, D)] * 10
self.versus_test(axl.Defector(), expected_actions=expected)
def test_vs_cooperator(self):
expected = [(C, C)] * 10
self.versus_test(axl.Cooperator(), expected_actions=expected)
def test_vs_alternator(self):
expected = [(C, C), (C, D), (C, C), (D, D), (D, C), (D, D), (C, C)]
self.versus_test(axl.Alternator(), expected_actions=expected, seed=2)
def test_vs_alternator2(self):
expected = [(C, C), (C, D), (C, C), (D, D), (D, C), (D, D), (C, C)]
expected[4] = (C, C)
expected[6] = (D, C)
self.versus_test(axl.Alternator(), expected_actions=expected, seed=3)
def test_vs_DCDDC(self):
opponent_actions = [D, C, D, D, C]
expected = [
(C, D),
(C, C),
(D, D),
(D, D),
(C, C),
(D, D),
(D, C),
(C, D),
(C, D),
]
self.versus_test(
axl.MockPlayer(opponent_actions), expected_actions=expected, seed=1
)
def test_vs_DCDDC2(self):
opponent_actions = [D, C, D, D, C]
expected = [
(C, D),
(C, C),
(D, D),
(D, D),
(C, C),
(D, D),
(D, C),
(C, D),
(D, D), # different than above test
]
self.versus_test(
axl.MockPlayer(opponent_actions),
expected_actions=expected,
seed=5,
)
def test_vs_DCDDC3(self):
opponent_actions = [D, C, D, D, C]
expected = [
(C, D),
(C, C),
(D, D),
(D, D),
(C, C),
(D, D),
(D, C),
(C, C), # different than above test
(D, D), # different than above test
(D, D), # different than above test
]
new_expected = expected[:6] + [(C, C), (D, D), (D, D)]
self.versus_test(
axl.MockPlayer(opponent_actions),
expected_actions=new_expected,
seed=10,
)
class TestZDMem2(TestPlayer):
name = "ZD-Mem2"
player = axl.ZDMem2
expected_classifier = {
"memory_depth": 2,
"stochastic": True,
"makes_use_of": set(),
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def test_new_data(self):
original_data = {
("", "CC", "CC"): 11 / 12,
("", "CC", "CD"): 4 / 11,
("", "CC", "DC"): 7 / 9,
("", "CC", "DD"): 1 / 10,
("", "CD", "CC"): 5 / 6,
("", "CD", "CD"): 3 / 11,
("", "CD", "DC"): 7 / 9,
("", "CD", "DD"): 1 / 10,
("", "DC", "CC"): 2 / 3,
("", "DC", "CD"): 1 / 11,
("", "DC", "DC"): 7 / 9,
("", "DC", "DD"): 1 / 10,
("", "DD", "CC"): 3 / 4,
("", "DD", "CD"): 2 / 11,
("", "DD", "DC"): 7 / 9,
("", "DD", "DD"): 1 / 10,
}
converted_original = convert_original_to_current(original_data)
self.assertEqual(self.player().lookup_dict, converted_original)
def test_vs_defector(self):
expected = [
(C, D),
(C, D),
(D, D),
(D, D),
(D, D),
(D, D),
(D, D),
(D, D),
(C, D),
(D, D),
]
self.versus_test(axl.Defector(), expected_actions=expected, seed=30)
def test_vs_cooperator(self):
expected = [
(C, C),
(C, C),
(C, C),
(C, C),
(C, C),
(D, C),
(C, C),
(D, C),
(C, C),
(C, C),
]
self.versus_test(axl.Cooperator(), expected_actions=expected, seed=33)
def test_vs_alternator(self):
expected = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D), (D, C)]
self.versus_test(axl.Alternator(), expected_actions=expected, seed=42)
def test_vs_alternator2(self):
expected = [(C, C), (C, D), (C, C), (D, D), (D, C), (C, D), (D, C)]
self.versus_test(axl.Alternator(), expected_actions=expected, seed=67)
class TestEvolvableGambler(unittest.TestCase):
def test_receive_vector(self):
plays, op_plays, op_start_plays = 1, 1, 1
player = axl.EvolvableGambler(
parameters=(plays, op_plays, op_start_plays), seed=1
)
self.assertRaises(
AttributeError,
axl.EvolvableGambler.__getattribute__,
*[player, "vector"]
)
vector = [random.random() for _ in range(8)]
player.receive_vector(vector)
self.assertEqual(player.pattern, vector)
def test_vector_to_instance(self):
plays, op_plays, op_start_plays = 1, 1, 1
player = axl.EvolvableGambler(
parameters=(plays, op_plays, op_start_plays), seed=1
)
vector = [random.random() for _ in range(8)]
player.receive_vector(vector)
keys = create_lookup_table_keys(
player_depth=plays,
op_depth=op_plays,
op_openings_depth=op_start_plays,
)
action_dict = dict(zip(keys, vector))
self.assertEqual(player._lookup.dictionary, action_dict)
def test_create_vector_bounds(self):
plays, op_plays, op_start_plays = 1, 1, 1
player = axl.EvolvableGambler(
parameters=(plays, op_plays, op_start_plays), seed=1
)
lb, ub = player.create_vector_bounds()
self.assertIsInstance(lb, list)
self.assertIsInstance(ub, list)
self.assertEqual(len(lb), 8)
self.assertEqual(len(ub), 8)
def test_mutate_value_bounds(self):
player = axl.EvolvableGambler(parameters=(1, 1, 1), seed=0)
self.assertEqual(player.mutate_value(2), 1)
self.assertEqual(player.mutate_value(-2), 0)
class TestEvolvableGambler2(TestEvolvablePlayer):
name = "EvolvableGambler"
player_class = axl.EvolvableGambler
parent_class = axl.Gambler
parent_kwargs = ["lookup_dict"]
init_parameters = {"parameters": (1, 1, 1), "initial_actions": (C,)}
class TestEvolvableGambler3(TestEvolvablePlayer):
name = "EvolvableGambler"
player_class = axl.EvolvableGambler
parent_class = axl.Gambler
parent_kwargs = ["lookup_dict"]
init_parameters = {
"parameters": (3, 2, 1),
"initial_actions": (
C,
C,
C,
),
}
class TestEvolvableGambler4(TestEvolvablePlayer):
name = "EvolvableGambler"
player_class = axl.EvolvableGambler
parent_class = axl.Gambler
parent_kwargs = ["lookup_dict"]
init_parameters = {
"parameters": (2, 2, 2),
"pattern": [random.random() for _ in range(64)],
"initial_actions": (
C,
C,
),
}
# Substitute EvolvableGambler as a regular Gambler.
EvolvableGamblerWithDefault = PartialClass(
axl.EvolvableGambler,
pattern=tables[("PSO Gambler 2_2_2", 2, 2, 2)],
parameters=(2, 2, 2),
initial_actions=(
C,
C,
),
)
class EvolvableGamblerAsGambler(TestPSOGambler2_2_2):
player = EvolvableGamblerWithDefault
def test_equality_of_clone(self):
pass
def test_equality_of_pickle_clone(self):
pass
def test_repr(self):
pass
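# A minimal, hedged usage sketch (not part of the test suite above): it restates
# what TestEvolvableGambler.test_vector_to_instance exercises, namely that a flat
# vector of cooperation probabilities is zipped onto the keys returned by
# create_lookup_table_keys.  The probability values below are illustrative only.
def _example_evolvable_gambler_vector():
    plays, op_plays, op_start_plays = 1, 1, 1
    player = axl.EvolvableGambler(parameters=(plays, op_plays, op_start_plays), seed=1)
    probabilities = [0.0, 0.25, 0.5, 0.75, 1.0, 0.1, 0.9, 0.3]  # one entry per lookup key
    player.receive_vector(probabilities)
    keys = create_lookup_table_keys(
        player_depth=plays, op_depth=op_plays, op_openings_depth=op_start_plays
    )
    # The received vector becomes the player's lookup table, key by key.
    assert player._lookup.dictionary == dict(zip(keys, probabilities))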
|
noxfile.py
|
jads-nl/intro-to-python
| 614 |
130863
|
<reponame>jads-nl/intro-to-python<filename>noxfile.py<gh_stars>100-1000
"""Configure nox as the task runner.
Nox provides the following tasks:
- "init-project": install the pre-commit hooks
- "doctests": run the xdoctests in the source files
- "fix-branch-references": adjusts links with git branch references in
  various files (e.g., Markdown or notebooks)
"""
import contextlib
import glob
import os
import re
import shutil
import subprocess
import tempfile
import nox
REPOSITORY = "webartifex/intro-to-python"
SRC_LOCATIONS = (
"02_functions/sample_module.py",
"11_classes/sample_package",
)
# Use a unified .cache/ folder for all develop tools.
nox.options.envdir = ".cache/nox"
# All tools except git and poetry are project dependencies.
# Avoid accidental successes if the environment is not set up properly.
nox.options.error_on_external_run = True
@nox.session(name="init-project", venv_backend="none")
def init_project(session):
"""Install the pre-commit hooks."""
for type_ in (
"pre-commit",
"pre-merge-commit",
):
session.run("poetry", "run", "pre-commit", "install", f"--hook-type={type_}")
# Copy the extensions' JavaScript and CSS files into Jupyter's search directory.
session.run(
"poetry", "run", "jupyter", "contrib", "nbextension", "install", "--user"
)
@nox.session(venv_backend="none")
def doctests(session):
"""Run the xdoctests in the source files."""
for location in SRC_LOCATIONS:
session.run("poetry", "run", "xdoctest", "--silent", location)
@nox.session(name="fix-branch-references", venv_backend="none")
def fix_branch_references(_session):
"""Change git branch references.
Intended to be run as a pre-commit hook.
Many files in the project (e.g., README.md) contain links to resources on
github.com, nbviewer.jupyter.org, or mybinder.org that contain git branch
labels.
This task rewrites branch labels into either "main" or "develop".
"""
# Glob patterns that expand into the files whose links are re-written.
paths = ["*.md", "**/*.ipynb"]
branch = (
subprocess.check_output(
("git", "rev-parse", "--abbrev-ref", "HEAD"),
)
.decode()
.strip()
)
# If the current branch is only temporary and will be merged into "main", ...
if branch.startswith("release-") or branch.startswith("hotfix-"):
branch = "main"
# If the branch is not "main", we assume it is a feature branch.
elif branch != "main":
branch = "develop"
rewrites = [
{
"name": "github",
"pattern": re.compile(
fr"((((http)|(https))://github\.com/{REPOSITORY}/((blob)|(tree))/)([\w-]+)/)"
),
"replacement": fr"\2{branch}/",
},
{
"name": "nbviewer",
"pattern": re.compile(
fr"((((http)|(https))://nbviewer\.jupyter\.org/github/{REPOSITORY}/((blob)|(tree))/)([\w-]+)/)",
),
"replacement": fr"\2{branch}/",
},
{
"name": "mybinder",
"pattern": re.compile(
fr"((((http)|(https))://mybinder\.org/v2/gh/{REPOSITORY}/)([\w-]+)\?)",
),
"replacement": fr"\2{branch}?",
},
]
for expanded in _expand(*paths):
with _line_by_line_replace(expanded) as (old_file, new_file):
for line in old_file:
for rewrite in rewrites:
line = re.sub(rewrite["pattern"], rewrite["replacement"], line)
new_file.write(line)
def _expand(*patterns):
"""Expand glob patterns into paths.
Args:
*patterns: the patterns to be expanded
Yields:
path: a single expanded path
"""
for pattern in patterns:
yield from glob.glob(pattern.strip())
@contextlib.contextmanager
def _line_by_line_replace(path):
"""Replace/change the lines in a file one by one.
This generator function yields two file handles, one to the current file
(i.e., `old_file`) and one to its replacement (i.e., `new_file`).
    Usage: loop over the lines in `old_file` and write the lines to be kept
    to `new_file`. Lines not written to `new_file` are removed!
Args:
path: the file whose lines are to be replaced
Yields:
old_file, new_file: handles to a file and its replacement
"""
file_handle, new_file_path = tempfile.mkstemp()
with os.fdopen(file_handle, "w") as new_file:
with open(path) as old_file:
yield old_file, new_file
shutil.copymode(path, new_file_path)
os.remove(path)
shutil.move(new_file_path, path)
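# Hedged usage sketch (not a nox session): how the two helpers above combine,
# following their docstrings -- iterate over every line of each file matched by
# a glob pattern and write back only the (possibly rewritten) lines.  The
# pattern and replacement below are illustrative placeholders.
def _example_rewrite(pattern=r"/develop/", replacement="/main/"):
    for path in _expand("*.md"):
        with _line_by_line_replace(path) as (old_file, new_file):
            for line in old_file:
                new_file.write(re.sub(pattern, replacement, line))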
|
examples/clingo/tmode/tmode.py
|
potassco/gringo
| 423 |
130864
|
from sys import stdout, exit
from textwrap import dedent
from copy import copy
from clingo.application import Application
from clingo import SymbolType, Number, Function, ast, clingo_main
class TermTransformer(ast.Transformer):
def __init__(self, parameter):
self.parameter = parameter
def __get_param(self, name, location):
n = name.replace('\'', '')
primes = len(name) - len(n)
param = ast.SymbolicTerm(location, self.parameter)
if primes > 0:
param = ast.BinaryOperation(location, ast.BinaryOperator.Minus, param, ast.SymbolicTerm(location, Number(primes)))
return n, param
def visit_Function(self, term):
name, param = self.__get_param(term.name, term.location)
term = term.update(name=name)
term.arguments.append(param)
return term
def visit_SymbolicTerm(self, term):
# this function is not necessary if gringo's parser is used
# but this case could occur in a valid AST
raise RuntimeError("not implemented")
class ProgramTransformer(ast.Transformer):
def __init__(self, parameter):
self.final = False
self.parameter = parameter
self.term_transformer = TermTransformer(parameter)
def visit(self, x, *args, **kwargs):
ret = super().visit(x, *args, **kwargs)
if self.final and hasattr(ret, "body"):
if x is ret:
ret = copy(x)
loc = ret.location
fun = ast.Function(loc, "finally", [ast.SymbolicTerm(loc, self.parameter)], False)
atm = ast.SymbolicAtom(fun)
lit = ast.Literal(loc, ast.Sign.NoSign, atm)
ret.body.append(lit)
return ret
def visit_SymbolicAtom(self, atom):
return atom.update(symbol=self.term_transformer(atom.symbol))
def visit_Program(self, prg):
self.final = prg.name == "final"
prg = copy(prg)
if self.final:
prg.name = "static"
prg.parameters.append(ast.Id(prg.location, self.parameter.name))
return prg
def visit_ShowSignature(self, sig):
return sig.update(arity=sig.arity + 1)
def visit_ProjectSignature(self, sig):
return sig.update(arity=sig.arity + 1)
class TModeApp(Application):
def __init__(self):
self._imin = 0
self._imax = None
self._istop = "SAT"
self._horizon = 0
def _parse_imin(self, value):
try:
self._imin = int(value)
except ValueError:
return False
return self._imin >= 0
def _parse_imax(self, value):
if value.upper() in ("INF", "INFINITY"):
self._imax = None
return True
try:
self._imax = int(value)
except ValueError:
return False
return self._imax >= 0
def _parse_istop(self, value):
self._istop = value.upper()
return self._istop in ["SAT", "UNSAT", "UNKNOWN"]
def register_options(self, options):
group = "Incremental Options"
options.add(group, "imin", "Minimum number of solving steps [0]",
self._parse_imin, argument="<n>")
options.add(group, "imax", "Maximum number of solving steps [infinity]",
self._parse_imax, argument="<n>")
options.add(group, "istop", dedent("""\
Stop criterion [sat]
<arg>: {sat|unsat|unknown}"""), self._parse_istop)
def print_model(self, model, printer):
table = {}
for sym in model.symbols(shown=True):
if sym.type == SymbolType.Function and len(sym.arguments) > 0:
table.setdefault(sym.arguments[-1], []).append(Function(sym.name, sym.arguments[:-1]))
for step, symbols in sorted(table.items()):
stdout.write(" State {}:".format(step))
sig = None
for sym in sorted(symbols):
if (sym.name, len(sym.arguments)) != sig:
stdout.write("\n ")
sig = (sym.name, len(sym.arguments))
stdout.write(" {}".format(sym))
stdout.write("\n")
def _main(self, ctl):
step, ret = 0, None
while ((self._imax is None or step < self._imax) and
(step == 0 or step < self._imin or (
(self._istop == "SAT" and not ret.satisfiable) or
(self._istop == "UNSAT" and not ret.unsatisfiable) or
(self._istop == "UNKNOWN" and not ret.unknown)))):
parts = []
parts.append(("base", [Number(step)]))
parts.append(("static", [Number(step)]))
if step > 0:
ctl.release_external(Function("finally", [Number(step-1)]))
parts.append(("dynamic", [Number(step)]))
else:
parts.append(("initial", [Number(0)]))
ctl.ground(parts)
ctl.assign_external(Function("finally", [Number(step)]), True)
ret, step = ctl.solve(), step+1
def main(self, ctl, files):
with ast.ProgramBuilder(ctl) as bld:
ptf = ProgramTransformer(Function("__t"))
ast.parse_files(files, lambda stm: bld.add(ptf(stm)))
ctl.add("initial", ["t"], "initially(t).")
ctl.add("static", ["t"], "#external finally(t).")
self._main(ctl)
exit(clingo_main(TModeApp()))
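# Hedged usage note (not executed, since clingo_main above does not return): the
# options registered in register_options should surface as clingo command-line
# flags, e.g. something like
#   python tmode.py <encoding.lp> --imin=1 --imax=30 --istop=UNSAT
# where <encoding.lp> is a placeholder for a temporal program with "initial",
# "dynamic" and "final" parts.  Each iteration of _main grounds the next step
# and re-solves until the chosen stop criterion or --imax is reached.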
|
linebot/async_http_client.py
|
naotokuwa/line-bot-sdk-python
| 1,563 |
130893
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.async_http_client module."""
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod, abstractproperty
from future.utils import with_metaclass
class AsyncHttpClient(with_metaclass(ABCMeta)):
"""Abstract Base Classes of HttpClient."""
DEFAULT_TIMEOUT = 5
def __init__(self, timeout=DEFAULT_TIMEOUT):
"""__init__ method.
:param timeout: (optional) How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is :py:attr:`DEFAULT_TIMEOUT`
:type timeout: float | tuple(float, float)
"""
self.timeout = timeout
@abstractmethod
async def get(self, url, headers=None, params=None, timeout=None):
"""GET request.
:param str url: Request url
:param dict headers: (optional) Request headers
:param dict params: (optional) Request query parameter
:param timeout: (optional), How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is :py:attr:`self.timeout`
:type timeout: float | tuple(float, float)
        :rtype: T <= :py:class:`AsyncHttpResponse`
        :return: AsyncHttpResponse instance
"""
raise NotImplementedError
@abstractmethod
async def post(self, url, headers=None, data=None, timeout=None):
"""POST request.
:param str url: Request url
:param dict headers: (optional) Request headers
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
:param timeout: (optional), How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is :py:attr:`self.timeout`
:type timeout: float | tuple(float, float)
        :rtype: T <= :py:class:`AsyncHttpResponse`
        :return: AsyncHttpResponse instance
"""
raise NotImplementedError
@abstractmethod
async def delete(self, url, headers=None, data=None, timeout=None):
"""DELETE request.
:param str url: Request url
:param dict headers: (optional) Request headers
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
:param timeout: (optional), How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is :py:attr:`self.timeout`
:type timeout: float | tuple(float, float)
        :rtype: T <= :py:class:`AsyncHttpResponse`
        :return: AsyncHttpResponse instance
"""
raise NotImplementedError
@abstractmethod
async def put(self, url, headers=None, data=None, timeout=None):
"""PUT request.
:param str url: Request url
:param dict headers: (optional) Request headers
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
:param timeout: (optional), How long to wait for the server
to send data before giving up, as a float,
or a (connect timeout, read timeout) float tuple.
Default is :py:attr:`self.timeout`
:type timeout: float | tuple(float, float)
:rtype: :py:class:`AsyncHttpResponse`
:return: AsyncHttpResponse instance
"""
raise NotImplementedError
class AsyncHttpResponse(with_metaclass(ABCMeta)):
"""HttpResponse."""
@abstractproperty
def status_code(self):
"""Get status code."""
raise NotImplementedError
@abstractproperty
def headers(self):
"""Get headers."""
raise NotImplementedError
@abstractproperty
async def text(self):
"""Get response body as text-decoded."""
raise NotImplementedError
@abstractproperty
async def content(self):
"""Get response body as binary."""
raise NotImplementedError
@abstractproperty
def json(self):
"""Get response body as json-decoded."""
raise NotImplementedError
@abstractmethod
def iter_content(self, chunk_size=1024):
"""Get response body as iterator content (stream).
:param int chunk_size:
"""
raise NotImplementedError
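# Hedged sketch (not part of the SDK): a minimal concrete pair of subclasses,
# usable e.g. as a test double.  All names below are hypothetical; a production
# client would delegate to a real async HTTP library instead of canned data.
class DummyAsyncHttpResponse(AsyncHttpResponse):
    """Canned response satisfying the AsyncHttpResponse interface."""
    def __init__(self, status_code=200, headers=None, body=b'{}'):
        self._status_code = status_code
        self._headers = headers or {}
        self._body = body
    @property
    def status_code(self):
        return self._status_code
    @property
    def headers(self):
        return self._headers
    @property
    async def text(self):
        return self._body.decode('utf-8')
    @property
    async def content(self):
        return self._body
    @property
    def json(self):
        import json  # local import keeps the sketch self-contained
        return json.loads(self._body.decode('utf-8'))
    def iter_content(self, chunk_size=1024):
        return iter([self._body])
class DummyAsyncHttpClient(AsyncHttpClient):
    """Returns the same canned response for every request; for tests only."""
    async def get(self, url, headers=None, params=None, timeout=None):
        return DummyAsyncHttpResponse()
    async def post(self, url, headers=None, data=None, timeout=None):
        return DummyAsyncHttpResponse()
    async def delete(self, url, headers=None, data=None, timeout=None):
        return DummyAsyncHttpResponse()
    async def put(self, url, headers=None, data=None, timeout=None):
        return DummyAsyncHttpResponse()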
|
ignite/distributed/launcher.py
|
iamhardikat11/ignite
| 4,119 |
130909
|
<reponame>iamhardikat11/ignite<gh_stars>1000+
from typing import Any, Callable, Dict, Optional
from ignite.distributed import utils as idist
from ignite.utils import setup_logger
__all__ = [
"Parallel",
]
class Parallel:
"""Distributed launcher context manager to simplify distributed configuration setup for multiple backends:
- backends from native torch distributed configuration: "nccl", "gloo" and "mpi" (if available)
- XLA on TPUs via `pytorch/xla <https://github.com/pytorch/xla>`_ (if installed)
- using `Horovod distributed framework <https://horovod.readthedocs.io>`_ (if installed)
Namely, it can:
1) Spawn ``nproc_per_node`` child processes and initialize a processing group according to
provided ``backend`` (useful for standalone scripts).
2) Only initialize a processing group given the ``backend``
(useful with tools like `torch.distributed.launch`_, `horovodrun`_, etc).
Args:
backend: backend to use: `nccl`, `gloo`, `xla-tpu`, `horovod`. If None, no distributed
configuration.
nproc_per_node: optional argument, number of processes per
node to specify. If not None, :meth:`~ignite.distributed.launcher.Parallel.run`
will spawn ``nproc_per_node`` processes that run input function with its arguments.
nnodes: optional argument, number of nodes participating in distributed configuration.
If not None, :meth:`~ignite.distributed.launcher.Parallel.run` will spawn ``nproc_per_node``
processes that run input function with its arguments. Total world size is `nproc_per_node * nnodes`.
This option is only supported by native torch distributed module. For other modules, please setup
``spawn_kwargs`` with backend specific arguments.
node_rank: optional argument, current machine index. Mandatory argument if ``nnodes`` is
specified and larger than one.
This option is only supported by native torch distributed module. For other modules, please setup
``spawn_kwargs`` with backend specific arguments.
master_addr: optional argument, master node TCP/IP address for torch native backends
(`nccl`, `gloo`). Mandatory argument if ``nnodes`` is specified and larger than one.
master_port: optional argument, master node port for torch native backends
(`nccl`, `gloo`). Mandatory argument if ``master_addr`` is specified.
init_method: optional argument to specify processing group initialization method for torch native
backends (`nccl`, `gloo`). Default, "env://".
See more info: `dist.init_process_group`_.
spawn_kwargs: kwargs to ``idist.spawn`` function.
Examples:
1) Single node or Multi-node, Multi-GPU training launched with `torch.distributed.launch`_ or `horovodrun`_
tools
Single node option with 4 GPUs
.. code-block:: bash
python -m torch.distributed.launch --nproc_per_node=4 --use_env main.py
# or if installed horovod
horovodrun -np=4 python main.py
Multi-node option : 2 nodes with 8 GPUs each
.. code-block:: bash
## node 0
python -m torch.distributed.launch --nnodes=2 --node_rank=0 --master_addr=master \
--master_port=3344 --nproc_per_node=8 --use_env main.py
# or if installed horovod
horovodrun -np 16 -H hostname1:8,hostname2:8 python main.py
## node 1
python -m torch.distributed.launch --nnodes=2 --node_rank=1 --master_addr=master \
--master_port=3344 --nproc_per_node=8 --use_env main.py
User code is the same for both options:
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
backend = "nccl" # or "horovod" if package is installed
with idist.Parallel(backend=backend) as parallel:
parallel.run(training, config, a=1, b=2)
2) Single node, Multi-GPU training launched with `python`
.. code-block:: bash
python main.py
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
backend = "nccl" # or "horovod" if package is installed
            # no "init_method" was specified, "env://" will be used
with idist.Parallel(backend=backend, nproc_per_node=4) as parallel:
parallel.run(training, config, a=1, b=2)
Initializing the process using ``file://``
.. code-block:: python
with idist.Parallel(backend=backend, init_method='file:///d:/tmp/some_file', nproc_per_node=4) as parallel:
parallel.run(training, config, a=1, b=2)
Initializing the process using ``tcp://``
.. code-block:: python
with idist.Parallel(backend=backend, init_method='tcp://10.1.1.20:23456', nproc_per_node=4) as parallel:
parallel.run(training, config, a=1, b=2)
3) Single node, Multi-TPU training launched with `python`
.. code-block:: bash
python main.py
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
with idist.Parallel(backend="xla-tpu", nproc_per_node=8) as parallel:
parallel.run(training, config, a=1, b=2)
4) Multi-node, Multi-GPU training launched with `python`. For example, 2 nodes with 8 GPUs:
Using torch native distributed framework:
.. code-block:: bash
# node 0
python main.py --node_rank=0
# node 1
python main.py --node_rank=1
.. code-block:: python
# main.py
import ignite.distributed as idist
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
dist_config = {
"nproc_per_node": 8,
"nnodes": 2,
"node_rank": args.node_rank,
"master_addr": "master",
"master_port": 15000
}
with idist.Parallel(backend="nccl", **dist_config) as parallel:
parallel.run(training, config, a=1, b=2)
.. _torch.distributed.launch: https://pytorch.org/docs/stable/distributed.html#launch-utility
.. _horovodrun: https://horovod.readthedocs.io/en/latest/api.html#module-horovod.run
.. _dist.init_process_group: https://pytorch.org/docs/stable/distributed.html#torch.distributed.init_process_group
.. versionchanged:: 0.4.2
``backend`` now accepts `horovod` distributed framework.
.. versionchanged:: 0.4.5
``init_method`` added.
"""
def __init__(
self,
backend: Optional[str] = None,
nproc_per_node: Optional[int] = None,
nnodes: Optional[int] = None,
node_rank: Optional[int] = None,
master_addr: Optional[str] = None,
master_port: Optional[int] = None,
init_method: Optional[str] = None,
**spawn_kwargs: Any,
) -> None:
if backend is not None:
if backend not in idist.available_backends():
raise ValueError(f"Unknown backend '{backend}'. Available backends: {idist.available_backends()}")
else:
arg_names = ["nproc_per_node", "nnodes", "node_rank", "master_addr", "master_port"]
arg_values = [nproc_per_node, nnodes, node_rank, master_addr, master_port]
for name, value in zip(arg_names, arg_values):
if value is not None:
raise ValueError(f"If backend is None, argument '{name}' should be also None, but given {value}")
self.backend = backend
self._spawn_params = None
self.init_method = init_method
if self.backend is not None:
if nproc_per_node is not None:
self._spawn_params = self._setup_spawn_params(
nproc_per_node, nnodes, node_rank, master_addr, master_port, init_method, **spawn_kwargs
)
# The logger will be setup after the idist.initialize() call
self._logger = None
@staticmethod
def _setup_spawn_params(
nproc_per_node: int,
nnodes: Optional[int] = None,
node_rank: Optional[int] = None,
master_addr: Optional[str] = None,
master_port: Optional[int] = None,
init_method: Optional[str] = None,
**spawn_kwargs: Any,
) -> Dict:
if nproc_per_node < 1:
            raise ValueError(f"Argument nproc_per_node should be positive, but given {nproc_per_node}")
if nnodes is None:
nnodes = 1
if nnodes < 1:
            raise ValueError(f"Argument nnodes should be positive, but given {nnodes}")
if node_rank is None:
if nnodes > 1:
                raise ValueError("If the number of nodes is larger than one, the argument node_rank should be given")
node_rank = 0
if node_rank >= nnodes or node_rank < 0:
raise ValueError(f"Argument node_rank should be between 0 and {nnodes - 1}, but given {node_rank}")
if nnodes > 1 and (master_addr is None or master_port is None or init_method is None):
            raise ValueError(
                "If the number of nodes is larger than one, arguments master_addr and master_port or init_method "
f"should be specified, but given master_addr={master_addr}, master_port={master_port} and "
f"init_method={init_method}."
)
params = {
"nproc_per_node": nproc_per_node,
"nnodes": nnodes,
"node_rank": node_rank,
"master_addr": master_addr,
"master_port": master_port,
"init_method": init_method,
}
params.update(spawn_kwargs)
return {k: v for k, v in params.items() if v is not None}
def run(self, func: Callable, *args: Any, **kwargs: Any) -> None:
"""Execute ``func`` with provided arguments in distributed context.
Args:
func: function to execute. First argument of the function should be `local_rank` - local process
index.
args: positional arguments of ``func`` (without `local_rank`).
kwargs: keyword arguments of ``func``.
Examples:
.. code-block:: python
def training(local_rank, config, **kwargs):
# ...
print(idist.get_rank(), ": run with config:", config, "- backend=", idist.backend())
# ...
with idist.Parallel(backend=backend) as parallel:
parallel.run(training, config, a=1, b=2)
"""
if self._spawn_params is not None and self.backend is not None:
self._logger.info( # type: ignore[attr-defined]
f"Spawn function '{func}' in {self._spawn_params['nproc_per_node']} processes"
)
idist.spawn(self.backend, func, args=args, kwargs_dict=kwargs, **self._spawn_params)
else:
self._logger.info(f"- Run '{func}' in {idist.get_world_size()} processes") # type: ignore[attr-defined]
local_rank = idist.get_local_rank()
func(local_rank, *args, **kwargs)
self._logger.info("End of run") # type: ignore[attr-defined]
def __enter__(self) -> "Parallel":
if self.backend is not None and self._spawn_params is None:
idist.initialize(self.backend, init_method=self.init_method)
# The logger can be setup from now since idist.initialize() has been called (if needed)
self._logger = setup_logger(__name__ + "." + self.__class__.__name__) # type: ignore[assignment]
if self.backend is not None:
if self._spawn_params is None:
self._logger.info( # type: ignore[attr-defined]
f"Initialized processing group with backend: '{self.backend}'"
)
else:
self._logger.info( # type: ignore[attr-defined]
f"Initialized distributed launcher with backend: '{self.backend}'"
)
msg = "\n\t".join([f"{k}: {v}" for k, v in self._spawn_params.items() if v is not None])
self._logger.info(f"- Parameters to spawn processes: \n\t{msg}") # type: ignore[attr-defined]
return self
def __exit__(self, *args: Any, **kwargs: Any) -> None:
if (self.backend is not None) and self._spawn_params is None:
self._logger.info( # type: ignore[attr-defined]
f"Finalized processing group with backend: '{self.backend}'"
)
idist.finalize()
|
frappe/desk/form/test_form.py
|
Don-Leopardo/frappe
| 3,755 |
130922
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe, unittest
from frappe.desk.form.linked_with import get_linked_docs, get_linked_doctypes
class TestForm(unittest.TestCase):
def test_linked_with(self):
results = get_linked_docs("Role", "System Manager", linkinfo=get_linked_doctypes("Role"))
self.assertTrue("User" in results)
self.assertTrue("DocType" in results)
if __name__=="__main__":
frappe.connect()
unittest.main()
|
VMEncryption/test/test_disk_util.py
|
shridpant/azure-linux-extensions
| 266 |
130949
|
<gh_stars>100-1000
import unittest
import mock
from main.Common import CryptItem
from main.EncryptionEnvironment import EncryptionEnvironment
from main.DiskUtil import DiskUtil
from console_logger import ConsoleLogger
from test_utils import MockDistroPatcher
class TestDiskUtil(unittest.TestCase):
    """ unit tests for functions in the DiskUtil module """
def setUp(self):
self.logger = ConsoleLogger()
self.disk_util = DiskUtil(None, MockDistroPatcher('Ubuntu', '14.04', '4.15'), self.logger, EncryptionEnvironment(None, self.logger))
def _mock_open_with_read_data_dict(self, open_mock, read_data_dict):
open_mock.content_dict = read_data_dict
def _open_side_effect(filename, mode, *args, **kwargs):
read_data = open_mock.content_dict.get(filename)
mock_obj = mock.mock_open(read_data=read_data)
handle = mock_obj.return_value
def write_handle(data, *args, **kwargs):
if 'a' in mode:
open_mock.content_dict[filename] += data
else:
open_mock.content_dict[filename] = data
def write_lines_handle(data, *args, **kwargs):
if 'a' in mode:
open_mock.content_dict[filename] += "".join(data)
else:
open_mock.content_dict[filename] = "".join(data)
handle.write.side_effect = write_handle
handle.writelines.side_effect = write_lines_handle
return handle
open_mock.side_effect = _open_side_effect
def _create_expected_crypt_item(self,
mapper_name=None,
dev_path=None,
uses_cleartext_key=None,
luks_header_path=None,
mount_point=None,
file_system=None,
current_luks_slot=None):
crypt_item = CryptItem()
crypt_item.mapper_name = mapper_name
crypt_item.dev_path = dev_path
crypt_item.uses_cleartext_key = uses_cleartext_key
crypt_item.luks_header_path = luks_header_path
crypt_item.mount_point = mount_point
crypt_item.file_system = file_system
crypt_item.current_luks_slot = current_luks_slot
return crypt_item
def test_parse_crypttab_line(self):
# empty line
line = ""
crypt_item = self.disk_util.parse_crypttab_line(line)
self.assertEquals(None, crypt_item)
# line with not enough entries
line = "mapper_name dev_path"
crypt_item = self.disk_util.parse_crypttab_line(line)
self.assertEquals(None, crypt_item)
# commented out line
line = "# mapper_name dev_path"
crypt_item = self.disk_util.parse_crypttab_line(line)
self.assertEquals(None, crypt_item)
        # An unfamiliar key_file_path implies that we shouldn't be processing this crypttab line
line = "mapper_name /dev/dev_path /non_managed_key_file_path"
crypt_item = self.disk_util.parse_crypttab_line(line)
self.assertEquals(None, crypt_item)
# a bare bones crypttab line
line = "mapper_name /dev/dev_path /mnt/azure_bek_disk/LinuxPassPhraseFileName luks"
expected_crypt_item = self._create_expected_crypt_item(mapper_name="mapper_name",
dev_path="/dev/dev_path")
crypt_item = self.disk_util.parse_crypttab_line(line)
self.assertEquals(str(expected_crypt_item), str(crypt_item))
# a line that implies a cleartext key
line = "mapper_name /dev/dev_path /var/lib/azure_disk_encryption_config/cleartext_key_mapper_name luks"
expected_crypt_item = self._create_expected_crypt_item(mapper_name="mapper_name",
dev_path="/dev/dev_path",
uses_cleartext_key=True)
crypt_item = self.disk_util.parse_crypttab_line(line)
self.assertEquals(str(expected_crypt_item), str(crypt_item))
# a line that implies a luks header
line = "mapper_name /dev/dev_path /var/lib/azure_disk_encryption_config/cleartext_key_mapper_name luks,header=headerfile"
expected_crypt_item = self._create_expected_crypt_item(mapper_name="mapper_name",
dev_path="/dev/dev_path",
uses_cleartext_key=True,
luks_header_path="headerfile")
crypt_item = self.disk_util.parse_crypttab_line(line)
self.assertEquals(str(expected_crypt_item), str(crypt_item))
@mock.patch('__builtin__.open')
@mock.patch('os.path.exists', return_value=True)
def test_should_use_azure_crypt_mount(self, exists_mock, open_mock):
# if the acm file exists and has only a root disk
acm_contents = """
osencrypt /dev/dev_path None / ext4 False 0
"""
mock.mock_open(open_mock, acm_contents)
self.assertFalse(self.disk_util.should_use_azure_crypt_mount())
# if the acm file exists and has a data disk
acm_contents = """
mapper_name /dev/dev_path None /mnt/point ext4 False 0
mapper_name2 /dev/dev_path2 None /mnt/point2 ext4 False 0
"""
mock.mock_open(open_mock, acm_contents)
self.assertTrue(self.disk_util.should_use_azure_crypt_mount())
# empty file
mock.mock_open(open_mock, "")
self.assertFalse(self.disk_util.should_use_azure_crypt_mount())
# no file
exists_mock.return_value = False
open_mock.reset_mock()
self.assertFalse(self.disk_util.should_use_azure_crypt_mount())
open_mock.assert_not_called()
@mock.patch('os.path.exists', return_value=True)
@mock.patch('main.DiskUtil.ProcessCommunicator')
@mock.patch('main.CommandExecutor.CommandExecutor', autospec=True)
@mock.patch('__builtin__.open')
@mock.patch('main.DiskUtil.DiskUtil.should_use_azure_crypt_mount')
@mock.patch('main.DiskUtil.DiskUtil.get_encryption_status')
@mock.patch('main.DiskUtil.DiskUtil.get_mount_items')
def test_get_crypt_items(self, get_mount_items_mock, get_enc_status_mock, use_acm_mock, open_mock, ce_mock, pc_mock, exists_mock):
self.disk_util.command_executor = ce_mock
use_acm_mock.return_value = True # Use the Azure_Crypt_Mount file
get_enc_status_mock.return_value = "{\"os\" : \"Encrypted\"}"
acm_contents = """
osencrypt /dev/dev_path None / ext4 True 0
"""
mock.mock_open(open_mock, acm_contents)
crypt_items = self.disk_util.get_crypt_items()
self.assertListEqual([self._create_expected_crypt_item(mapper_name="osencrypt",
dev_path="/dev/dev_path",
uses_cleartext_key=True,
mount_point="/",
file_system="ext4",
current_luks_slot=0)],
crypt_items)
ce_mock.ExecuteInBash.return_value = 0 # The grep on cryptsetup succeeds
        pc_mock.return_value.stdout = "osencrypt /dev/dev_path" # The grep finds this line in there
mock.mock_open(open_mock, "") # No content in the azure crypt mount file
get_mount_items_mock.return_value = [{"src": "/dev/mapper/osencrypt", "dest": "/", "fs": "ext4"}]
exists_mock.return_value = False # No luksheader file found
crypt_items = self.disk_util.get_crypt_items()
self.assertListEqual([self._create_expected_crypt_item(mapper_name="osencrypt",
dev_path="/dev/dev_path",
mount_point="/",
file_system="ext4")],
crypt_items)
use_acm_mock.return_value = False # Now, use the /etc/crypttab file
exists_mock.return_value = True # Crypttab file found
self._mock_open_with_read_data_dict(open_mock, {"/etc/fstab": "/dev/mapper/osencrypt / ext4 defaults,nofail 0 0",
"/etc/crypttab": "osencrypt /dev/sda1 /mnt/azure_bek_disk/LinuxPassPhraseFileName luks,discard"})
crypt_items = self.disk_util.get_crypt_items()
self.assertListEqual([self._create_expected_crypt_item(mapper_name="osencrypt",
dev_path="/dev/sda1",
file_system=None,
mount_point="/")],
crypt_items)
# if there was no crypttab entry for osencrypt
exists_mock.side_effect = [True, False] # Crypttab file found but luksheader not found
self._mock_open_with_read_data_dict(open_mock, {"/etc/fstab": "/dev/mapper/osencrypt / ext4 defaults,nofail 0 0", "/etc/crypttab": ""})
ce_mock.ExecuteInBash.return_value = 0 # The grep on cryptsetup succeeds
        pc_mock.return_value.stdout = "osencrypt /dev/sda1" # The grep finds this line in there
crypt_items = self.disk_util.get_crypt_items()
self.assertListEqual([self._create_expected_crypt_item(mapper_name="osencrypt",
dev_path="/dev/sda1",
file_system="ext4",
mount_point="/")],
crypt_items)
exists_mock.side_effect = None # Crypttab file found
exists_mock.return_value = True # Crypttab file found
get_enc_status_mock.return_value = "{\"os\" : \"NotEncrypted\"}"
self._mock_open_with_read_data_dict(open_mock, {"/etc/fstab": "",
"/etc/crypttab": ""})
crypt_items = self.disk_util.get_crypt_items()
self.assertListEqual([],
crypt_items)
self._mock_open_with_read_data_dict(open_mock, {"/etc/fstab": "/dev/mapper/encrypteddatadisk /mnt/datadisk auto defaults,nofail 0 0",
"/etc/crypttab": "encrypteddatadisk /dev/disk/azure/scsi1/lun0 /someplainfile luks"})
crypt_items = self.disk_util.get_crypt_items()
self.assertListEqual([],
crypt_items)
self._mock_open_with_read_data_dict(open_mock, {"/etc/fstab": "/dev/mapper/encrypteddatadisk /mnt/datadisk auto defaults,nofail 0 0",
"/etc/crypttab": "encrypteddatadisk /dev/disk/azure/scsi1/lun0 /mnt/azure_bek_disk/LinuxPassPhraseFileName luks,discard,header=/headerfile"})
crypt_items = self.disk_util.get_crypt_items()
self.assertListEqual([self._create_expected_crypt_item(mapper_name="encrypteddatadisk",
dev_path="/dev/disk/azure/scsi1/lun0",
file_system=None,
luks_header_path="/headerfile",
mount_point="/mnt/datadisk")],
crypt_items)
@mock.patch('shutil.copy2', return_value=True)
@mock.patch('os.rename', return_value=True)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('__builtin__.open')
@mock.patch('main.DiskUtil.DiskUtil.should_use_azure_crypt_mount', return_value=True)
@mock.patch('main.DiskUtil.DiskUtil.get_encryption_status')
def test_migrate_crypt_items(self, get_enc_status_mock, use_acm_mock, open_mock, exists_mock, rename_mock, shutil_mock):
def rename_side_effect(name1, name2):
use_acm_mock.return_value = False
return True
rename_mock.side_effect = rename_side_effect
get_enc_status_mock.return_value = "{\"os\" : \"NotEncrypted\"}"
# Test 1: migrate an entry
self._mock_open_with_read_data_dict(open_mock, {"/var/lib/azure_disk_encryption_config/azure_crypt_mount": "mapper_name /dev/dev_path None /mnt/point ext4 False 0",
"/etc/fstab.azure.backup": "/dev/dev_path /mnt/point ext4 defaults,nofail 0 0",
"/etc/fstab": "",
"/etc/crypttab": ""})
self.disk_util.migrate_crypt_items("/test_passphrase_path")
self.assertTrue("/dev/mapper/mapper_name /mnt/point" in open_mock.content_dict["/etc/fstab"])
self.assertTrue("mapper_name /dev/dev_path /test_passphrase_path" in open_mock.content_dict["/etc/crypttab"])
# Test 2: migrate no entry
use_acm_mock.return_value = True
self._mock_open_with_read_data_dict(open_mock, {"/var/lib/azure_disk_encryption_config/azure_crypt_mount": "",
"/etc/fstab.azure.backup": "",
"/etc/fstab": "",
"/etc/crypttab": ""})
self.disk_util.migrate_crypt_items("/test_passphrase_path")
self.assertTrue("" == open_mock.content_dict["/etc/fstab"].strip())
self.assertTrue("" == open_mock.content_dict["/etc/crypttab"].strip())
# Test 3: skip migrating the OS entry
use_acm_mock.return_value = True
self._mock_open_with_read_data_dict(open_mock, {"/var/lib/azure_disk_encryption_config/azure_crypt_mount": "osencrypt /dev/dev_path None / ext4 False 0",
"/etc/fstab.azure.backup": "/dev/dev_path / ext4 defaults 0 0",
"/etc/fstab": "",
"/etc/crypttab": ""})
self.disk_util.migrate_crypt_items("/test_passphrase_path")
self.assertTrue("" == open_mock.content_dict["/etc/fstab"].strip())
self.assertTrue("" == open_mock.content_dict["/etc/crypttab"].strip())
# Test 4: migrate many entries
use_acm_mock.return_value = True
acm_contents = """
mapper_name /dev/dev_path None /mnt/point ext4 False 0
mapper_name2 /dev/dev_path2 None /mnt/point2 ext4 False 0
"""
fstab_backup_contents = """
/dev/dev_path /mnt/point ext4 defaults,nofail 0 0
/dev/dev_path2 /mnt/point2 ext4 defaults,nofail 0 0
"""
self._mock_open_with_read_data_dict(open_mock, {"/var/lib/azure_disk_encryption_config/azure_crypt_mount": acm_contents,
"/etc/fstab.azure.backup": fstab_backup_contents,
"/etc/fstab": "",
"/etc/crypttab": ""})
self.disk_util.migrate_crypt_items("/test_passphrase_path")
self.assertTrue("/dev/mapper/mapper_name /mnt/point ext4 defaults,nofail 0 0\n" in open_mock.content_dict["/etc/fstab"])
self.assertTrue("\n/dev/mapper/mapper_name2 /mnt/point2 ext4 defaults,nofail 0 0" in open_mock.content_dict["/etc/fstab"])
self.assertTrue("\nmapper_name /dev/dev_path /test_passphrase_path" in open_mock.content_dict["/etc/crypttab"])
self.assertTrue("\nmapper_name2 /dev/dev_path2 /test_passphrase_path" in open_mock.content_dict["/etc/crypttab"])
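# Hedged usage sketch (not collected as a test): the crypttab layout that
# DiskUtil.parse_crypttab_line consumes in the cases above is the standard
# four-field format "<mapper_name> <device> <key_file> <options>", where a
# cleartext_key_* key file sets uses_cleartext_key and a "header=<path>" entry
# in the comma-separated options field populates luks_header_path.
def _example_parse_crypttab_line():
    logger = ConsoleLogger()
    util = DiskUtil(None, MockDistroPatcher('Ubuntu', '14.04', '4.15'), logger,
                    EncryptionEnvironment(None, logger))
    line = "mapper_name /dev/dev_path /mnt/azure_bek_disk/LinuxPassPhraseFileName luks,header=/headerfile"
    return util.parse_crypttab_line(line)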
|
SohuNewCrawler/TK_News.py
|
wangbl11/ECommerceCrawlers
| 3,469 |
130964
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'AJay'
__mtime__ = '2019/4/15 0015'
"""
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
import time
import threading
from datetime import datetime
import tkinter as tk
import os
from db import MongoArticle,MongoUrl,MongoConfig
from multiprocessing import Process, JoinableQueue
from tkinter import *
from tkinter import scrolledtext
from tkinter import messagebox
class MainPage(object):
def __init__(self, master):
self.window = master
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 1400
wh = 650
x = (sw - ww) / 2
y = (sh - wh) / 2
        self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # main window geometry
self.threadnumVar = tk.IntVar()
self.timeVar = tk.IntVar()
self.save_pathVar = tk.StringVar()
self.logMessage = JoinableQueue()
self.errMessage = JoinableQueue()
self.dbconf = MongoConfig()
self.dburl = MongoUrl()
self.dbarticle = MongoArticle()
self.create_page()
self.show_logs()
self.asyCraler()
def asyCraler(self):
from run_main import NewsClawer
nc = NewsClawer()
nc.init_set()
t = threading.Thread(target=nc.run, args=())
t.start()
print('启动主线程')
def say_export_data(self):
t = threading.Thread(target=self.export_data, args=())
t.start()
print('启动主线程保存数据')
self.exportDbBtn.config(state=tk.DISABLED)
def _temp_t(self):
from souhu.souhu_new import SouhuSpider
ss = SouhuSpider()
self.startBtn.config(text='正在采集')
while True:
ss.run(self.logMessage,self.errMessage)
configs = self.dbconf.select_one()
sleep_time = configs.get("time", 60)
print(sleep_time)
time.sleep(int(sleep_time))
self.errMessage.put('【周期扫描】:{}秒'.format(sleep_time))
def create_page(self):
        self.meun()  # menu bar
        self.config()  # settings panel
        self.log()  # crawl-log panel
        self.error_log()  # system-log panel
        self.img()  # banner image
        # self.loading()  # progress bar
    def img(self):  # banner image
photo = PhotoImage(file='news.png')
label = Label(image=photo)
label.image = photo
label.grid(row=0, column=2, columnspan=2, rowspan=2, sticky=W + E + N + S, padx=5, pady=5)
    def config(self):  # settings panel
        Config = tk.LabelFrame(self.window, text="配置", padx=25, pady=5)  # frame holding the crawler settings
        Config.place(x=30, y=100)
        tk.Label(Config, text="爬取频率/s:").grid(column=0, row=0, sticky='w', pady=5)  # crawl-interval label
        tk.Label(Config, text="爬取线程:").grid(column=0, row=1, sticky='w', pady=5)  # thread-count label
        tk.Label(Config, text="保存路径:").grid(column=0, row=2, sticky='w', pady=5)  # save-path label
try:
configs = self.dbconf.select_one()
self.threadnum = configs.get('thread')
self.timenum = configs.get('time')
self.save_path = configs.get('path')
except Exception as e:
self.dbconf.insert({"flag": 1, "time": 60, "thread": 10,"path":"news"})
self.threadnum = 10
self.timenum = 60
self.save_path="默认路径news"
self.threadnumVar.set(self.threadnum)
self.timeVar.set(self.timenum)
self.save_pathVar.set(self.save_path)
self.threadEntry = tk.Entry(Config, textvariable=self.threadnumVar, width=22)
self.threadEntry.grid(column=1, row=1, pady=5)
self.timeEntry = tk.Entry(Config, textvariable=self.timeVar, width=22)
self.timeEntry.grid(column=1, row=0, pady=5)
print(self.save_pathVar)
self.pathEntry = tk.Entry(Config, textvariable=self.save_pathVar, width=22)
self.pathEntry.grid(column=1, row=2, pady=5)
self.logoutBtn = tk.Button(Config, text="测试路径", command=self.check_path)
self.logoutBtn.grid(column=2, row=2, pady=5, ipadx=15, padx=15)
        Config_start = tk.LabelFrame(self.window, text="", padx=10, pady=5)  # frame holding the action buttons
Config_start.place(x=30, y=250)
tk.Button(Config_start, text="更新配置", command=self.updata_config).grid(column=0, row=0, pady=5, ipadx=20,padx=15)
self.clearDbBtn = tk.Button(Config_start, text="清空数据库", command=self.clearDB)
self.clearDbBtn.config(bg='red')
self.clearDbBtn.grid(column=1, row=1, pady=5, ipadx=15,padx=15)
self.logoutBtn = tk.Button(Config_start, text="清除缓存", command=self.clear_product)
self.logoutBtn.grid(column=0, row=1, pady=5, ipadx=15,padx=15)
self.exportDbBtn = tk.Button(Config_start, text="导出数据", command=self.say_export_data)
# self.exportDbBtn.config(state=tk.DISABLED)
self.exportDbBtn.grid(column=2, row=1, pady=5, ipadx=15,padx=15)
self.startBtn = tk.Button(Config_start, text="开始采集", command=self.start_spider)
self.startBtn.grid(column=0, row=2, pady=5, ipadx=15)
# self.stopBtn = tk.Button(Config_start, text="停止采集", command=self.stop_spider)
# self.stopBtn.grid(column=2, row=2, pady=5, ipadx=15)
    def log(self):  # crawl-log panel
self.logMessage.put('欢迎使用【新闻网采集器器定制版ByAjay13】')
        logInformation = tk.LabelFrame(self.window, text="日志", padx=10, pady=10)  # horizontal and vertical padding of 10
logInformation.place(x=450, y=100)
self.logInformation_Window = scrolledtext.ScrolledText(logInformation, width=118, height=22, padx=10, pady=10,
wrap=tk.WORD)
self.logInformation_Window.grid()
    def error_log(self):  # system-log panel
        error_logInformation = tk.LabelFrame(self.window, text="系统日志", padx=10, pady=10)  # horizontal and vertical padding of 10
error_logInformation.place(x=450, y=460)
self.errorInformation_Window = scrolledtext.ScrolledText(error_logInformation, width=118, height=8, padx=10,
pady=10,
wrap=tk.WORD)
self.errorInformation_Window.grid()
    # Menu bar with the "about" entries
def meun(self):
menubar = tk.Menu(self.window)
aboutmemu = tk.Menu(menubar, tearoff=0)
menubar.add_cascade(label='关于', menu=aboutmemu)
aboutmemu.add_command(label='软件说明', command=self.show_Description)
aboutmemu.add_command(label='版本', command=self.show_Version)
aboutmemu.add_command(label='开发者', command=self.show_Developer)
window.config(menu=menubar)
    # Validate the configured save path
def check_path(self):
from export_article import EexportTxt
et = EexportTxt()
path = self.pathEntry.get()
checkout = et.check_input_path(path)
if checkout:
tk.messagebox.showinfo(title='路径', message='路径正确!')
elif path=="默认路径news":
tk.messagebox.showinfo(title='路径', message='保存路径将作为默认路径!')
else:
tk.messagebox.showerror(title='路径', message='路径不正确!创建正确路径')
    # Export the collected data
def export_data(self):
from export_article import EexportTxt
et = EexportTxt()
path = self.pathEntry.get()
et.run(input_path=path,errMessage=self.errMessage)
    # Update the configuration
def updata_config(self):
self.logMessage.put('更新配置')
threadnum = self.threadEntry.get()
timenum = self.timeEntry.get()
path = self.pathEntry.get()
self.dbconf.update(thread=threadnum,time=timenum,path=path)
tk.messagebox.showinfo(title='配置', message='配置信息更新成功!')
def start_spider(self):
        # TODO: add a helper that fetches all of the configuration settings.
self.errMessage.put('开始新闻数据采集')
self.startBtn.config(state=tk.DISABLED)
t = threading.Thread(target=self._temp_t, args=())
# t.daemon=True
t.start()
print('启动线程')
def clear_product(self):
if tk.messagebox.askyesno(title='删除', message='这将清空缓存数据,是否确定删除?'):
self.errMessage.put('开始清除数据库缓存')
self.dburl.delete_all({})
self.errMessage.put('清除数据库缓存结束')
tk.messagebox.showinfo(title='恭喜', message='清除数据库缓存结束')
    # Clear the entire database
def clearDB(self):
if tk.messagebox.askyesno(title='删除', message='这将清空所有的数据,是否确定删除?'):
if tk.messagebox.askyesno(title='再次确认', message='清空数据后请重启软件,是否确定删除?'):
self.dbconf.delete_all({})
self.dburl.delete_all({})
self.dbarticle.delete_all({})
self.errMessage.put('清除数据库所有数据')
self.errMessage.put('请重新启动软件,加载配置')
self.window.update()
tk.messagebox.showinfo(title='恭喜', message='所有数据清除完成!请重新启动软件,加载配置')
def log_queue(self):
while True:
log = self.logMessage.get()
date = datetime.now().strftime("%m-%d %H:%M:%S")
self.logInformation_Window.insert(END, '[{date}][{log}]'.format(date=date, log=log) + '\n')
self.logInformation_Window.see(END)
# self.logMessage.task_done()
def errlog_queue(self):
while True:
log = self.errMessage.get()
if log==1:
self.exportDbBtn.config(state=tk.ACTIVE)
date = datetime.now().strftime("%m-%d %H:%M:%S")
self.errorInformation_Window.insert(END, '[{date}][{log}]'.format(date=date, log=log) + '\n')
self.errorInformation_Window.see(END)
def show_logs(self):
Tlog_queue = threading.Thread(target=self.log_queue, args=())
Terrlog_queue = threading.Thread(target=self.errlog_queue, args=())
Tlog_queue.daemon = True
Tlog_queue.start()
Terrlog_queue.daemon = True
Terrlog_queue.start()
# self.logMessage.join()
def show_Description(self):
Description(self.window)
def show_Version(self):
Version(self.window)
def show_Developer(self):
Developer(self.window)
# Usage-instructions window
class Description():
    '''
    Window that explains how to use the software.
    '''
def __init__(self, master):
self.master = master
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 650
wh = 720
x = (sw - ww) / 3
y = (sh - wh) / 3
self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y)) # 父容器大小
self.window.title('使用说明')
self.create_page()
def create_page(self):
Dev = tk.LabelFrame(self.window, text="关于使用说明", padx=10, pady=5) # 水平,垂直方向上的边距均为 10
Dev.place(x=50, y=50)
        text = "[Read these usage instructions carefully before use]\n\n" \
               "Usage\n" \
               "This project crawls news-site articles with multiple threads, so crawling is fast and efficient.\n" \
               "The database is used to de-duplicate news, so articles that were already crawled are not fetched again.\n\n" \
               "**Notes**\n\n" \
               "- Before crawling, check that the database service started successfully.\n\n" \
               "- Crawl interval: how often a crawl runs; the default is 60 s and can be adjusted,\n e.g. 5 minutes = 5*60 = 300 s; too small an interval may get the IP banned.\n\n" \
               "- Crawl threads: depends on the machine; 10 threads for an ordinary PC,\n 50 or 100 for a powerful one.\n\n" \
               "- Save path: if the path is wrong or not set, files are exported to the news folder.\n\n" \
               "- After every change to the settings, update the configuration.\n\n" \
               "- Clearing the cache deletes all crawl bookkeeping.\n\n" \
               "- Clearing the database deletes all stored data.\n\n" \
               "- It is recommended to clear the database about every 5 days to reduce load on the machine.\n\n" \
               "- Closing the program stops the crawl.\n\n" \
               " \n" \
               "- Enjoy!\n"
        tk.Label(Dev, text=text, justify='left').grid(column=0, row=0, sticky='w', pady=5, padx=5)  # render the instructions text
# Version-information window
class Version():
    '''
    Window that shows the software version history.
    '''
def __init__(self, master):
self.master = master
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 400
wh = 300
x = (sw - ww) / 3
y = (sh - wh) / 3
self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y)) # 父容器大小
self.window.title('软件版本')
self.create_page()
def create_page(self):
Dev = tk.LabelFrame(self.window, text="关于版本更新", padx=10, pady=5) # 水平,垂直方向上的边距均为 10
Dev.place(x=50, y=50)
        text = " 10 May 2019  version V1.0 (release)\n " \
               " 09 May 2019  version V0.2\n "
        tk.Label(Dev, text=text).grid(column=0, row=0, sticky='w', pady=5, padx=5)  # render the text
# Developer-information window
class Developer():
    '''
    Window that introduces the developer.
    '''
def __init__(self, master):
self.master = master
self.window = tk.Toplevel(master)
self.window.wm_attributes('-topmost', 1)
sw = self.window.winfo_screenwidth()
sh = self.window.winfo_screenheight()
ww = 400
wh = 300
x = (sw - ww) / 3
y = (sh - wh) / 3
self.window.geometry('%dx%d+%d+%d' % (ww, wh, x, y)) # 父容器大小
self.window.title('开发者')
self.create_page()
def create_page(self):
Dev = tk.LabelFrame(self.window, text="关于开发者", padx=10, pady=5) # 水平,垂直方向上的边距均为 10
Dev.place(x=50, y=50)
        text = " Author: AJay13\n" \
               " Skills: familiar with crawling and anti-crawling techniques, data cleaning,\n website building, software development\n" \
               " Contact: BoeSKh5446sa23sadKJH84ads5\n"
        tk.Label(Dev, text=text, justify='left').grid(column=0, row=0, sticky='w', pady=5, padx=5)  # render the text
# Trial/authorization expiry check
def test_time(over_time):
from datetime import datetime
d2 = datetime.strptime(over_time, '%Y-%m-%d %H:%M:%S')
now = datetime.now()
if d2 > now:
return True
else:
return False
if __name__ == '__main__':
    if test_time('2020-5-11 16:00:00'):  # check the authorization expiry date
        window = tk.Tk()  # root window
print('开始')
window.title("新闻网采集器器定制版ByAjay13") # 父容器标题
basePath = os.path.abspath(os.path.dirname(__file__))
print('base_path基础路径',basePath)
if not os.path.exists(os.path.join(basePath, 'temp')):
os.mkdir(os.path.join(basePath, 'temp'))
if not os.path.exists(os.path.join(basePath, 'log')):
os.mkdir(os.path.join(basePath, 'log'))
mongod = os.path.join(basePath, 'bin', 'mongod.exe')
dbpath = os.path.join(basePath, 'temp')
logpath = os.path.join(basePath, 'log', 'mongodb.log')
#'D:\mongodb\bin\mongod.exe --dbpath D:\mongodb\xianyudb --logpath D:\mongodb\tb_log\MongoDB.log --directoryperdb --serviceName mongodb_tb --install'
if not os.path.exists(logpath):
os.system(
'{} --dbpath {} --logpath {} --directoryperdb --serviceName mongodb --install'.format(mongod, dbpath,
logpath))
os.system('net start mongodb')
else:
os.system('net start mongodb')
MainPage(window)
        # Prerequisite setup:
        # configure mongodb as a data service and initialise the configuration service
        '''
        Start the database server service.
        Try to connect to the database and look for db=1 in the configuration; if the connection fails,
        alert that the database configuration is wrong, try to initialise it automatically, or contact the administrator:
        1. create the local mongodb data folder
        2. create the local mongodb log folder
        3. run the service-configuration command
        4. start the service
        5. insert db=1 into the configuration collection
        Once the service starts normally, the tk panel loads the configuration items.
        The asynchronous crawler thread starts, re-reads the configuration every 10 seconds and feeds it to the process.
        Keyword: start == 1 means the item joins the crawl queue.
        '''
print('监听')
window.mainloop()
else:
window = tk.Tk() # 父容器
window.title("新闻网采集器器定制版ByAjay13") # 父容器标题
window.wm_attributes('-topmost', 1)
sw = window.winfo_screenwidth()
sh = window.winfo_screenheight()
ww = 400
wh = 300
x = (sw - ww) / 3
y = (sh - wh) / 3
        window.geometry('%dx%d+%d+%d' % (ww, wh, x, y))  # window size and position
        Dev = tk.LabelFrame(window, text="授权超时", padx=10, pady=5)  # inner padding: 10 horizontal, 5 vertical
Dev.place(x=50, y=50)
text = " 你已经超出授权使用期限\n" \
" 请联系管理员进行提权\n \n" \
" 联系:BoeSKh5446sa23sadKJH84ads5\n"
        tk.Label(Dev, text=text, justify='left').grid(column=0, row=0, sticky='w', pady=5, padx=5)  # place the message label
window.mainloop()
|
buildroot/utils/checkpackagelib/lib_mk.py
|
efficiosoft/hassos
| 617 |
130982
|
<reponame>efficiosoft/hassos
# See utils/checkpackagelib/readme.txt before editing this file.
# There are already dependency checks during the build, so the check functions
# below don't need to check for things already verified by exploring the
# menu options using "make menuconfig" and by running "make" with appropriate
# packages enabled.
import re
from base import _CheckFunction
from lib import ConsecutiveEmptyLines # noqa: F401
from lib import EmptyLastLine # noqa: F401
from lib import NewlineAtEof # noqa: F401
from lib import TrailingSpace # noqa: F401
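# Illustrative only: these classes are driven by the check-package script, one
# .mk file line at a time. The constructor arguments below are assumptions, not
# the real signature; the sketch just shows the before()/check_line() protocol:
#
#   check = Indent(...)            # instantiated by check-package
#   check.before()
#   for lineno, text in enumerate(mk_lines, start=1):
#       warning = check.check_line(lineno, text)
#       if warning:
#           print("\n".join(warning))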
class Indent(_CheckFunction):
COMMENT = re.compile("^\s*#")
CONDITIONAL = re.compile("^\s*(ifeq|ifneq|endif)\s")
ENDS_WITH_BACKSLASH = re.compile(r"^[^#].*\\$")
END_DEFINE = re.compile("^\s*endef\s")
MAKEFILE_TARGET = re.compile("^[^# \t]+:\s")
START_DEFINE = re.compile("^\s*define\s")
def before(self):
self.define = False
self.backslash = False
self.makefile_target = False
def check_line(self, lineno, text):
if self.START_DEFINE.search(text):
self.define = True
return
if self.END_DEFINE.search(text):
self.define = False
return
expect_tabs = False
if self.define or self.backslash or self.makefile_target:
expect_tabs = True
if self.CONDITIONAL.search(text):
expect_tabs = False
# calculate for next line
if self.ENDS_WITH_BACKSLASH.search(text):
self.backslash = True
else:
self.backslash = False
if self.MAKEFILE_TARGET.search(text):
self.makefile_target = True
return
if text.strip() == "":
self.makefile_target = False
return
# comment can be indented or not inside define ... endef, so ignore it
if self.define and self.COMMENT.search(text):
return
if expect_tabs:
if not text.startswith("\t"):
return ["{}:{}: expected indent with tabs"
.format(self.filename, lineno),
text]
else:
if text.startswith("\t"):
return ["{}:{}: unexpected indent with tabs"
.format(self.filename, lineno),
text]
class PackageHeader(_CheckFunction):
def before(self):
self.skip = False
def check_line(self, lineno, text):
if self.skip or lineno > 6:
return
if lineno in [1, 5]:
if lineno == 1 and text.startswith("include "):
self.skip = True
return
if text.rstrip() != "#" * 80:
return ["{}:{}: should be 80 hashes ({}#writing-rules-mk)"
.format(self.filename, lineno, self.url_to_manual),
text,
"#" * 80]
elif lineno in [2, 4]:
if text.rstrip() != "#":
return ["{}:{}: should be 1 hash ({}#writing-rules-mk)"
.format(self.filename, lineno, self.url_to_manual),
text]
elif lineno == 6:
if text.rstrip() != "":
return ["{}:{}: should be a blank line ({}#writing-rules-mk)"
.format(self.filename, lineno, self.url_to_manual),
text]
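# Illustrative only: a header accepted by PackageHeader (lines 1-6) looks like
#
#   <row of exactly 80 '#' characters>
#   #
#   # foo          (line 3 is not checked here; by convention, the package name)
#   #
#   <row of exactly 80 '#' characters>
#   <blank line>
#
# and a file whose first line starts with "include " is skipped entirely.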
class RemoveDefaultPackageSourceVariable(_CheckFunction):
packages_that_may_contain_default_source = ["binutils", "gcc", "gdb"]
PACKAGE_NAME = re.compile("/([^/]+)\.mk")
def before(self):
package = self.PACKAGE_NAME.search(self.filename).group(1)
package_upper = package.replace("-", "_").upper()
self.package = package
self.FIND_SOURCE = re.compile(
"^{}_SOURCE\s*=\s*{}-\$\({}_VERSION\)\.tar\.gz"
.format(package_upper, package, package_upper))
def check_line(self, lineno, text):
if self.FIND_SOURCE.search(text):
if self.package in self.packages_that_may_contain_default_source:
return
return ["{}:{}: remove default value of _SOURCE variable "
"({}#generic-package-reference)"
.format(self.filename, lineno, self.url_to_manual),
text]
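# Illustrative only: for a hypothetical package/foo/foo.mk, this check flags
#
#   FOO_SOURCE = foo-$(FOO_VERSION).tar.gz
#
# because that is already the default value provided by the package
# infrastructure (binutils, gcc and gdb are exempted above).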
class SpaceBeforeBackslash(_CheckFunction):
TAB_OR_MULTIPLE_SPACES_BEFORE_BACKSLASH = re.compile(r"^.*( |\t ?)\\$")
def check_line(self, lineno, text):
if self.TAB_OR_MULTIPLE_SPACES_BEFORE_BACKSLASH.match(text.rstrip()):
return ["{}:{}: use only one space before backslash"
.format(self.filename, lineno),
text]
class TrailingBackslash(_CheckFunction):
ENDS_WITH_BACKSLASH = re.compile(r"^[^#].*\\$")
def before(self):
self.backslash = False
def check_line(self, lineno, text):
last_line_ends_in_backslash = self.backslash
# calculate for next line
if self.ENDS_WITH_BACKSLASH.search(text):
self.backslash = True
self.lastline = text
return
self.backslash = False
if last_line_ends_in_backslash and text.strip() == "":
return ["{}:{}: remove trailing backslash"
.format(self.filename, lineno - 1),
self.lastline]
class TypoInPackageVariable(_CheckFunction):
ALLOWED = re.compile("|".join([
"ACLOCAL_DIR",
"ACLOCAL_HOST_DIR",
"BR_CCACHE_INITIAL_SETUP",
"BR_LIBC",
"BR_NO_CHECK_HASH_FOR",
"LINUX_EXTENSIONS",
"LINUX_POST_PATCH_HOOKS",
"LINUX_TOOLS",
"LUA_RUN",
"MKFS_JFFS2",
"MKIMAGE_ARCH",
"PACKAGES_PERMISSIONS_TABLE",
"PKG_CONFIG_HOST_BINARY",
"SUMTOOL",
"TARGET_FINALIZE_HOOKS",
"TARGETS_ROOTFS",
"XTENSA_CORE_NAME"]))
PACKAGE_NAME = re.compile("/([^/]+)\.mk")
VARIABLE = re.compile("^([A-Z0-9_]+_[A-Z0-9_]+)\s*(\+|)=")
def before(self):
package = self.PACKAGE_NAME.search(self.filename).group(1)
package = package.replace("-", "_").upper()
# linux tools do not use LINUX_TOOL_ prefix for variables
package = package.replace("LINUX_TOOL_", "")
# linux extensions do not use LINUX_EXT_ prefix for variables
package = package.replace("LINUX_EXT_", "")
self.package = package
self.REGEX = re.compile("^(HOST_|ROOTFS_)?({}_[A-Z0-9_]+)".format(package))
self.FIND_VIRTUAL = re.compile(
"^{}_PROVIDES\s*(\+|)=\s*(.*)".format(package))
self.virtual = []
def check_line(self, lineno, text):
m = self.VARIABLE.search(text)
if m is None:
return
variable = m.group(1)
# allow to set variables for virtual package this package provides
v = self.FIND_VIRTUAL.search(text)
if v:
self.virtual += v.group(2).upper().split()
return
for virtual in self.virtual:
if variable.startswith("{}_".format(virtual)):
return
if self.ALLOWED.match(variable):
return
if self.REGEX.search(text) is None:
return ["{}:{}: possible typo: {} -> *{}*"
.format(self.filename, lineno, variable, self.package),
text]
class UselessFlag(_CheckFunction):
DEFAULT_AUTOTOOLS_FLAG = re.compile("^.*{}".format("|".join([
"_AUTORECONF\s*=\s*NO",
"_LIBTOOL_PATCH\s*=\s*YES"])))
DEFAULT_GENERIC_FLAG = re.compile("^.*{}".format("|".join([
"_INSTALL_IMAGES\s*=\s*NO",
"_INSTALL_REDISTRIBUTE\s*=\s*YES",
"_INSTALL_STAGING\s*=\s*NO",
"_INSTALL_TARGET\s*=\s*YES"])))
END_CONDITIONAL = re.compile("^\s*(endif)")
START_CONDITIONAL = re.compile("^\s*(ifeq|ifneq)")
def before(self):
self.conditional = 0
def check_line(self, lineno, text):
if self.START_CONDITIONAL.search(text):
self.conditional += 1
return
if self.END_CONDITIONAL.search(text):
self.conditional -= 1
return
# allow non-default conditionally overridden by default
if self.conditional > 0:
return
if self.DEFAULT_GENERIC_FLAG.search(text):
return ["{}:{}: useless default value ({}#"
"_infrastructure_for_packages_with_specific_build_systems)"
.format(self.filename, lineno, self.url_to_manual),
text]
if self.DEFAULT_AUTOTOOLS_FLAG.search(text) and not text.lstrip().startswith("HOST_"):
return ["{}:{}: useless default value "
"({}#_infrastructure_for_autotools_based_packages)"
.format(self.filename, lineno, self.url_to_manual),
text]
|
calendarserver/accesslog.py
|
backwardn/ccs-calendarserver
| 462 |
130994
|
##
# Copyright (c) 2006-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Access logs.
"""
__all__ = [
"DirectoryLogWrapperResource",
"RotatingFileAccessLoggingObserver",
"AMPCommonAccessLoggingObserver",
"AMPLoggingFactory",
]
import collections
import datetime
import json
import os
try:
import psutil
except ImportError:
psutil = None
from sys import platform
import time
from calendarserver.logAnalysis import getAdjustedMethodName
from twext.python.log import Logger
from twext.who.idirectory import RecordType
from txweb2 import iweb
from txweb2.log import BaseCommonAccessLoggingObserver
from txweb2.log import LogWrapperResource
from twisted.internet import protocol, task
from twisted.protocols import amp
from twistedcaldav.config import config
log = Logger()
class DirectoryLogWrapperResource(LogWrapperResource):
def __init__(self, resource, directory):
super(DirectoryLogWrapperResource, self).__init__(resource)
self.directory = directory
def getDirectory(self):
return self.directory
class CommonAccessLoggingObserverExtensions(BaseCommonAccessLoggingObserver):
"""
A base class for our extension to the L{BaseCommonAccessLoggingObserver}
"""
def emit(self, eventDict):
format = None
formatArgs = None
if eventDict.get("interface") is iweb.IRequest:
request = eventDict["request"]
response = eventDict["response"]
loginfo = eventDict["loginfo"]
# Try to determine authentication and authorization identifiers
uid = "-"
if getattr(request, "authnUser", None) is not None:
def convertPrincipaltoShortName(principal):
if principal.record.recordType == RecordType.user:
return principal.record.shortNames[0]
else:
return "({rtype}){name}".format(rtype=principal.record.recordType, name=principal.record.shortNames[0],)
uidn = convertPrincipaltoShortName(request.authnUser)
uidz = convertPrincipaltoShortName(request.authzUser)
if uidn != uidz:
uid = '"{authn} as {authz}"'.format(authn=uidn, authz=uidz,)
else:
uid = uidn
#
# For some methods which basically allow you to tunnel a
# custom request (eg. REPORT, POST), the method name
# itself doesn't tell you much about what action is being
# requested. This allows a method to tack a submethod
# attribute to the request, so we can provide a little
# more detail here.
#
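                # For example, if a REPORT request sets request.submethod to
                # "calendar-query" (hypothetical value), the method field below
                # becomes "REPORT(calendar-query)".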
if config.EnableExtendedAccessLog and hasattr(request, "submethod"):
method = "%s(%s)" % (request.method, request.submethod)
else:
method = request.method
# Standard Apache access log fields
format = (
'%(host)s - %(uid)s [%(date)s]'
' "%(method)s %(uri)s HTTP/%(protocolVersion)s"'
' %(statusCode)s %(bytesSent)d'
' "%(referer)s" "%(userAgent)s"'
)
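            # With illustrative values only, the resulting line looks like:
            #   192.0.2.10 - jdoe [13/Apr/2017:10:00:00 +0000] "PROPFIND /calendars/users/jdoe/ HTTP/1.1" 207 2115 "-" "curl/7.54.0"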
formatArgs = {
"host": request.remoteAddr.host,
"uid": uid,
"date": self.logDateString(response.headers.getHeader("date", 0)),
"method": method,
"uri": request.uri.replace('"', "%22"),
"protocolVersion": ".".join(str(x) for x in request.clientproto),
"statusCode": response.code,
"bytesSent": loginfo.bytesSent,
"referer": request.headers.getHeader("referer", "-"),
"userAgent": request.headers.getHeader("user-agent", "-"),
}
# Add extended items to format and formatArgs
if config.EnableExtendedAccessLog:
format += ' i=%(serverInstance)s'
formatArgs["serverInstance"] = config.LogID if config.LogID else "0"
if request.chanRequest: # This can be None during tests
format += ' or=%(outstandingRequests)s'
formatArgs["outstandingRequests"] = request.chanRequest.channel.factory.outstandingRequests
# Tags for time stamps collected along the way - the first one in the list is the initial
# time for request creation - we use that to track the entire request/response time
nowtime = time.time()
if config.EnableExtendedTimingAccessLog:
basetime = request.timeStamps[0][1]
request.timeStamps[0] = ("t", time.time(),)
for tag, timestamp in request.timeStamps:
format += " %s=%%(%s).1f" % (tag, tag,)
formatArgs[tag] = (timestamp - basetime) * 1000
if tag != "t":
basetime = timestamp
if len(request.timeStamps) > 1:
format += " t-log=%(t-log).1f"
formatArgs["t-log"] = (timestamp - basetime) * 1000
else:
format += " t=%(t).1f"
formatArgs["t"] = (nowtime - request.timeStamps[0][1]) * 1000
if hasattr(request, "extendedLogItems"):
for k, v in sorted(request.extendedLogItems.iteritems(), key=lambda x: x[0]):
k = str(k).replace('"', "%22")
v = str(v).replace('"', "%22")
if " " in v:
v = '"%s"' % (v,)
format += " %s=%%(%s)s" % (k, k,)
formatArgs[k] = v
# Add the name of the XML error element for debugging purposes
if hasattr(response, "error"):
format += " err=%(err)s"
formatArgs["err"] = response.error.qname()[1]
fwdHeaders = request.headers.getRawHeaders("x-forwarded-for", "")
if fwdHeaders:
# Limit each x-forwarded-header to 50 in case someone is
# trying to overwhelm the logs
forwardedFor = ",".join([hdr[:50] for hdr in fwdHeaders])
forwardedFor = forwardedFor.replace(" ", "")
format += " fwd=%(fwd)s"
formatArgs["fwd"] = forwardedFor
if formatArgs["host"] == "0.0.0.0":
fwdHeaders = request.headers.getRawHeaders("x-forwarded-for", "")
if fwdHeaders:
formatArgs["host"] = fwdHeaders[-1].split(",")[-1].strip()
format += " unix=%(unix)s"
formatArgs["unix"] = "true"
elif "overloaded" in eventDict:
overloaded = eventDict.get("overloaded")
format = (
'%(host)s - %(uid)s [%(date)s]'
' "%(method)s"'
' %(statusCode)s %(bytesSent)d'
' "%(referer)s" "%(userAgent)s"'
)
formatArgs = {
"host": overloaded.transport.hostname,
"uid": "-",
"date": self.logDateString(time.time()),
"method": "???",
"uri": "",
"protocolVersion": "",
"statusCode": 503,
"bytesSent": 0,
"referer": "-",
"userAgent": "-",
}
if config.EnableExtendedAccessLog:
format += ' p=%(serverPort)s'
formatArgs["serverPort"] = overloaded.transport.server.port
format += ' or=%(outstandingRequests)s'
formatArgs["outstandingRequests"] = overloaded.outstandingRequests
# Write anything we got to the log and stats
if format is not None:
# sanitize output to mitigate log injection
for k, v in formatArgs.items():
if not isinstance(v, basestring):
continue
v = v.replace("\r", "\\r")
v = v.replace("\n", "\\n")
v = v.replace("\"", "\\\"")
formatArgs[k] = v
formatArgs["type"] = "access-log"
formatArgs["log-format"] = format
self.logStats(formatArgs)
class RotatingFileAccessLoggingObserver(CommonAccessLoggingObserverExtensions):
"""
Class to do "apache" style access logging to a rotating log file. The log
file is rotated after midnight each day.
This class also currently handles the collection of system and log statistics.
"""
def __init__(self, logpath):
self.logpath = logpath
self.systemStats = None
self.statsByMinute = []
self.stats1m = None
self.stats5m = None
self.stats1h = None
def accessLog(self, message, allowrotate=True):
"""
Log a message to the file and possibly rotate if date has changed.
@param message: C{str} for the message to log.
@param allowrotate: C{True} if log rotate allowed, C{False} to log to current file
without testing for rotation.
"""
if self.shouldRotate() and allowrotate:
self.flush()
self.rotate()
if isinstance(message, unicode):
message = message.encode("utf-8")
self.f.write(message + "\n")
def start(self):
"""
Start logging. Open the log file and log an "open" message.
"""
super(RotatingFileAccessLoggingObserver, self).start()
self._open()
self.accessLog("Log opened - server start: [%s]." % (datetime.datetime.now().ctime(),))
def stop(self):
"""
        Stop logging. Close the log file and log a "closed" message.
"""
self.accessLog("Log closed - server stop: [%s]." % (datetime.datetime.now().ctime(),), False)
super(RotatingFileAccessLoggingObserver, self).stop()
self._close()
if self.systemStats is not None:
self.systemStats.stop()
def _open(self):
"""
Open the log file.
"""
self.f = open(self.logpath, "a", 1)
self.lastDate = self.toDate(os.stat(self.logpath)[8])
def _close(self):
"""
Close the log file.
"""
self.f.close()
def flush(self):
"""
Flush the log file.
"""
self.f.flush()
def shouldRotate(self):
"""
Rotate when the date has changed since last write
"""
if config.RotateAccessLog:
return self.toDate() > self.lastDate
else:
return False
def toDate(self, *args):
"""
Convert a unixtime to (year, month, day) localtime tuple,
or return the current (year, month, day) localtime tuple.
This function primarily exists so you may overload it with
gmtime, or some cruft to make unit testing possible.
"""
# primarily so this can be unit tested easily
return time.localtime(*args)[:3]
def suffix(self, tupledate):
"""
Return the suffix given a (year, month, day) tuple or unixtime
"""
try:
return "_".join(map(str, tupledate))
except:
# try taking a float unixtime
return "_".join(map(str, self.toDate(tupledate)))
def rotate(self):
"""
Rotate the file and create a new one.
If it's not possible to open new logfile, this will fail silently,
and continue logging to old logfile.
"""
newpath = "%s.%s" % (self.logpath, self.suffix(self.lastDate))
if os.path.exists(newpath):
log.info("Cannot rotate log file to '{path}' because it already exists.", path=newpath)
return
self.accessLog("Log closed - rotating: [%s]." % (datetime.datetime.now().ctime(),), False)
log.info("Rotating log file to: '{path}'", path=newpath, system="Logging")
self.f.close()
os.rename(self.logpath, newpath)
self._open()
self.accessLog("Log opened - rotated: [%s]." % (datetime.datetime.now().ctime(),), False)
def logStats(self, stats):
"""
Update stats
"""
# Only use the L{SystemMonitor} when stats socket is in use
if config.Stats.EnableUnixStatsSocket or config.Stats.EnableTCPStatsSocket:
# Initialize a L{SystemMonitor} on the first call
if self.systemStats is None:
self.systemStats = SystemMonitor()
# Currently only storing stats for access log type
if "type" not in stats or stats["type"] != "access-log":
return
currentStats = self.ensureSequentialStats()
self.updateStats(currentStats, stats)
if stats["type"] == "access-log":
self.accessLog(stats["log-format"] % stats)
def getStats(self):
"""
Return the stats
"""
# Only use the L{SystemMonitor} when stats socket is in use
if not config.Stats.EnableUnixStatsSocket and not config.Stats.EnableTCPStatsSocket:
return {}
# Initialize a L{SystemMonitor} on the first call
if self.systemStats is None:
self.systemStats = SystemMonitor()
# The current stats
currentStats = self.ensureSequentialStats()
# Get previous minute details
if self.stats1m is None:
index = min(2, len(self.statsByMinute))
if index > 0:
self.stats1m = self.statsByMinute[-index][1]
else:
self.stats1m = self.initStats()
# Do five minute aggregate
if self.stats5m is None:
self.stats5m = self.initStats()
index = min(6, len(self.statsByMinute))
for i in range(-index, -1):
stat = self.statsByMinute[i][1]
self.mergeStats(self.stats5m, stat)
# Do one hour aggregate
if self.stats1h is None:
self.stats1h = self.initStats()
index = min(61, len(self.statsByMinute))
for i in range(-index, -1):
stat = self.statsByMinute[i][1]
self.mergeStats(self.stats1h, stat)
printStats = {
"system": self.systemStats.items,
"current": currentStats,
"1m": self.stats1m,
"5m": self.stats5m,
"1h": self.stats1h,
}
return printStats
def ensureSequentialStats(self):
"""
Make sure the list of timed stats is contiguous wrt time.
"""
dtindex = int(time.time() / 60.0) * 60
if len(self.statsByMinute) > 0:
if self.statsByMinute[-1][0] != dtindex:
oldindex = self.statsByMinute[-1][0]
if oldindex != dtindex:
# Adding a new minutes worth of data - clear out any cached
# historical data
self.stats1m = None
self.stats5m = None
self.stats1h = None
# Add enough new stats to account for any idle minutes between
# the last recorded stat and the current one
while oldindex != dtindex:
oldindex += 60
self.statsByMinute.append((oldindex, self.initStats(),))
else:
self.statsByMinute.append((dtindex, self.initStats(),))
self.stats1m = None
self.stats5m = None
self.stats1h = None
# We only need up to 1 hour's worth of data, so truncate the cached data
# to avoid filling memory
threshold = 65
if len(self.statsByMinute) > threshold:
self.statsByMinute = self.statsByMinute[-threshold:]
return self.statsByMinute[-1][1]
def initStats(self):
def initTimeHistogram():
return {
"<10ms": 0,
"10ms<->100ms": 0,
"100ms<->1s": 0,
"1s<->10s": 0,
"10s<->30s": 0,
"30s<->60s": 0,
">60s": 0,
"Over 1s": 0,
"Over 10s": 0,
}
return {
"requests": 0,
"method": collections.defaultdict(int),
"method-t": collections.defaultdict(float),
"500": 0,
"401": 0,
"t": 0.0,
"t-resp-wr": 0.0,
"slots": 0,
"max-slots": 0,
"T": initTimeHistogram(),
"T-RESP-WR": initTimeHistogram(),
"T-MAX": 0.0,
"cpu": self.systemStats.items["cpu use"],
}
def updateStats(self, current, stats):
# Gather specific information and aggregate into our persistent stats
adjustedMethod = getAdjustedMethodName(stats)
if current["requests"] == 0:
current["cpu"] = 0.0
current["requests"] += 1
current["method"][adjustedMethod] += 1
current["method-t"][adjustedMethod] += stats.get("t", 0.0)
if stats["statusCode"] >= 500:
current["500"] += 1
elif stats["statusCode"] == 401:
current["401"] += 1
current["t"] += stats.get("t", 0.0)
current["t-resp-wr"] += stats.get("t-resp-wr", 0.0)
current["slots"] += stats.get("outstandingRequests", 0)
current["max-slots"] = max(current["max-slots"], self.limiter.maxOutstandingRequests if hasattr(self, "limiter") else 0)
current["cpu"] += self.systemStats.items["cpu use"]
def histogramUpdate(t, key):
if t >= 60000.0:
current[key][">60s"] += 1
elif t >= 30000.0:
current[key]["30s<->60s"] += 1
elif t >= 10000.0:
current[key]["10s<->30s"] += 1
elif t >= 1000.0:
current[key]["1s<->10s"] += 1
elif t >= 100.0:
current[key]["100ms<->1s"] += 1
elif t >= 10.0:
current[key]["10ms<->100ms"] += 1
else:
current[key]["<10ms"] += 1
            # count cumulatively so that long requests land in both "Over" buckets
            if t >= 1000.0:
                current[key]["Over 1s"] += 1
            if t >= 10000.0:
                current[key]["Over 10s"] += 1
t = stats.get("t", None)
if t is not None:
histogramUpdate(t, "T")
current["T-MAX"] = max(current["T-MAX"], t)
t = stats.get("t-resp-wr", None)
if t is not None:
histogramUpdate(t, "T-RESP-WR")
def mergeStats(self, current, stats):
# Gather specific information and aggregate into our persistent stats
if current["requests"] == 0:
current["cpu"] = 0.0
current["requests"] += stats["requests"]
for method in stats["method"].keys():
current["method"][method] += stats["method"][method]
for method in stats["method-t"].keys():
current["method-t"][method] += stats["method-t"][method]
current["500"] += stats["500"]
current["401"] += stats["401"]
current["t"] += stats["t"]
current["t-resp-wr"] += stats["t-resp-wr"]
current["slots"] += stats["slots"]
current["max-slots"] = max(current["max-slots"], stats["max-slots"])
current["cpu"] += stats["cpu"]
def histogramUpdate(t, key):
if t >= 60000.0:
current[key][">60s"] += 1
elif t >= 30000.0:
current[key]["30s<->60s"] += 1
elif t >= 10000.0:
current[key]["10s<->30s"] += 1
elif t >= 1000.0:
current[key]["1s<->10s"] += 1
elif t >= 100.0:
current[key]["100ms<->1s"] += 1
elif t >= 10.0:
current[key]["10ms<->100ms"] += 1
else:
current[key]["<10ms"] += 1
            # count cumulatively so that long requests land in both "Over" buckets
            if t >= 1000.0:
                current[key]["Over 1s"] += 1
            if t >= 10000.0:
                current[key]["Over 10s"] += 1
for bin in stats["T"].keys():
current["T"][bin] += stats["T"][bin]
current["T-MAX"] = max(current["T-MAX"], stats["T-MAX"])
for bin in stats["T-RESP-WR"].keys():
current["T-RESP-WR"][bin] += stats["T-RESP-WR"][bin]
class SystemMonitor(object):
"""
Keeps track of system usage information. This installs a reactor task to
run about once per second and track system use.
"""
CPUStats = collections.namedtuple("CPUStats", ("total", "idle",))
def __init__(self):
self.items = {
"cpu count": psutil.cpu_count() if psutil is not None else -1,
"cpu use": 0.0,
"memory used": 0,
"memory percent": 0.0,
"start time": time.time(),
}
if psutil is not None:
times = psutil.cpu_times()
self.previous_cpu = SystemMonitor.CPUStats(sum(times), times.idle,)
else:
self.previous_cpu = SystemMonitor.CPUStats(0, 0)
self.task = task.LoopingCall(self.update)
self.task.start(1.0)
def stop(self):
"""
Just stop the task
"""
self.task.stop()
def update(self):
# CPU usage based on diff'ing CPU times
if psutil is not None:
times = psutil.cpu_times()
cpu_now = SystemMonitor.CPUStats(sum(times), times.idle,)
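            # The expression below is the busy fraction of the sampling
            # interval; e.g. if total CPU time advanced by 4.0s of which 3.0s
            # were idle, cpu use = 100.0 * (1.0 - 3.0 / 4.0) = 25.0 (percent).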
try:
self.items["cpu use"] = 100.0 * (1.0 - (cpu_now.idle - self.previous_cpu.idle) / (cpu_now.total - self.previous_cpu.total))
except ZeroDivisionError:
self.items["cpu use"] = 0.0
self.previous_cpu = cpu_now
# Memory usage
if psutil is not None and 'freebsd' not in platform:
mem = psutil.virtual_memory()
self.items["memory used"] = mem.used
self.items["memory percent"] = mem.percent
class LogStats(amp.Command):
arguments = [("message", amp.String())]
class AMPCommonAccessLoggingObserver(CommonAccessLoggingObserverExtensions):
def __init__(self):
self.protocol = None
self._buffer = []
def flushBuffer(self):
if self._buffer:
for msg in self._buffer:
self.logStats(msg)
def addClient(self, connectedClient):
"""
An AMP client connected; hook it up to this observer.
"""
self.protocol = connectedClient
self.flushBuffer()
def logStats(self, message):
"""
Log server stats via the remote AMP Protocol
"""
if self.protocol is not None:
message = json.dumps(message)
if isinstance(message, unicode):
message = message.encode("utf-8")
d = self.protocol.callRemote(LogStats, message=message)
d.addErrback(log.error)
else:
self._buffer.append(message)
class AMPLoggingProtocol(amp.AMP):
"""
A server side protocol for logging to the given observer.
"""
def __init__(self, observer):
self.observer = observer
super(AMPLoggingProtocol, self).__init__()
def logStats(self, message):
stats = json.loads(message)
self.observer.logStats(stats)
return {}
LogStats.responder(logStats)
class AMPLoggingFactory(protocol.ServerFactory):
def __init__(self, observer):
self.observer = observer
def doStart(self):
self.observer.start()
def doStop(self):
self.observer.stop()
def buildProtocol(self, addr):
return AMPLoggingProtocol(self.observer)
|
examples/unfinished/flp_nonlinear.py
|
gasse/PySCIPOpt
| 311 |
130998
|
<reponame>gasse/PySCIPOpt
# todo
"""
flp_nonlinear.py: piecewise linear model for the capacitated facility location problem
minimize the total (weighted) travel cost from n customers to a
given set of facilities, with fixed costs and limited capacities;
costs are nonlinear (square root of the total quantity serviced
by a facility).
Approaches: use
- convex combination
- multiple selection
formulations defined in 'piecewise.py'.
Copyright (c) by <NAME> and <NAME>, 2012
"""
import math
import random
from pyscipopt import Model, quicksum, multidict
from piecewise import *
def flp_nonlinear_mselect(I,J,d,M,f,c,K):
"""flp_nonlinear_mselect -- use multiple selection model
Parameters:
- I: set of customers
- J: set of facilities
- d[i]: demand for customer i
- M[j]: capacity of facility j
- f[j]: fixed cost for using a facility in point j
- c[i,j]: unit cost of servicing demand point i from facility j
- K: number of linear pieces for approximation of non-linear cost function
Returns a model, ready to be solved.
"""
a,b = {},{}
for j in J:
U = M[j]
L = 0
width = U/float(K)
a[j] = [k*width for k in range(K+1)]
b[j] = [f[j]*math.sqrt(value) for value in a[j]]
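    # Worked example of the breakpoints built above (values rounded): with
    # M[j] = 100, f[j] = 10 and K = 4, width = 25, so
    #   a[j] = [0, 25, 50, 75, 100]
    #   b[j] = [0.0, 50.0, 70.71, 86.60, 100.0]   # b = 10 * sqrt(a)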
model = Model("nonlinear flp -- piecewise linear version with multiple selection")
x = {}
for j in J:
for i in I:
x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j)) # i's demand satisfied from j
# total volume transported from plant j, corresponding (linearized) cost, selection variable:
X,F,z = {},{},{}
for j in J:
# add constraints for linking piecewise linear part:
X[j],F[j],z[j] = mult_selection(model,a[j],b[j])
X[j].ub = M[j]
# for i in I:
# model.addCons(
# x[i,j] <= \
# quicksum(min(d[i],a[j][k+1]) * z[j][k] for k in range(K)),\
# "Strong(%s,%s)"%(i,j))
# constraints for customer's demand satisfaction
for i in I:
model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)
for j in J:
model.addCons(quicksum(x[i,j] for i in I) == X[j], "Capacity(%s)"%j)
model.setObjective(quicksum(F[j] for j in J) +\
quicksum(c[i,j]*x[i,j] for j in J for i in I),\
"minimize")
model.data = x,X,F
return model
def flp_nonlinear_cc_dis_strong(I,J,d,M,f,c,K):
"""flp_nonlinear_bin -- use convex combination model, with binary variables
Parameters:
- I: set of customers
- J: set of facilities
- d[i]: demand for customer i
- M[j]: capacity of facility j
- f[j]: fixed cost for using a facility in point j
- c[i,j]: unit cost of servicing demand point i from facility j
- K: number of linear pieces for approximation of non-linear cost function
Returns a model, ready to be solved.
"""
a,b = {},{}
for j in J:
U = M[j]
L = 0
width = U/float(K)
a[j] = [k*width for k in range(K+1)]
b[j] = [f[j]*math.sqrt(value) for value in a[j]]
model = Model("nonlinear flp -- piecewise linear version with convex combination")
x = {}
for j in J:
for i in I:
x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j)) # i's demand satisfied from j
# total volume transported from plant j, corresponding (linearized) cost, selection variable:
X,F,z = {},{},{}
for j in J:
# add constraints for linking piecewise linear part:
X[j],F[j],z[j] = convex_comb_dis(model,a[j],b[j])
X[j].ub = M[j]
for i in I:
model.addCons(
x[i,j] <= \
quicksum(min(d[i],a[j][k+1]) * z[j][k] for k in range(K)),\
"Strong(%s,%s)"%(i,j))
# constraints for customer's demand satisfaction
for i in I:
model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)
for j in J:
model.addCons(quicksum(x[i,j] for i in I) == X[j], "Capacity(%s)"%j)
model.setObjective(quicksum(F[j] for j in J) +\
quicksum(c[i,j]*x[i,j] for j in J for i in I),\
"minimize")
model.data = x,X,F
return model
def flp_nonlinear_cc_dis(I,J,d,M,f,c,K):
"""flp_nonlinear_bin -- use convex combination model, with binary variables
Parameters:
- I: set of customers
- J: set of facilities
- d[i]: demand for customer i
- M[j]: capacity of facility j
- f[j]: fixed cost for using a facility in point j
- c[i,j]: unit cost of servicing demand point i from facility j
- K: number of linear pieces for approximation of non-linear cost function
Returns a model, ready to be solved.
"""
a,b = {},{}
for j in J:
U = M[j]
L = 0
width = U/float(K)
a[j] = [k*width for k in range(K+1)]
b[j] = [f[j]*math.sqrt(value) for value in a[j]]
model = Model("nonlinear flp -- piecewise linear version with convex combination")
x = {}
for j in J:
for i in I:
x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j)) # i's demand satisfied from j
# total volume transported from plant j, corresponding (linearized) cost, selection variable:
X,F,z = {},{},{}
for j in J:
# add constraints for linking piecewise linear part:
X[j],F[j],z[j] = convex_comb_dis(model,a[j],b[j])
X[j].ub = M[j]
# for i in I:
# model.addCons(
# x[i,j] <= \
# quicksum(min(d[i],a[j][k+1]) * z[j][k] for k in range(K)),\
# "Strong(%s,%s)"%(i,j))
# constraints for customer's demand satisfaction
for i in I:
model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)
for j in J:
model.addCons(quicksum(x[i,j] for i in I) == X[j], "Capacity(%s)"%j)
model.setObjective(quicksum(F[j] for j in J) +\
quicksum(c[i,j]*x[i,j] for j in J for i in I),\
"minimize")
model.data = x,X,F
return model
def flp_nonlinear_cc_dis_log(I,J,d,M,f,c,K):
"""flp_nonlinear_cc_dis_log -- convex combination model with logarithmic number of binary variables
Parameters:
- I: set of customers
- J: set of facilities
- d[i]: demand for customer i
- M[j]: capacity of facility j
- f[j]: fixed cost for using a facility in point j
- c[i,j]: unit cost of servicing demand point i from facility j
- K: number of linear pieces for approximation of non-linear cost function
Returns a model, ready to be solved.
"""
a,b = {},{}
for j in J:
U = M[j]
L = 0
width = U/float(K)
a[j] = [k*width for k in range(K+1)]
b[j] = [f[j]*math.sqrt(value) for value in a[j]]
model = Model("nonlinear flp -- convex combination model with logarithmic number of binary variables")
x = {}
for j in J:
for i in I:
x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j)) # i's demand satisfied from j
# total volume transported from plant j, corresponding (linearized) cost, selection variable:
X,F,yL,yR = {},{},{},{}
for j in J:
# add constraints for linking piecewise linear part:
X[j],F[j],yL[j],yR[j] = convex_comb_dis_log(model,a[j],b[j])
X[j].ub = M[j]
# for i in I:
# model.addCons(
# x[i,j] <= \
# quicksum(min(d[i],a[j][k+1]) * (yL[j][k]+yR[j][k]) for k in range(K)),\
# "Strong(%s,%s)"%(i,j))
# constraints for customer's demand satisfaction
for i in I:
model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)
for j in J:
model.addCons(quicksum(x[i,j] for i in I) == X[j], "Capacity(%s)"%j)
model.setObjective(quicksum(F[j] for j in J) +\
quicksum(c[i,j]*x[i,j] for j in J for i in I),\
"minimize")
model.data = x,X,F
return model
def flp_nonlinear_cc_agg(I,J,d,M,f,c,K):
"""flp_nonlinear_cc_agg -- aggregated convex combination model
Parameters:
- I: set of customers
- J: set of facilities
- d[i]: demand for customer i
- M[j]: capacity of facility j
- f[j]: fixed cost for using a facility in point j
- c[i,j]: unit cost of servicing demand point i from facility j
- K: number of linear pieces for approximation of non-linear cost function
Returns a model, ready to be solved.
"""
a,b = {},{}
for j in J:
U = M[j]
L = 0
width = U/float(K)
a[j] = [k*width for k in range(K+1)]
b[j] = [f[j]*math.sqrt(value) for value in a[j]]
model = Model("nonlinear flp -- piecewise linear aggregated convex combination")
x = {}
for j in J:
for i in I:
x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j)) # i's demand satisfied from j
# total volume transported from plant j, corresponding (linearized) cost, selection variable:
X,F,z = {},{},{}
for j in J:
# add constraints for linking piecewise linear part:
X[j],F[j],z[j] = convex_comb_agg(model,a[j],b[j])
X[j].ub = M[j]
# for i in I:
# model.addCons(
# x[i,j] <= \
# quicksum(min(d[i],a[j][k+1]) * z[j][k] for k in range(K)),\
# "Strong(%s,%s)"%(i,j))
# constraints for customer's demand satisfaction
for i in I:
model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)
for j in J:
model.addCons(quicksum(x[i,j] for i in I) == X[j], "Capacity(%s)"%j)
model.setObjective(quicksum(F[j] for j in J) +\
quicksum(c[i,j]*x[i,j] for j in J for i in I),\
"minimize")
model.data = x,X,F
return model
def flp_nonlinear_cc_agg_log(I,J,d,M,f,c,K):
"""flp_nonlinear_cc_agg_logg -- aggregated convex combination model, with log. binary variables
Parameters:
- I: set of customers
- J: set of facilities
- d[i]: demand for customer i
- M[j]: capacity of facility j
- f[j]: fixed cost for using a facility in point j
- c[i,j]: unit cost of servicing demand point i from facility j
- K: number of linear pieces for approximation of non-linear cost function
Returns a model, ready to be solved.
"""
a,b = {},{}
for j in J:
U = M[j]
L = 0
width = U/float(K)
a[j] = [k*width for k in range(K+1)]
b[j] = [f[j]*math.sqrt(value) for value in a[j]]
model = Model("nonlinear flp -- piecewise linear version with convex combination")
x = {}
for j in J:
for i in I:
x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j)) # i's demand satisfied from j
# total volume transported from plant j, corresponding (linearized) cost, selection variable:
X,F,y = {},{},{}
for j in J:
# add constraints for linking piecewise linear part:
X[j],F[j],y[j] = convex_comb_agg_log(model,a[j],b[j])
X[j].ub = M[j]
# for i in I:
# model.addCons(
# x[i,j] <= \
# quicksum(min(d[i],a[j][k+1]) * (y[j][k]+y[j][k+1]) for k in range(K)),\
# "Strong(%s,%s)"%(i,j))
# constraints for customer's demand satisfaction
for i in I:
model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)
for j in J:
model.addCons(quicksum(x[i,j] for i in I) == X[j], "Capacity(%s)"%j)
model.setObjective(quicksum(F[j] for j in J) +\
quicksum(c[i,j]*x[i,j] for j in J for i in I),\
"minimize")
model.data = x,X,F
return model
def flp_nonlinear_sos(I,J,d,M,f,c,K):
"""flp_nonlinear_sos -- use model with SOS constraints
Parameters:
- I: set of customers
- J: set of facilities
- d[i]: demand for customer i
- M[j]: capacity of facility j
- f[j]: fixed cost for using a facility in point j
- c[i,j]: unit cost of servicing demand point i from facility j
- K: number of linear pieces for approximation of non-linear cost function
Returns a model, ready to be solved.
"""
a,b = {},{}
for j in J:
U = M[j]
L = 0
width = U/float(K)
a[j] = [k*width for k in range(K+1)]
b[j] = [f[j]*math.sqrt(value) for value in a[j]]
model = Model("nonlinear flp -- use model with SOS constraints")
x = {}
for j in J:
for i in I:
x[i,j] = model.addVar(vtype="C", name="x(%s,%s)"%(i,j)) # i's demand satisfied from j
# total volume transported from plant j, corresponding (linearized) cost, selection variable:
X,F,z = {},{},{}
for j in J:
# add constraints for linking piecewise linear part:
X[j],F[j],z[j] = convex_comb_sos(model,a[j],b[j])
X[j].ub = M[j]
# for i in I:
# model.addCons(
# x[i,j] <= \
# quicksum(min(d[i],a[j][k+1]) * (z[j][k] + z[j][k+1])\
# for k in range(len(a[j])-1)),
# "Strong(%s,%s)"%(i,j))
# constraints for customer's demand satisfaction
for i in I:
model.addCons(quicksum(x[i,j] for j in J) == d[i], "Demand(%s)"%i)
for j in J:
model.addCons(quicksum(x[i,j] for i in I) == X[j], "Capacity(%s)"%j)
model.setObjective(quicksum(F[j] for j in J) +\
quicksum(c[i,j]*x[i,j] for j in J for i in I),\
"minimize")
model.data = x,X,F
return model
def distance(x1,y1,x2,y2):
return math.sqrt((x2-x1)**2 + (y2-y1)**2)
def make_data(n,m,same=True):
x,y = {},{}
if same == True:
I = range(1,n+1)
J = range(1,m+1)
for i in range(1,1+max(m,n)): # positions of the points in the plane
x[i] = random.random()
y[i] = random.random()
else:
I = range(1,n+1)
J = range(n+1,n+m+1)
for i in I: # positions of the points in the plane
x[i] = random.random()
y[i] = random.random()
for j in J: # positions of the points in the plane
x[j] = random.random()
y[j] = random.random()
f,c,d,M = {},{},{},{}
total_demand = 0.
for i in I:
for j in J:
c[i,j] = int(100*distance(x[i],y[i],x[j],y[j])) + 1
d[i] = random.randint(1,10)
total_demand += d[i]
total_cap = 0.
r = {}
for j in J:
r[j] = random.randint(0,m)
f[j] = random.randint(100,100+r[j]*m)
M[j] = 1 + 100+r[j]*m - f[j]
# M[j] = int(total_demand/m) + random.randint(1,m)
total_cap += M[j]
for j in J:
M[j] = int(M[j] * total_demand / total_cap + 1) + random.randint(0,r[j])
# print("%s\t%s\t%s" % (j,f[j],M[j])
# print("demand:",total_demand
# print("capacity:",sum([M[j] for j in J])
return I,J,d,M,f,c,x,y
def example():
I,d = multidict({1:80, 2:270, 3:250, 4:160, 5:180}) # demand
J,M,f = multidict({10:[500,100], 11:[500,100], 12:[500,100]}) # capacity, fixed costs
c = {(1,10):4, (1,11):6, (1,12):9, # transportation costs
(2,10):5, (2,11):4, (2,12):7,
(3,10):6, (3,11):3, (3,12):4,
(4,10):8, (4,11):5, (4,12):3,
(5,10):10, (5,11):8, (5,12):4,
}
x_pos = {1:0, 2:0, 3:0, 4:0, 5:0, 10:2, 11:2, 12:2} # positions of the points in the plane
y_pos = {1:2, 2:1, 3:0, 4:-1, 5:-2, 10:1, 11:0, 12:-1}
return I,J,d,M,f,c,x_pos,y_pos
if __name__ == "__main__":
# I,J,d,M,f,c,x_pos,y_pos = example()
random.seed(1)
n = 25
m = 5
I,J,d,M,f,c,x_pos,y_pos = make_data(n,m,same=False)
# from flp_make_data import read_orlib,read_cortinhal
# I,J,d,M,f,c,x_pos,y_pos = read_orlib("DATA/ORLIB/cap101.txt.gz")
# I,J,d,M,f,c,x_pos,y_pos = read_cortinhal("DATA/8_25/A8_25_11.DAT")
# I,J,d,M,f,c,x_pos,y_pos = example()
K = 4
print("demand:",d)
print("cost:",c)
print("fixed:",f)
print("capacity:",M)
# print("x:",x_pos
# print("y:",y_pos
print("number of intervals:",K)
print("\n\n\nflp: multiple selection")
model = flp_nonlinear_mselect(I,J,d,M,f,c,K)
x,X,F = model.data
model.hideOutput() # silent/verbose mode
model.optimize()
objMS = model.getObjVal()
print("Obj.",objMS)
print("\n\n\nflp: convex combination with binary variables")
model = flp_nonlinear_cc_dis(I,J,d,M,f,c,K)
x,X,F = model.data
model.hideOutput() # silent/verbose mode
model.optimize()
objCC = model.getObjVal()
print("Obj.",objCC)
print("\n\n\nflp: convex combination with logarithmic number of binary variables")
model = flp_nonlinear_cc_dis_log(I,J,d,M,f,c,K)
x,X,F = model.data
model.hideOutput() # silent/verbose mode
model.optimize()
objLOG = model.getObjVal()
print("Obj.",objLOG)
print("\n\n\nflp: model with SOS constraints")
model = flp_nonlinear_sos(I,J,d,M,f,c,K)
x,X,F = model.data
model.hideOutput() # silent/verbose mode
model.optimize()
objSOS = model.getObjVal()
print("Obj.",objSOS)
print("\n\n\nflp: aggregated CC model")
model = flp_nonlinear_cc_agg(I,J,d,M,f,c,K)
x,X,F = model.data
model.hideOutput() # silent/verbose mode
model.optimize()
objND = model.getObjVal()
print("Obj.",objND)
print("\n\n\nflp: aggregated CC model, log number variables")
model = flp_nonlinear_cc_agg_log(I,J,d,M,f,c,K)
x,X,F = model.data
model.hideOutput() # silent/verbose mode
model.optimize()
objNDlog = model.getObjVal()
print("Obj.",objNDlog)
EPS = 1.e-4
assert abs(objCC-objMS)<EPS and abs(objLOG-objMS)<EPS and abs(objSOS-objMS)<EPS\
and abs(objSOS-objND)<EPS and abs(objSOS-objNDlog)<EPS
edges = []
flow = {}
for (i,j) in sorted(x):
if model.getVal(x[i,j]) > EPS:
edges.append((i,j))
flow[(i,j)] = model.getVal(x[i,j])
print("\n\n\nflp: model with piecewise linear approximation of cost function")
print("Obj.",model.getObjVal(),"\nedges",sorted(edges))
print("flows:",flow)
    if x_pos is None:
exit(0)
try: # plot the result using networkx and matplotlib
import networkx as NX
import matplotlib.pyplot as P
P.clf()
G = NX.Graph()
facilities = J
client = I
G.add_nodes_from(facilities)
G.add_nodes_from(client)
for (i,j) in edges:
G.add_edge(i,j)
position = {}
for i in I + J:
position[i] = (x_pos[i],y_pos[i])
NX.draw(G,position,node_color="g",nodelist=client)
NX.draw(G,position,node_color="y",nodelist=facilities)
P.show()
except ImportError:
print("install 'networkx' and 'matplotlib' for plotting")
|
compiler/rule_translate.py
|
RAbraham/logica
| 1,434 |
131020
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Compiler of a single Logica rule to SQL."""
import collections
import copy
import string
import sys
if '.' not in __package__:
from common import color
from compiler import expr_translate
else:
from ..common import color
from ..compiler import expr_translate
xrange = range
def Indent2(s):
return '\n'.join(' ' + l for l in s.split('\n'))
class RuleCompileException(Exception):
"""Exception thrown when user-error is detected at rule-compile time."""
def __init__(self, message, rule_str):
super(RuleCompileException, self).__init__(message)
self.rule_str = rule_str
def ShowMessage(self, stream=sys.stderr):
print(color.Format('{underline}Compiling{end}:'), file=stream)
print(self.rule_str, file=stream)
print(color.Format('\n[ {error}Error{end} ] ') + str(self), file=stream)
def LogicaFieldToSqlField(logica_field):
if isinstance(logica_field, int):
# TODO: Ensure that no collision occurs.
return 'col%d' % logica_field
return logica_field
def HeadToSelect(head):
"""Converting a rule head to a SELECT representation."""
select = collections.OrderedDict()
aggregated_vars = []
for field_value in head['record']['field_value']:
k = field_value['field']
v = field_value['value']
if 'aggregation' in v:
select[k] = copy.deepcopy(v['aggregation']['expression'])
aggregated_vars.append(k)
else:
assert 'expression' in v, 'Bad select value: %s' % str(v)
select[k] = v['expression'] # <=> v as k
return (select, aggregated_vars)
def AllMentionedVariables(x, dive_in_combines=False):
"""Extracting all variables mentioned in an expression."""
r = []
if isinstance(x, dict) and 'variable' in x:
r.append(x['variable']['var_name'])
if isinstance(x, list):
for v in x:
r.extend(AllMentionedVariables(v, dive_in_combines))
if isinstance(x, dict):
for k in x:
# Variables mentioned in 'combine' expression may be resolved via tables
# of the 'combine' expression. So they are not to be included in the
# parent query.
if k != 'combine' or dive_in_combines:
r.extend(AllMentionedVariables(x[k], dive_in_combines))
return set(r)
def ReplaceVariable(old_var, new_expr, s):
"""Replacing a variable in expressoin s."""
if isinstance(s, dict):
member_index = sorted(s.keys(), key=str)
elif isinstance(s, list):
member_index = range(len(s))
else:
assert False, 'Replace should be called on list or dict. Got: %s' % str(s)
for k in member_index:
if (isinstance(s[k], dict) and
'variable' in s[k] and
s[k]['variable']['var_name'] == old_var):
s[k] = new_expr
if isinstance(s, dict):
for k in s:
if isinstance(s[k], dict) or isinstance(s[k], list):
ReplaceVariable(old_var, new_expr, s[k])
if isinstance(s, list):
for k in s:
if isinstance(k, dict) or isinstance(k, list):
ReplaceVariable(old_var, new_expr, k)
class NamesAllocator(object):
"""Allocator of unique names for tables and variables.
Also holds existing built-in function names.
"""
def __init__(self, custom_udfs=None):
self.aux_var_num = 0
self.table_num = 0
self.allocated_tables = set()
self.custom_udfs = custom_udfs or {}
def AllocateVar(self, hint=None):
v = 'x_%d' % self.aux_var_num
self.aux_var_num += 1
return v
def AllocateTable(self, hint_for_user=None):
"""Allocating a table name."""
allowed_chars = set(string.ascii_letters + string.digits + '_./')
if hint_for_user and len(hint_for_user) < 100:
suffix = ''.join(
('_' if c in ['.', '/'] else c)
for c in hint_for_user if c in allowed_chars)
else:
suffix = ''
if suffix and suffix not in self.allocated_tables:
t = suffix
else:
if suffix:
suffix = '_' + suffix
t = 't_%d%s' % (self.table_num, suffix)
self.table_num += 1
self.allocated_tables.add(t)
return t
def FunctionExists(self, function_name):
return (function_name in expr_translate.QL.BasisFunctions() or
function_name in self.custom_udfs)
class ExceptExpression(object):
"""Namespace for constructing and recognizing 'Except' expressions."""
@classmethod
def Build(cls, table_name, except_fields):
return '(SELECT AS STRUCT %s.* EXCEPT (%s))' % (table_name,
','.join(except_fields))
@classmethod
def Recognize(cls, field_name):
# Only 'Except' variables start with "(SELECT AS STRUCT".
return field_name.startswith('(SELECT AS STRUCT')
class RuleStructure(object):
"""Representing a single Logica rule structure.
Can convert itself into an SQL SELECT statement.
"""
def __init__(self, names_allocator=None, external_vocabulary=None,
custom_udfs=None):
# Name of this predicate.
self.this_predicate_name = ''
# Table name to table predicate.
self.tables = collections.OrderedDict()
# Table variable to clause variable map.
self.vars_map = {}
# Clause variable to one table variable.
self.inv_vars_map = {}
self.vars_unification = []
self.constraints = []
self.select = collections.OrderedDict()
self.unnestings = []
self.distinct_vars = []
names_allocator = names_allocator or NamesAllocator(custom_udfs=custom_udfs)
self.allocator = names_allocator
self.external_vocabulary = external_vocabulary
self.synonym_log = {}
    self.full_rule_text = None
self.distinct_denoted = None
def OwnVarsVocabulary(self):
"""Returns a map: logica variable -> SQL expression with the value."""
def TableAndFieldToSql(table, field):
if ExceptExpression.Recognize(field):
return field
elif table and field != '*':
return '%s.%s' % (table, field)
elif not table:
return field
else: # field == '*'
return table
return {k: TableAndFieldToSql(v[0], LogicaFieldToSqlField(v[1]))
for k, v in self.inv_vars_map.items()}
def VarsVocabulary(self):
r = {}
r.update(self.OwnVarsVocabulary())
if self.external_vocabulary:
r.update(self.external_vocabulary)
return r
def ExtractedVariables(self):
return set(self.VarsVocabulary().keys())
def InternalVariables(self):
return self.AllVariables() - self.ExtractedVariables()
def AllVariables(self):
r = set()
r |= AllMentionedVariables(self.select)
r |= AllMentionedVariables(self.vars_unification)
r |= AllMentionedVariables(self.constraints)
r |= AllMentionedVariables(self.unnestings)
return r
def SortUnnestings(self):
"""Sorts unnestings in dependency order."""
unnesting_of = {u[0]['variable']['var_name']: u
for u in self.unnestings}
unnesting_variables = set(unnesting_of)
depends_on = {u[0]['variable']['var_name']:
set(AllMentionedVariables(u[1], dive_in_combines=True)) &
unnesting_variables for u in self.unnestings}
unnested = set()
ordered_unnestings = []
while unnesting_of:
for v, u in sorted(unnesting_of.items()):
if depends_on[v] <= unnested:
ordered_unnestings.append(unnesting_of[v])
del unnesting_of[v]
unnested.add(v)
break
else:
raise RuleCompileException(
color.Format(
              'There seems to be a circular dependency of {warning}In{end} '
'calls. '
'This error might also come from injected sub-rules.'),
self.full_rule_text)
self.unnestings = ordered_unnestings
def ElliminateInternalVariables(self, assert_full_ellimination=False):
"""Elliminates internal variables via substitution."""
variables = self.InternalVariables()
while True:
done = True
for u in self.vars_unification:
for k, r in [['left', 'right'], ['right', 'left']]:
if u[k] == u[r]:
continue
ur_variables = AllMentionedVariables(u[r])
ur_variables_incl_combines = AllMentionedVariables(
u[r], dive_in_combines=True)
if (isinstance(u[k], dict) and
'variable' in u[k] and
u[k]['variable']['var_name'] in variables and
u[k]['variable']['var_name'] not in ur_variables_incl_combines and
(
ur_variables <= self.ExtractedVariables() or
not str(u[k]['variable']['var_name']).startswith('x_'))):
u_left = u[k]['variable']['var_name']
u_right = u[r]
if 'variable' in u_right:
l = self.synonym_log.get(u_right['variable']['var_name'], [])
l.append(u_left)
l.extend(self.synonym_log.get(u_left, []))
self.synonym_log[u_right['variable']['var_name']] = l
ReplaceVariable(u_left, u_right, self.unnestings)
ReplaceVariable(u_left, u_right, self.select)
ReplaceVariable(u_left, u_right, self.vars_unification)
ReplaceVariable(u_left, u_right, self.constraints)
done = False
if done:
variables = self.InternalVariables()
if assert_full_ellimination:
if True:
if variables:
violators = []
for v in variables:
violators.extend(self.synonym_log.get(v, []))
violators.append(v)
violators = {v for v in violators if not v.startswith('x_')}
assert violators, (
'Logica needs better error messages: purely internal '
'variable was not eliminated. It looks like you have '
'not passed a required argument to some called predicate. '
'Use --add_debug_info_to_var_names flag to make this message '
            'a little more informative. '
'Variables: %s, synonym_log: %s' % (str(variables),
str(self.synonym_log)))
# Remove disambiguation suffixes from variables not to confuse
# the user.
violators = {v.split(' # disambiguated')[0] for v in violators}
raise RuleCompileException(
color.Format(
'Found no way to assign variables: '
'{warning}{violators}{end}. '
'This error might also come from injected sub-rules.',
dict(violators=', '.join(sorted(violators)))),
self.full_rule_text)
else:
assert not variables, (
'Not all internal variables were eliminated. Violators:\n' +
',\n'.join(
'%s (aka %s)' % (v, self.synonym_log[v])
for v in variables) +
'\nRule: %s' % self)
else:
unassigned_variables = []
for v in variables:
if not v.startswith('x_'):
unassigned_variables.append(v)
# Remove disambiguation suffixes from variables not to confuse
# the user.
unassigned_variables = {v.split(' # disambiguated')[0]
for v in unassigned_variables}
if unassigned_variables:
raise RuleCompileException(
color.Format(
'Found no way to assign variables: '
'{warning}{violators}{end}. '
'This error might also come from injected sub-rules.',
dict(violators=', '.join(sorted(unassigned_variables)))),
self.full_rule_text)
break
def __str__(self):
return ('%s ==> \n'
'tables = %s,\n '
'vars_map = %s,\n '
'vars_unification = %s,\n '
'external_vocabulary = %s,\n '
'constraints = %s,\n '
'select = %s,\n '
'unnest = %s' % (
self.this_predicate_name,
self.tables, self.vars_map, self.vars_unification,
self.external_vocabulary,
self.constraints, self.select, self.unnestings))
def UnificationsToConstraints(self):
for u in self.vars_unification:
if u['left'] == u['right']:
continue
self.constraints.append({
'call': {
'predicate_name': '==',
'record': {
'field_value': [{
'field': 'left',
'value': {
'expression': u['left']
}
}, {
'field': 'right',
'value': {
'expression': u['right']
}
}]
}
}
})
def AsSql(self, subquery_encoder=None, flag_values=None):
"""Outputing SQL representing this structure."""
# pylint: disable=g-long-lambda
ql = expr_translate.QL(self.VarsVocabulary(), subquery_encoder,
lambda message:
RuleCompileException(message, self.full_rule_text),
flag_values,
custom_udfs=subquery_encoder.execution.custom_udfs,
dialect=subquery_encoder.execution.dialect)
r = 'SELECT\n'
fields = []
if not self.select:
raise RuleCompileException(
color.Format(
'Tables with {warning}no columns{end} are not allowed in '
'StandardSQL, so they are not allowed in Logica.'),
self.full_rule_text)
for k, v in self.select.items():
if k == '*':
fields.append('%s.*' % ql.ConvertToSql(v))
else:
fields.append('%s AS %s' % (ql.ConvertToSql(v), LogicaFieldToSqlField(k)))
r += ',\n'.join(' ' + f for f in fields)
if (self.tables or self.unnestings or
self.constraints or self.distinct_denoted):
r += '\nFROM\n'
tables = []
for k, v in self.tables.items():
if subquery_encoder:
# Note that we are passing external_vocabulary, not VarsVocabulary
# here. I.e. if this is a sub-query then variables of outer tables
# can be used.
sql = subquery_encoder.TranslateTable(v, self.external_vocabulary)
if not sql:
raise RuleCompileException(
color.Format(
'Rule uses table {warning}{table}{end}, which is not '
'defined. External tables can not be used in '
'{warning}\'testrun\'{end} mode. This error may come '
'from injected sub-rules.',
dict(table=v)), self.full_rule_text)
if sql != k:
tables.append(sql + ' AS ' + k)
else:
tables.append(sql)
self.SortUnnestings()
for element, the_list in self.unnestings:
tables.append(
subquery_encoder.execution.dialect.UnnestPhrase().format(
ql.ConvertToSql(the_list), ql.ConvertToSql(element)))
if not tables:
tables.append('(SELECT "singleton" as s) as unused_singleton')
from_str = ', '.join(tables)
# Indent the from_str.
from_str = '\n'.join(' ' + l for l in from_str.split('\n'))
r += from_str
if self.constraints:
r += '\nWHERE\n'
constraints = []
for c in self.constraints:
constraints.append(ql.ConvertToSql(c))
r += ' AND\n'.join(map(Indent2, constraints))
if self.distinct_vars:
ordered_distinct_vars = [
v for v in self.select.keys() if v in self.distinct_vars]
r += '\nGROUP BY '
if subquery_encoder.execution.dialect.GroupBySpecBy() == 'name':
r += ', '.join(map(LogicaFieldToSqlField, ordered_distinct_vars))
elif subquery_encoder.execution.dialect.GroupBySpecBy() == 'index':
selected_fields = list(self.select.keys())
r += ', '.join(str(selected_fields.index(v) + 1)
for v in ordered_distinct_vars)
elif subquery_encoder.execution.dialect.GroupBySpecBy() == 'expr':
r += ', '.join(
ql.ConvertToSql(self.select[k]) for k in ordered_distinct_vars
)
else:
assert False, 'Broken dialect %s, group by spec: %s' % (
subquery_encoder.execution.dialect.Name(),
subquery_encoder.execution.dialect.GroupBySpecBy())
return r
def ExtractPredicateStructure(c, s):
"""Updating RuleStructure s with a predicate call."""
predicate = c['predicate_name']
if predicate in (
'<=', '<', '>', '>=', '!=', '&&', '||', '!', 'IsNull', 'Like',
'Constraint'):
s.constraints.append({'call': c})
return
table_name = s.allocator.AllocateTable(predicate)
s.tables[table_name] = predicate
for field_value in c['record']['field_value']:
assert 'field' in field_value, ('Corrupt record: %s' % c['record'])
if 'except' in field_value:
table_var = ExceptExpression.Build(table_name, field_value['except'])
else:
table_var = field_value['field']
expr = field_value['value']['expression']
var_name = s.allocator.AllocateVar('%s_%s' % (table_name, table_var))
s.vars_map[table_name, table_var] = var_name
s.inv_vars_map[var_name] = (table_name, table_var)
s.vars_unification.append(
{
'left': {'variable': {'var_name': var_name}},
'right': expr
})
def ExtractInclusionStructure(inclusion, s):
"""Updating RuleStructure s with an inclusion."""
# Handling inclusion as a WHERE constraint.
if 'call' in inclusion['list']:
if inclusion['list']['call']['predicate_name'] == 'Container':
s.constraints.append({
'call': {
'predicate_name': 'In',
'record': {
'field_value': [
{
'field': 'left',
'value': {'expression': inclusion['element']}
},
{
'field': 'right',
'value': {'expression': inclusion['list']}
}
]
}
}
})
return
# Handling inclusion as an UNNEST.
var_name = s.allocator.AllocateVar('unnest_`%s`' % inclusion['element'])
s.vars_map[None, var_name] = var_name
s.inv_vars_map[var_name] = (None, var_name)
s.unnestings.append([{'variable': {'var_name': var_name}}, inclusion['list']])
s.vars_unification.append({
'left': inclusion['element'],
'right': {
'call': {
'predicate_name': 'ValueOfUnnested',
'record': {
'field_value': [{
'field': 0,
'value': {
'expression': {
'variable': {
'var_name': var_name
}
}
}
}]
}
}
}
})
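# Illustrative summary (a sketch added for clarity, not part of the original
# module): an inclusion `x in Container(...)` becomes an `In` constraint in
# s.constraints, while `x in some_list` becomes an UNNEST entry in
# s.unnestings plus a unification `x == ValueOfUnnested(<fresh unnest var>)`
# in s.vars_unification, matching the two branches above.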
def ExtractConjunctiveStructure(conjuncts, s):
"""Updates RuleStructure with the conjuncts."""
for c in conjuncts:
if 'predicate' in c:
ExtractPredicateStructure(c['predicate'], s)
elif 'unification' in c:
if ('variable' in c['unification']['right_hand_side'] or
'variable' in c['unification']['left_hand_side']):
s.vars_unification.append({
'left': c['unification']['left_hand_side'],
'right': c['unification']['right_hand_side']})
else:
if (c['unification']['left_hand_side'] !=
c['unification']['right_hand_side']):
s.constraints.append({
'call': {
'predicate_name': '==',
'record': {
'field_value': [
{
'field': 'left',
'value': {
'expression':
c['unification']['left_hand_side']
}
},
{
'field': 'right',
'value': {
'expression':
c['unification']['right_hand_side']
}
}
]
}
}
})
elif 'inclusion' in c:
ExtractInclusionStructure(c['inclusion'], s)
else:
assert False, 'Unsupported conjunct: %s' % c
def HasCombine(r):
"""Whether structure involves Combine predicate."""
if isinstance(r, dict):
member_index = sorted(r.keys())
elif isinstance(r, list):
member_index = range(len(r))
else:
assert False, (
'HasCombine should be called on list or dict. Got: %s' % str(r))
if isinstance(r, dict):
if 'predicate_name' in r and r['predicate_name'] == 'Combine':
return True
for k in member_index:
if isinstance(r[k], dict) or isinstance(r[k], list):
if HasCombine(r[k]):
return True
return False
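# Usage sketch (illustration only): HasCombine walks nested dicts and lists
# and reports whether any node calls the Combine predicate, e.g.
#   HasCombine({'body': [{'predicate_name': 'Combine'}]})  # -> True
#   HasCombine({'body': [{'predicate_name': 'Agg'}]})      # -> False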
def AllRecordFields(record):
result = []
for field_value in record['field_value']:
result.append(field_value['field'])
return result
def InlinePredicateValuesRecursively(r, names_allocator, conjuncts):
"""Replaces expression predicate calls with logica_value column."""
if isinstance(r, dict):
member_index = sorted(r.keys())
elif isinstance(r, list):
member_index = range(len(r))
else:
assert False, (
'InlinePredicateValuesRecursively should be called on list or dict. '
'Got: %s' % str(r))
for k in member_index:
if k != 'combine':
if isinstance(r[k], dict) or isinstance(r[k], list):
InlinePredicateValuesRecursively(r[k], names_allocator, conjuncts)
if isinstance(r, dict):
if 'call' in r:
if not names_allocator.FunctionExists(r['call']['predicate_name']):
aux_var = names_allocator.AllocateVar('inline')
r_predicate = {}
r_predicate['predicate'] = copy.deepcopy(r['call'])
r_predicate['predicate']['record']['field_value'].append({
'field': 'logica_value',
'value': {'expression': {'variable': {'var_name': aux_var}}}
})
del r['call']
r['variable'] = {'var_name': aux_var}
conjuncts.append(r_predicate)
def InlinePredicateValues(rule, names_allocator):
extra_conjuncts = []
InlinePredicateValuesRecursively(rule, names_allocator, extra_conjuncts)
if extra_conjuncts:
conjuncts = rule.get('body', {}).get('conjunction', {}).get('conjunct', [])
conjuncts.extend(extra_conjuncts)
rule['body'] = {'conjunction': {'conjunct': conjuncts}}
def GetTreeOfCombines(rule, tree=None):
"""Get the tree structure of combines in the rule syntax subtree."""
if not tree:
tree = {'rule': rule, 'variables': set(), 'subtrees': []}
if isinstance(rule, list):
for v in rule:
tree = GetTreeOfCombines(v, tree)
if isinstance(rule, dict):
if 'variable' in rule:
variables = tree['variables']
variables.add(rule['variable']['var_name'])
for k in rule:
      # Variables mentioned in a 'combine' expression may be resolved via the
      # tables of that 'combine' expression, so they are not to be included
      # in the parent query.
if k != 'combine':
tree = GetTreeOfCombines(rule[k], tree)
else:
subtree = GetTreeOfCombines(rule[k])
subtrees = tree['subtrees']
subtrees.append(subtree)
return tree
def DisambiguateCombineVariables(rule, names_allocator):
"""Disambiguate variables in combine expressions.
Variables of the same name in different combine statements are actually
different. The same name becomes a problem if one combine statement is
substituted into another when unifications are processed.
This function appends a disambiguation suffix to all variables first
mentioned in combine statements.
Args:
rule: A rule to process.
names_allocator: An execution level allocator of variable names.
"""
def Replace(tree, outer_variables):
"""Replace all variables with their disambiguated counterparts."""
variables = tree['variables']
introduced_variables = variables - outer_variables
all_variables = variables | outer_variables
for v in introduced_variables:
if '# disambiguated with' in v:
# This variable was already disambiguated.
        # We get here when ExtractRuleStructure is called on the combine
        # expression itself.
continue
new_name = '%s # disambiguated with %s' % (
v, names_allocator.AllocateVar('combine_dis'))
ReplaceVariable(v, {'variable': {'var_name': new_name}}, tree['rule'])
for s in tree['subtrees']:
Replace(s, all_variables)
tree = GetTreeOfCombines(rule)
top_variables = tree['variables']
for t in tree['subtrees']:
Replace(t, top_variables)
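# Sketch of the intended effect (illustration only): a variable first
# mentioned inside a combine expression, say `x`, is renamed to something
# like 'x # disambiguated with <fresh name from AllocateVar("combine_dis")>',
# while variables already bound in the enclosing rule keep their names, so
# equally named variables in unrelated combine expressions can no longer
# unify with each other.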
def ExtractRuleStructure(rule, names_allocator=None, external_vocabulary=None):
"""Extracts RuleStructure from rule."""
rule = copy.deepcopy(rule)
  # Not disambiguating if this rule is extracting the structure of the combine
  # itself, as variables of this combine were already disambiguated from the
  # parent rule.
if rule['head']['predicate_name'] != 'Combine':
DisambiguateCombineVariables(rule, names_allocator)
s = RuleStructure(names_allocator, external_vocabulary)
InlinePredicateValues(rule, names_allocator)
s.full_rule_text = rule['full_text']
s.this_predicate_name = rule['head']['predicate_name']
(s.select, aggregated_vars) = HeadToSelect(rule['head'])
# Adding internal variable unification with select arguments to avoid
# confusion of user variables between injected predicates.
for k, expr in s.select.items():
if 'variable' in expr:
s.vars_unification.append({
'left': expr,
'right': {'variable': {'var_name': names_allocator.AllocateVar(
'extract_%s_%s' % (s.this_predicate_name, k))}}})
if 'body' in rule:
ExtractConjunctiveStructure(rule['body']['conjunction']['conjunct'], s)
distinct_denoted = 'distinct_denoted' in rule
s.distinct_denoted = distinct_denoted
if aggregated_vars and not distinct_denoted:
raise RuleCompileException(
color.Format(
'Aggregating predicate must be {warning}distinct{end} denoted.'),
s.full_rule_text)
if distinct_denoted:
s.distinct_vars = sorted(
list(set(s.select.keys()) - set(aggregated_vars)), key=str)
return s
|
models.py
|
OpnTec/open-event-scraper
| 1,946 |
131021
|
class Track(object):
id = 0
header_line = 1
filename = ""
key_color = "#FF4D55"
name = ""
description = ""
track_image_url = "http://lorempixel.com/400/200"
location = ""
gid = ""
order = -1
def __init__(self, id, name, header_line, key_color, location, gid, order):
super(Track, self).__init__()
self.id = id
self.name = name
self.header_line = header_line
self.key_color = key_color
self.track_image_url = "http://lorempixel.com/400/200"
self.location = location
self.gid = gid
self.order = order
class Service(object):
id = 0
service = ""
url = ""
def __init__(self, id, service, url):
super(Service, self).__init__()
self.id = id
self.service = service
self.url = url
class LogoIco(object):
logo_url = ""
ico_url = ""
main_page_url = ""
def __init__(self, logo_url, ico_url, main_page_url):
super(LogoIco, self).__init__()
self.logo_url = logo_url
self.ico_url = ico_url
self.main_page_url = main_page_url
class Speaker(object):
def __init__(self):
super(Speaker, self).__init__()
class Copyright(object):
def __init__(self):
super(Copyright, self).__init__()
class Session(object):
def __init__(self):
super(Session, self).__init__()
class Sponsor(object):
def __init__(self):
super(Sponsor, self).__init__()
class Microlocation(object):
def __init__(self):
super(Microlocation, self).__init__()
|
django/CVE-2021-35042/web/vuln/apps.py
|
nobgr/vulhub
| 9,681 |
131028
|
<reponame>nobgr/vulhub<gh_stars>1000+
from django.apps import AppConfig
class VulnConfig(AppConfig):
name = 'vuln'
default_auto_field = 'django.db.models.BigAutoField'
|
pytorch/main.py
|
bangab/GMVAE
| 177 |
131032
|
"""
---------------------------------------------------------------------
-- Author: <NAME>
---------------------------------------------------------------------
Main file to execute the model on the MNIST dataset
"""
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import argparse
import random
import numpy as np
import os
import torch
from torchvision import datasets, transforms
from torch.utils.data.sampler import SubsetRandomSampler
import torch.utils.data
from model.GMVAE import *
#########################################################
## Input Parameters
#########################################################
parser = argparse.ArgumentParser(description='PyTorch Implementation of DGM Clustering')
## Used only in notebooks
parser.add_argument('-f', '--file',
help='Path for input file. First line should contain number of lines to search in')
## Dataset
parser.add_argument('--dataset', type=str, choices=['mnist'],
default='mnist', help='dataset (default: mnist)')
parser.add_argument('--seed', type=int, default=0, help='random seed (default: 0)')
## GPU
parser.add_argument('--cuda', type=int, default=1,
help='use of cuda (default: 1)')
parser.add_argument('--gpuID', type=int, default=0,
help='set gpu id to use (default: 0)')
## Training
parser.add_argument('--epochs', type=int, default=100,
                    help='number of total epochs to run (default: 100)')
parser.add_argument('--batch_size', default=64, type=int,
help='mini-batch size (default: 64)')
parser.add_argument('--batch_size_val', default=200, type=int,
help='mini-batch size of validation (default: 200)')
parser.add_argument('--learning_rate', default=1e-3, type=float,
help='learning rate (default: 0.001)')
parser.add_argument('--decay_epoch', default=-1, type=int,
help='Reduces the learning rate every decay_epoch')
parser.add_argument('--lr_decay', default=0.5, type=float,
help='Learning rate decay for training (default: 0.5)')
## Architecture
parser.add_argument('--num_classes', type=int, default=10,
help='number of classes (default: 10)')
parser.add_argument('--gaussian_size', default=64, type=int,
help='gaussian size (default: 64)')
parser.add_argument('--input_size', default=784, type=int,
help='input size (default: 784)')
## Partition parameters
parser.add_argument('--train_proportion', default=1.0, type=float,
help='proportion of examples to consider for training only (default: 1.0)')
## Gumbel parameters
parser.add_argument('--init_temp', default=1.0, type=float,
help='Initial temperature used in gumbel-softmax (recommended 0.5-1.0, default:1.0)')
parser.add_argument('--decay_temp', default=1, type=int,
help='Set 1 to decay gumbel temperature at every epoch (default: 1)')
parser.add_argument('--hard_gumbel', default=0, type=int,
                    help='Set 1 to use the hard version of gumbel-softmax (default: 0)')
parser.add_argument('--min_temp', default=0.5, type=float,
help='Minimum temperature of gumbel-softmax after annealing (default: 0.5)' )
parser.add_argument('--decay_temp_rate', default=0.013862944, type=float,
help='Temperature decay rate at every epoch (default: 0.013862944)')
## Loss function parameters
parser.add_argument('--w_gauss', default=1, type=float,
help='weight of gaussian loss (default: 1)')
parser.add_argument('--w_categ', default=1, type=float,
help='weight of categorical loss (default: 1)')
parser.add_argument('--w_rec', default=1, type=float,
help='weight of reconstruction loss (default: 1)')
parser.add_argument('--rec_type', type=str, choices=['bce', 'mse'],
default='bce', help='desired reconstruction loss function (default: bce)')
## Others
parser.add_argument('--verbose', default=0, type=int,
help='print extra information at every epoch.(default: 0)')
parser.add_argument('--random_search_it', type=int, default=20,
help='iterations of random search (default: 20)')
args = parser.parse_args()
if args.cuda == 1:
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpuID)
## Random Seed
SEED = args.seed
np.random.seed(SEED)
random.seed(SEED)
torch.manual_seed(SEED)
if args.cuda:
torch.cuda.manual_seed(SEED)
#########################################################
## Read Data
#########################################################
if args.dataset == "mnist":
print("Loading mnist dataset...")
# Download or load downloaded MNIST dataset
train_dataset = datasets.MNIST('./mnist', train=True, download=True, transform=transforms.ToTensor())
test_dataset = datasets.MNIST('./mnist', train=False, transform=transforms.ToTensor())
#########################################################
## Data Partition
#########################################################
def partition_dataset(n, proportion=0.8):
train_num = int(n * proportion)
indices = np.random.permutation(n)
train_indices, val_indices = indices[:train_num], indices[train_num:]
return train_indices, val_indices
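# Sanity sketch (illustration only): for the full MNIST training set,
# partition_dataset(60000, 0.8) yields 48000 shuffled indices for training
# and the remaining 12000 for validation.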
if args.train_proportion == 1.0:
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size_val, shuffle=False)
val_loader = test_loader
else:
train_indices, val_indices = partition_dataset(len(train_dataset), args.train_proportion)
# Create data loaders for train, validation and test datasets
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, sampler=SubsetRandomSampler(train_indices))
val_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size_val, sampler=SubsetRandomSampler(val_indices))
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size_val, shuffle=False)
## Calculate flatten size of each input data
args.input_size = np.prod(train_dataset[0][0].size())
print(args.input_size)
#########################################################
## Train and Test Model
#########################################################
gmvae = GMVAE(args)
## Training Phase
history_loss = gmvae.train(train_loader, val_loader)
## Testing Phase
accuracy, nmi = gmvae.test(test_loader)
print("Testing phase...")
print("Accuracy: %.5lf, NMI: %.5lf" % (accuracy, nmi) )
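# Example invocation (a sketch of typical usage; the chosen flag values are
# assumptions that simply restate the defaults declared above):
#   python main.py --dataset mnist --epochs 100 --batch_size 64 \
#       --num_classes 10 --gaussian_size 64 --cuda 1 --gpuID 0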
|
tests/test_client.py
|
ysinjab/erppeek
| 134 |
131043
|
<filename>tests/test_client.py
# -*- coding: utf-8 -*-
import mock
from mock import call, sentinel, ANY
import erppeek
from ._common import XmlRpcTestCase, OBJ
AUTH = sentinel.AUTH
ID1, ID2 = 4001, 4002
STABLE = ['uninstallable', 'uninstalled', 'installed']
def _skip_test(test_case):
pass
class IdentDict(object):
def __init__(self, _id):
self._id = _id
def __repr__(self):
return 'IdentDict(%s)' % (self._id,)
def __getitem__(self, key):
return (key == 'id') and self._id or ('v_%s_%s' % (key, self._id))
def __eq__(self, other):
return self._id == other._id
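# Illustration (not part of the original tests): IdentDict fakes a record as
# returned by 'read', e.g.
#   IdentDict(4001)['id']    # -> 4001
#   IdentDict(4001)['city']  # -> 'v_city_4001'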
DIC1 = IdentDict(ID1)
DIC2 = IdentDict(ID2)
class TestService(XmlRpcTestCase):
"""Test the Service class."""
protocol = 'xmlrpc'
def _patch_service(self):
return mock.patch('erppeek.ServerProxy._ServerProxy__request').start()
def _get_client(self):
client = mock.Mock()
client._server = 'http://127.0.0.1:8069/%s' % self.protocol
proxy = getattr(erppeek.Client, '_proxy_%s' % self.protocol)
client._proxy = proxy.__get__(client, erppeek.Client)
return client
def test_service(self):
client = self._get_client()
svc_alpha = erppeek.Service(client, 'alpha', ['beta'])
self.assertIn('alpha', str(svc_alpha.beta))
self.assertRaises(AttributeError, getattr, svc_alpha, 'theta')
if self.protocol == 'xmlrpc':
self.assertIn('_ServerProxy__request', str(svc_alpha.beta(42)))
self.assertCalls(call('beta', (42,)), "().__str__")
else:
self.assertCalls()
self.assertOutput('')
def test_service_openerp(self):
client = self._get_client()
def get_proxy(name, methods=None):
if methods is None:
methods = erppeek._methods.get(name, ())
return erppeek.Service(client, name, methods, verbose=False)
self.assertIn('common', str(get_proxy('common').login))
login = get_proxy('common').login('aaa')
with self.assertRaises(AttributeError):
get_proxy('common').non_existent
if self.protocol == 'xmlrpc':
self.assertIn('_ServerProxy__request', str(login))
self.assertCalls(call('login', ('aaa',)), 'call().__str__')
else:
self.assertEqual(login, 'JSON_RESULT',)
self.assertCalls(ANY)
self.assertOutput('')
def test_service_openerp_client(self, server_version=11.0):
server = 'http://127.0.0.1:8069/%s' % self.protocol
return_values = [str(server_version), ['newdb'], 1]
if self.protocol == 'jsonrpc':
return_values = [{'result': rv} for rv in return_values]
self.service.side_effect = return_values
client = erppeek.Client(server, 'newdb', 'usr', 'pss')
self.service.return_value = ANY
self.assertIsInstance(client.db, erppeek.Service)
self.assertIsInstance(client.common, erppeek.Service)
self.assertIsInstance(client._object, erppeek.Service)
if server_version >= 11.0:
self.assertIs(client._report, None)
self.assertIs(client._wizard, None)
elif server_version >= 7.0:
self.assertIsInstance(client._report, erppeek.Service)
self.assertIs(client._wizard, None)
else:
self.assertIsInstance(client._report, erppeek.Service)
self.assertIsInstance(client._wizard, erppeek.Service)
self.assertIn('/%s|db' % self.protocol, str(client.db.create_database))
self.assertIn('/%s|db' % self.protocol, str(client.db.db_exist))
if server_version >= 8.0:
self.assertRaises(AttributeError, getattr,
client.db, 'create')
self.assertRaises(AttributeError, getattr,
client.db, 'get_progress')
else:
self.assertIn('/%s|db' % self.protocol, str(client.db.create))
self.assertIn('/%s|db' % self.protocol, str(client.db.get_progress))
self.assertCalls(ANY, ANY, ANY)
self.assertOutput('')
def test_service_openerp_50_to_70(self):
self.test_service_openerp_client(server_version=7.0)
self.test_service_openerp_client(server_version=6.1)
self.test_service_openerp_client(server_version=6.0)
self.test_service_openerp_client(server_version=5.0)
def test_service_odoo_80_90(self):
self.test_service_openerp_client(server_version=9.0)
self.test_service_openerp_client(server_version=8.0)
def test_service_odoo_10_11(self):
self.test_service_openerp_client(server_version=11.0)
self.test_service_openerp_client(server_version=10.0)
class TestServiceJsonRpc(TestService):
"""Test the Service class with JSON-RPC."""
protocol = 'jsonrpc'
def _patch_service(self):
return mock.patch('erppeek.http_post', return_value={'result': 'JSON_RESULT'}).start()
class TestCreateClient(XmlRpcTestCase):
"""Test the Client class."""
server_version = '6.1'
startup_calls = (
call(ANY, 'db', ANY, verbose=ANY),
'db.server_version',
call(ANY, 'db', ANY, verbose=ANY),
call(ANY, 'common', ANY, verbose=ANY),
call(ANY, 'object', ANY, verbose=ANY),
call(ANY, 'report', ANY, verbose=ANY),
call(ANY, 'wizard', ANY, verbose=ANY),
'db.list',
)
def test_create(self):
self.service.db.list.return_value = ['newdb']
self.service.common.login.return_value = 1
client = erppeek.Client('http://127.0.0.1:8069', 'newdb', 'usr', 'pss')
expected_calls = self.startup_calls + (
('common.login', 'newdb', 'usr', 'pss'),)
self.assertIsInstance(client, erppeek.Client)
self.assertCalls(*expected_calls)
self.assertEqual(
client._login.cache,
{('http://127.0.0.1:8069/xmlrpc', 'newdb', 'usr'): (1, 'pss')})
self.assertOutput('')
def test_create_getpass(self):
getpass = mock.patch('getpass.getpass',
return_value='password').start()
self.service.db.list.return_value = ['database']
expected_calls = self.startup_calls + (
('common.login', 'database', 'usr', 'password'),)
# A: Invalid login
self.assertRaises(erppeek.Error, erppeek.Client,
'http://127.0.0.1:8069', 'database', 'usr')
self.assertCalls(*expected_calls)
self.assertEqual(getpass.call_count, 1)
# B: Valid login
self.service.common.login.return_value = 17
getpass.reset_mock()
client = erppeek.Client('http://127.0.0.1:8069', 'database', 'usr')
self.assertIsInstance(client, erppeek.Client)
self.assertCalls(*expected_calls)
self.assertEqual(getpass.call_count, 1)
def test_create_with_cache(self):
self.service.db.list.return_value = ['database']
self.assertFalse(erppeek.Client._login.cache)
erppeek.Client._login.cache[
('http://127.0.0.1:8069/xmlrpc', 'database', 'usr')] = (1, 'password')
client = erppeek.Client('http://127.0.0.1:8069', 'database', 'usr')
expected_calls = self.startup_calls + (
('object.execute', 'database', 1, 'password',
'res.users', 'fields_get_keys'),)
self.assertIsInstance(client, erppeek.Client)
self.assertCalls(*expected_calls)
self.assertOutput('')
def test_create_from_config(self):
env_tuple = ('http://127.0.0.1:8069', 'database', 'usr', None)
read_config = mock.patch('erppeek.read_config',
return_value=env_tuple).start()
getpass = mock.patch('getpass.getpass',
return_value='password').start()
self.service.db.list.return_value = ['database']
expected_calls = self.startup_calls + (
('common.login', 'database', 'usr', 'password'),)
# A: Invalid login
self.assertRaises(erppeek.Error, erppeek.Client.from_config, 'test')
self.assertCalls(*expected_calls)
self.assertEqual(read_config.call_count, 1)
self.assertEqual(getpass.call_count, 1)
# B: Valid login
self.service.common.login.return_value = 17
read_config.reset_mock()
getpass.reset_mock()
client = erppeek.Client.from_config('test')
self.assertIsInstance(client, erppeek.Client)
self.assertCalls(*expected_calls)
self.assertEqual(read_config.call_count, 1)
self.assertEqual(getpass.call_count, 1)
def test_create_invalid(self):
# Without mock
self.service.stop()
self.assertRaises(EnvironmentError, erppeek.Client, 'dsadas')
self.assertOutput('')
class TestSampleSession(XmlRpcTestCase):
server_version = '6.1'
server = 'http://127.0.0.1:8069'
database = 'database'
user = 'user'
password = '<PASSWORD>'
uid = 1
def test_simple(self):
self.service.object.execute.side_effect = [
42, [{'model': 'res.users'}], 4, sentinel.IDS, sentinel.CRON]
c = self.client
res_users = c.model('res.users')
self.assertIs(c.ResUsers, res_users)
self.assertEqual(c.ResUsers.count(), 4)
self.assertEqual(c.read('ir.cron', ['active = False'],
'active function'), sentinel.CRON)
self.assertCalls(
OBJ('ir.model', 'search', [('model', 'like', 'res.users')]),
OBJ('ir.model', 'read', 42, ('model',)),
OBJ('res.users', 'search_count', []),
OBJ('ir.cron', 'search', [('active', '=', False)]),
OBJ('ir.cron', 'read', sentinel.IDS, ['active', 'function']),
)
self.assertOutput('')
def test_list_modules(self):
self.service.object.execute.side_effect = [
['delivery_a', 'delivery_b'],
[{'state': 'not installed', 'name': 'dummy'}]]
modules = self.client.modules('delivery')
self.assertIsInstance(modules, dict)
self.assertIn('not installed', modules)
imm = ('object.execute', AUTH, 'ir.module.module')
self.assertCalls(
imm + ('search', [('name', 'like', 'delivery')]),
imm + ('read', ['delivery_a', 'delivery_b'], ['name', 'state']),
)
self.assertOutput('')
def test_module_upgrade(self):
self.service.object.execute.side_effect = [
(42, 0), [42], [], ANY, [42],
[{'id': 42, 'state': ANY, 'name': ANY}], ANY]
result = self.client.upgrade('dummy')
self.assertIsNone(result)
imm = ('object.execute', AUTH, 'ir.module.module')
bmu = ('object.execute', AUTH, 'base.module.upgrade')
self.assertCalls(
imm + ('update_list',),
imm + ('search', [('name', 'in', ('dummy',))]),
imm + ('search', [('state', 'not in', STABLE)]),
imm + ('button_upgrade', [42]),
imm + ('search', [('state', 'not in', STABLE)]),
imm + ('read', [42], ['name', 'state']),
bmu + ('upgrade_module', []),
)
self.assertOutput(ANY)
class TestSampleSession50(TestSampleSession):
server_version = '5.0'
def test_module_upgrade(self):
self.service.object.execute.side_effect = [
(42, 0), [42], [], ANY, [42],
[{'id': 42, 'state': ANY, 'name': ANY}]]
self.service.wizard.create.return_value = 17
self.service.wizard.execute.return_value = {'state': (['config'],)}
result = self.client.upgrade('dummy')
self.assertIsNone(result)
imm = ('object.execute', AUTH, 'ir.module.module')
self.assertCalls(
imm + ('update_list',),
imm + ('search', [('name', 'in', ('dummy',))]),
imm + ('search', [('state', 'not in', STABLE)]),
imm + ('button_upgrade', [42]),
imm + ('search', [('state', 'not in', STABLE)]),
imm + ('read', [42], ['name', 'state']),
('wizard.create', AUTH, 'module.upgrade'),
('wizard.execute', AUTH, 17, {}, 'start', None),
)
self.assertOutput(ANY)
class TestClientApi(XmlRpcTestCase):
"""Test the Client API."""
server_version = '6.1'
server = 'http://127.0.0.1:8069'
database = 'database'
user = 'user'
password = '<PASSWORD>'
uid = 1
def obj_exec(self, *args):
if args[4] == 'search':
return [ID2, ID1]
if args[4] == 'read':
return [IdentDict(res_id) for res_id in args[5][::-1]]
return sentinel.OTHER
def test_create_database(self):
create_database = self.client.create_database
self.client.db.list.side_effect = [['db1'], ['db2']]
create_database('abc', 'db1')
create_database('xyz', 'db2', user_password='<PASSWORD>', lang='fr_FR')
self.assertCalls(
call.db.create_database('abc', 'db1', False, 'en_US', 'admin'),
call.db.list(),
call.common.login('db1', 'admin', 'admin'),
call.db.create_database('xyz', 'db2', False, 'fr_FR', 'secret'),
call.db.list(),
call.common.login('db2', 'admin', 'secret'),
)
self.assertOutput('')
if float(self.server_version) < 9.0:
self.assertRaises(erppeek.Error, create_database, 'xyz', 'db2', user_password='<PASSWORD>', lang='fr_FR', login='other_login', country_code='CA')
self.assertRaises(erppeek.Error, create_database, 'xyz', 'db2', login='other_login')
self.assertRaises(erppeek.Error, create_database, 'xyz', 'db2', country_code='CA')
self.assertOutput('')
return
# Odoo 9
self.client.db.list.side_effect = [['db2']]
create_database('xyz', 'db2', user_password='<PASSWORD>', lang='fr_FR', login='other_login', country_code='CA')
self.assertCalls(
call.db.create_database('xyz', 'db2', False, 'fr_FR', 'secret', 'other_login', 'CA'),
call.db.list(),
call.common.login('db2', 'other_login', 'secret'),
)
self.assertOutput('')
def test_search(self):
search = self.client.search
self.service.object.execute.side_effect = self.obj_exec
searchterm = 'name like Morice'
self.assertEqual(search('foo.bar', [searchterm]), [ID2, ID1])
self.assertEqual(search('foo.bar', [searchterm], limit=2), [ID2, ID1])
self.assertEqual(search('foo.bar', [searchterm], offset=80, limit=99),
[ID2, ID1])
self.assertEqual(search('foo.bar', [searchterm], order='name ASC'),
[ID2, ID1])
search('foo.bar', ['name = mushroom', 'state != draft'])
search('foo.bar', [('name', 'like', 'Morice')])
self.client.execute('foo.bar', 'search', [('name like Morice')])
search('foo.bar', [])
search('foo.bar')
domain = [('name', 'like', 'Morice')]
domain2 = [('name', '=', 'mushroom'), ('state', '!=', 'draft')]
self.assertCalls(
OBJ('foo.bar', 'search', domain),
OBJ('foo.bar', 'search', domain, 0, 2, None),
OBJ('foo.bar', 'search', domain, 80, 99, None),
OBJ('foo.bar', 'search', domain, 0, None, 'name ASC'),
OBJ('foo.bar', 'search', domain2),
OBJ('foo.bar', 'search', domain),
OBJ('foo.bar', 'search', domain),
OBJ('foo.bar', 'search', []),
OBJ('foo.bar', 'search', []),
)
self.assertOutput('')
# No longer supported since 1.6
search('foo.bar', 'name like Morice')
self.assertCalls(OBJ('foo.bar', 'search', 'name like Morice'))
search('foo.bar', ['name like Morice'], missingkey=42)
self.assertCalls(OBJ('foo.bar', 'search', domain))
self.assertOutput('Ignoring: missingkey = 42\n')
self.assertRaises(TypeError, search)
self.assertRaises(AssertionError, search, object())
self.assertRaises(ValueError, search, 'foo.bar', ['abc'])
self.assertRaises(ValueError, search, 'foo.bar', ['< id'])
self.assertRaises(ValueError, search, 'foo.bar', ['name Morice'])
self.assertCalls()
self.assertOutput('')
def test_count(self):
count = self.client.count
count('foo.bar', ['name like Morice'])
count('foo.bar', ['name = mushroom', 'state != draft'])
count('foo.bar', [('name', 'like', 'Morice')])
self.client.execute('foo.bar', 'search_count', [('name like Morice')])
count('foo.bar', [])
count('foo.bar')
domain = [('name', 'like', 'Morice')]
domain2 = [('name', '=', 'mushroom'), ('state', '!=', 'draft')]
self.assertCalls(
OBJ('foo.bar', 'search_count', domain),
OBJ('foo.bar', 'search_count', domain2),
OBJ('foo.bar', 'search_count', domain),
OBJ('foo.bar', 'search_count', domain),
OBJ('foo.bar', 'search_count', []),
OBJ('foo.bar', 'search_count', []),
)
self.assertOutput('')
# No longer supported since 1.6
count('foo.bar', 'name like Morice')
self.assertCalls(OBJ('foo.bar', 'search_count', 'name like Morice'))
self.assertRaises(TypeError, count)
self.assertRaises(TypeError, count,
['name like Morice'], limit=2)
self.assertRaises(TypeError, count,
['name like Morice'], offset=80, limit=99)
self.assertRaises(TypeError, count,
['name like Morice'], order='name ASC')
self.assertRaises(AssertionError, count, object())
self.assertRaises(ValueError, count, 'foo.bar', ['abc'])
self.assertRaises(ValueError, count, 'foo.bar', ['< id'])
self.assertRaises(ValueError, count, 'foo.bar', ['name Morice'])
self.assertCalls()
self.assertOutput('')
def test_read_simple(self):
read = self.client.read
self.service.object.execute.side_effect = self.obj_exec
read('foo.bar', 42)
read('foo.bar', [42])
read('foo.bar', [13, 17])
read('foo.bar', [42], 'first_name')
self.assertCalls(
OBJ('foo.bar', 'read', [42], None),
OBJ('foo.bar', 'read', [42], None),
OBJ('foo.bar', 'read', [13, 17], None),
OBJ('foo.bar', 'read', [42], ['first_name']),
)
self.assertOutput('')
def test_read_complex(self):
read = self.client.read
self.service.object.execute.side_effect = self.obj_exec
searchterm = 'name like Morice'
self.assertEqual(read('foo.bar', [searchterm]), [DIC1, DIC2])
self.assertEqual(read('foo.bar', [searchterm], limit=2), [DIC1, DIC2])
self.assertEqual(read('foo.bar', [searchterm], offset=80, limit=99),
[DIC1, DIC2])
self.assertEqual(read('foo.bar', [searchterm], order='name ASC'),
[DIC2, DIC1])
read('foo.bar', [searchterm], 'birthdate city')
read('foo.bar', [searchterm], 'birthdate city', limit=2)
read('foo.bar', [searchterm], limit=2, fields=['birthdate', 'city'])
read('foo.bar', [searchterm], order='name ASC')
read('foo.bar', ['name = mushroom', 'state != draft'])
read('foo.bar', [('name', 'like', 'Morice')])
self.client.execute('foo.bar', 'read', ['name like Morice'])
rv = read('foo.bar', ['name like Morice'],
'aaa %(birthdate)s bbb %(city)s', offset=80, limit=99)
self.assertEqual(rv, ['aaa v_birthdate_4001 bbb v_city_4001',
'aaa v_birthdate_4002 bbb v_city_4002'])
def call_read(fields=None):
return OBJ('foo.bar', 'read', [ID2, ID1], fields)
domain = [('name', 'like', 'Morice')]
domain2 = [('name', '=', 'mushroom'), ('state', '!=', 'draft')]
self.assertCalls(
OBJ('foo.bar', 'search', domain), call_read(),
OBJ('foo.bar', 'search', domain, 0, 2, None), call_read(),
OBJ('foo.bar', 'search', domain, 80, 99, None), call_read(),
OBJ('foo.bar', 'search', domain, 0, None, 'name ASC'),
call_read(),
OBJ('foo.bar', 'search', domain), call_read(['birthdate', 'city']),
OBJ('foo.bar', 'search', domain, 0, 2, None),
call_read(['birthdate', 'city']),
OBJ('foo.bar', 'search', domain, 0, 2, None),
call_read(['birthdate', 'city']),
OBJ('foo.bar', 'search', domain, 0, None, 'name ASC'),
call_read(),
OBJ('foo.bar', 'search', domain2), call_read(),
OBJ('foo.bar', 'search', domain), call_read(),
OBJ('foo.bar', 'search', domain), call_read(),
OBJ('foo.bar', 'search', domain, 80, 99, None),
call_read(['birthdate', 'city']),
)
self.assertOutput('')
def test_read_false(self):
read = self.client.read
self.service.object.execute.side_effect = self.obj_exec
self.assertEqual(read('foo.bar', False), False)
self.assertEqual(read('foo.bar', [False]), [])
self.assertEqual(read('foo.bar', [False, False]), [])
self.assertEqual(read('foo.bar', [False], 'first_name'), [])
self.assertEqual(read('foo.bar', [False], order=True),
[False])
self.assertEqual(read('foo.bar', [False, False], order=True),
[False, False])
self.assertEqual(read('foo.bar', [False], 'first_name', order=True),
[False])
self.assertEqual(read('foo.bar', [], 'first_name'), False)
self.assertEqual(read('foo.bar', [], 'first_name', order=True), False)
self.assertCalls()
self.assertEqual(read('foo.bar', [False, 42]), [IdentDict(42)])
self.assertEqual(read('foo.bar', [False, 13, 17, False]),
[IdentDict(17), IdentDict(13)])
self.assertEqual(read('foo.bar', [13, False, 17], 'first_name'),
['v_first_name_17', 'v_first_name_13'])
self.assertEqual(read('foo.bar', [False, 42], order=True),
[False, IdentDict(42)])
self.assertEqual(read('foo.bar', [False, 13, 17, False], order=True),
[False, IdentDict(13), IdentDict(17), False])
self.assertEqual(read('foo.bar', [13, False, 17], 'city', order=True),
['v_city_13', False, 'v_city_17'])
self.assertCalls(
OBJ('foo.bar', 'read', [42], None),
OBJ('foo.bar', 'read', [13, 17], None),
OBJ('foo.bar', 'read', [13, 17], ['first_name']),
OBJ('foo.bar', 'read', [42], None),
OBJ('foo.bar', 'read', [13, 17], None),
OBJ('foo.bar', 'read', [13, 17], ['city']),
)
self.assertOutput('')
def test_read_invalid(self):
read = self.client.read
self.service.object.execute.side_effect = self.obj_exec
domain = [('name', 'like', 'Morice')]
# No longer supported since 1.6
read('foo.bar', 'name like Morice')
read('foo.bar', ['name like Morice'], missingkey=42)
self.assertCalls(
OBJ('foo.bar', 'read', ['name like Morice'], None),
OBJ('foo.bar', 'search', domain),
OBJ('foo.bar', 'read', ANY, None))
self.assertOutput('Ignoring: missingkey = 42\n')
self.assertRaises(TypeError, read)
self.assertRaises(AssertionError, read, object())
self.assertRaises(AssertionError, read, 'foo.bar')
self.assertRaises(ValueError, read, 'foo.bar', ['abc'])
self.assertRaises(ValueError, read, 'foo.bar', ['< id'])
self.assertRaises(ValueError, read, 'foo.bar', ['name Morice'])
self.assertCalls()
self.assertOutput('')
def test_method(self, method_name='method', single_id=True):
method = getattr(self.client, method_name)
single_id = single_id and 42 or [42]
method('foo.bar', 42)
method('foo.bar', [42])
method('foo.bar', [13, 17])
self.client.execute('foo.bar', method_name, [42])
method('foo.bar', [])
self.assertCalls(
OBJ('foo.bar', method_name, single_id),
OBJ('foo.bar', method_name, [42]),
OBJ('foo.bar', method_name, [13, 17]),
OBJ('foo.bar', method_name, [42]),
OBJ('foo.bar', method_name, []),
)
self.assertRaises(TypeError, method)
self.assertRaises(AssertionError, method, object())
self.assertOutput('')
def test_standard_methods(self):
for method in 'write', 'create', 'copy', 'unlink':
self.test_method(method)
self.test_method('perm_read', single_id=False)
def test_model(self):
self.service.object.execute.side_effect = self.obj_exec
self.assertTrue(self.client.models('foo.bar'))
self.assertCalls(
OBJ('ir.model', 'search', [('model', 'like', 'foo.bar')]),
OBJ('ir.model', 'read', [ID2, ID1], ('model',)),
)
self.assertOutput('')
self.assertRaises(erppeek.Error, self.client.model, 'foo.bar')
self.assertCalls(
OBJ('ir.model', 'search', [('model', 'like', 'foo.bar')]),
OBJ('ir.model', 'read', [ID2, ID1], ('model',)),
)
self.assertOutput('')
self.service.object.execute.side_effect = [
sentinel.IDS, [{'id': 13, 'model': 'foo.bar'}]]
self.assertIsInstance(self.client.model('foo.bar'), erppeek.Model)
self.assertIs(self.client.model('foo.bar'),
erppeek.Model(self.client, 'foo.bar'))
self.assertIs(self.client.model('foo.bar'),
self.client.FooBar)
self.assertCalls(
OBJ('ir.model', 'search', [('model', 'like', 'foo.bar')]),
OBJ('ir.model', 'read', sentinel.IDS, ('model',)),
)
self.assertOutput('')
def test_keys(self):
self.service.object.execute.side_effect = [
sentinel.IDS, [{'model': 'foo.bar'}], ['spam']]
self.assertTrue(self.client.keys('foo.bar'))
self.assertCalls(
OBJ('ir.model', 'search', [('model', 'like', 'foo.bar')]),
OBJ('ir.model', 'read', sentinel.IDS, ('model',)),
OBJ('foo.bar', 'fields_get_keys'),
)
self.assertOutput('')
def test_fields(self):
self.service.object.execute.side_effect = [
sentinel.IDS, [{'model': 'foo.bar'}], {'spam': sentinel.FIELD}]
self.assertTrue(self.client.fields('foo.bar'))
self.assertCalls(
OBJ('ir.model', 'search', [('model', 'like', 'foo.bar')]),
OBJ('ir.model', 'read', sentinel.IDS, ('model',)),
OBJ('foo.bar', 'fields_get'),
)
self.assertOutput('')
def test_field(self):
self.service.object.execute.side_effect = [
sentinel.IDS, [{'model': 'foo.bar'}], {'spam': sentinel.FIELD}]
self.assertTrue(self.client.field('foo.bar', 'spam'))
self.assertRaises(TypeError, self.client.field)
self.assertRaises(TypeError, self.client.field, 'foo.bar')
self.assertCalls(
OBJ('ir.model', 'search', [('model', 'like', 'foo.bar')]),
OBJ('ir.model', 'read', sentinel.IDS, ('model',)),
OBJ('foo.bar', 'fields_get'),
)
self.assertOutput('')
def test_access(self):
self.assertTrue(self.client.access('foo.bar'))
self.assertCalls(OBJ('ir.model.access', 'check', 'foo.bar', 'read'))
self.assertOutput('')
def test_execute_kw(self):
execute_kw = self.client.execute_kw
execute_kw('foo.bar', 'any_method', 42)
execute_kw('foo.bar', 'any_method', [42])
execute_kw('foo.bar', 'any_method', [13, 17])
self.assertCalls(
('object.execute_kw', AUTH, 'foo.bar', 'any_method', 42),
('object.execute_kw', AUTH, 'foo.bar', 'any_method', [42]),
('object.execute_kw', AUTH, 'foo.bar', 'any_method', [13, 17]),
)
self.assertOutput('')
def test_exec_workflow(self):
exec_workflow = self.client.exec_workflow
self.assertTrue(exec_workflow('foo.bar', 'light', 42))
self.assertRaises(TypeError, exec_workflow)
self.assertRaises(TypeError, exec_workflow, 'foo.bar')
self.assertRaises(TypeError, exec_workflow, 'foo.bar', 'rip')
self.assertRaises(TypeError, exec_workflow, 'foo.bar', 'rip', 42, None)
self.assertRaises(AssertionError, exec_workflow, 42, 'rip', 42)
self.assertRaises(AssertionError, exec_workflow, 'foo.bar', 42, 42)
self.assertCalls(
('object.exec_workflow', AUTH, 'foo.bar', 'light', 42),
)
self.assertOutput('')
def test_wizard(self):
wizard = self.client.wizard
self.service.wizard.create.return_value = ID1
self.assertTrue(wizard('foo.bar'))
self.assertTrue(wizard('billy', action='shake'))
self.assertTrue(wizard(42, action='kick'))
self.assertRaises(TypeError, wizard)
self.assertCalls(
('wizard.create', AUTH, 'foo.bar'),
('wizard.create', AUTH, 'billy'),
('wizard.execute', AUTH, ID1, {}, 'shake', None),
('wizard.execute', AUTH, 42, {}, 'kick', None),
)
self.assertOutput('')
def test_report(self):
self.assertTrue(self.client.report('foo.bar', sentinel.IDS))
self.assertCalls(
('report.report', AUTH, 'foo.bar', sentinel.IDS),
)
self.assertOutput('')
def test_render_report(self):
self.assertTrue(self.client.render_report('foo.bar', sentinel.IDS))
self.assertCalls(
('report.render_report', AUTH, 'foo.bar', sentinel.IDS),
)
self.assertOutput('')
def test_report_get(self):
self.assertTrue(self.client.report_get(ID1))
self.assertCalls(
('report.report_get', AUTH, ID1),
)
self.assertOutput('')
def _module_upgrade(self, button='upgrade'):
execute_return = [
[7, 0], [42], [], {'name': 'Upgrade'}, [4, 42, 5],
[{'id': 4, 'state': ANY, 'name': ANY},
{'id': 5, 'state': ANY, 'name': ANY},
{'id': 42, 'state': ANY, 'name': ANY}], ANY]
action = getattr(self.client, button)
imm = ('object.execute', AUTH, 'ir.module.module')
bmu = ('object.execute', AUTH, 'base.module.upgrade')
expected_calls = [
imm + ('update_list',),
imm + ('search', [('name', 'in', ('dummy', 'spam'))]),
imm + ('search', [('state', 'not in', STABLE)]),
imm + ('button_' + button, [42]),
imm + ('search', [('state', 'not in', STABLE)]),
imm + ('read', [4, 42, 5], ['name', 'state']),
bmu + ('upgrade_module', []),
]
if button == 'uninstall':
execute_return[3:3] = [[], ANY]
expected_calls[3:3] = [
imm + ('search', [('id', 'in', [42]),
('state', '!=', 'installed'),
('state', '!=', 'to upgrade'),
('state', '!=', 'to remove')]),
imm + ('write', [42], {'state': 'to remove'}),
]
self.service.object.execute.side_effect = execute_return
result = action('dummy', 'spam')
self.assertIsNone(result)
self.assertCalls(*expected_calls)
self.assertIn('to process', self.stdout.popvalue())
self.assertOutput('')
def test_module_upgrade(self):
self._module_upgrade('install')
self._module_upgrade('upgrade')
self._module_upgrade('uninstall')
class TestClientApi50(TestClientApi):
"""Test the Client API for OpenERP 5."""
server_version = '5.0'
test_execute_kw = test_render_report = _skip_test
def test_create_database(self):
create_database = self.client.create_database
mock.patch('time.sleep').start()
self.client.db.create.return_value = ID1
self.client.db.get_progress.return_value = \
[1, [{'login': 'admin', 'password': 'PP'}]]
self.client.db.list.side_effect = [['db1'], ['db2']]
create_database('abc', 'db1')
create_database('xyz', 'db2', user_password='<PASSWORD>', lang='fr_FR')
self.assertCalls(
call.db.create('abc', 'db1', False, 'en_US', 'admin'),
call.db.get_progress('abc', ID1),
call.db.list(),
call.common.login('db1', 'admin', 'admin'),
call.db.create('xyz', 'db2', False, 'fr_FR', 'secret'),
call.db.get_progress('xyz', ID1),
call.db.list(),
call.common.login('db2', 'admin', 'secret'),
)
self.assertOutput('')
def _module_upgrade(self, button='upgrade'):
execute_return = [
[7, 0], [42], [], {'name': 'Upgrade'}, [4, 42, 5],
[{'id': 4, 'state': ANY, 'name': ANY},
{'id': 5, 'state': ANY, 'name': ANY},
{'id': 42, 'state': ANY, 'name': ANY}]]
self.service.wizard.create.return_value = 17
self.service.wizard.execute.return_value = {'state': (['config'],)}
action = getattr(self.client, button)
imm = ('object.execute', AUTH, 'ir.module.module')
expected_calls = [
imm + ('update_list',),
imm + ('search', [('name', 'in', ('dummy', 'spam'))]),
imm + ('search', [('state', 'not in', STABLE)]),
imm + ('button_' + button, [42]),
imm + ('search', [('state', 'not in', STABLE)]),
imm + ('read', [4, 42, 5], ['name', 'state']),
('wizard.create', AUTH, 'module.upgrade'),
('wizard.execute', AUTH, 17, {}, 'start', None),
]
if button == 'uninstall':
execute_return[3:3] = [[], ANY]
expected_calls[3:3] = [
imm + ('search', [('id', 'in', [42]),
('state', '!=', 'installed'),
('state', '!=', 'to upgrade'),
('state', '!=', 'to remove')]),
imm + ('write', [42], {'state': 'to remove'}),
]
self.service.object.execute.side_effect = execute_return
result = action('dummy', 'spam')
self.assertIsNone(result)
self.assertCalls(*expected_calls)
self.assertIn('to process', self.stdout.popvalue())
self.assertOutput('')
class TestClientApi90(TestClientApi):
"""Test the Client API for Odoo 9."""
server_version = '9.0'
test_wizard = _skip_test
class TestClientApi11(TestClientApi):
"""Test the Client API for Odoo 11."""
server_version = '11.0'
test_wizard = _skip_test
test_report = test_render_report = test_report_get = _skip_test
|
.venv/lib/python3.8/site-packages/pandas/tests/series/methods/test_at_time.py
|
acrucetta/Chicago_COVI_WebApp
| 115 |
131044
|
<gh_stars>100-1000
from datetime import time
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestAtTime:
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_localized_at_time(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range("4/16/2012", "5/1/2012", freq="H")
ts = Series(np.random.randn(len(rng)), index=rng)
ts_local = ts.tz_localize(tzstr)
result = ts_local.at_time(time(10, 0))
expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
tm.assert_series_equal(result, expected)
assert timezones.tz_compare(result.index.tz, tz)
def test_at_time(self):
rng = date_range("1/1/2000", "1/5/2000", freq="5min")
ts = Series(np.random.randn(len(rng)), index=rng)
rs = ts.at_time(rng[1])
assert (rs.index.hour == rng[1].hour).all()
assert (rs.index.minute == rng[1].minute).all()
assert (rs.index.second == rng[1].second).all()
result = ts.at_time("9:30")
expected = ts.at_time(time(9, 30))
tm.assert_series_equal(result, expected)
df = DataFrame(np.random.randn(len(rng), 3), index=rng)
result = ts[time(9, 30)]
result_df = df.loc[time(9, 30)]
expected = ts[(rng.hour == 9) & (rng.minute == 30)]
exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
result.index = result.index._with_freq(None)
tm.assert_series_equal(result, expected)
tm.assert_frame_equal(result_df, exp_df)
chunk = df.loc["1/4/2000":]
result = chunk.loc[time(9, 30)]
expected = result_df[-1:]
# Without resetting the freqs, these are 5 min and 1440 min, respectively
result.index = result.index._with_freq(None)
expected.index = expected.index._with_freq(None)
tm.assert_frame_equal(result, expected)
# midnight, everything
rng = date_range("1/1/2000", "1/31/2000")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts.at_time(time(0, 0))
tm.assert_series_equal(result, ts)
# time doesn't exist
rng = date_range("1/1/2012", freq="23Min", periods=384)
ts = Series(np.random.randn(len(rng)), rng)
rs = ts.at_time("16:00")
assert len(rs) == 0
def test_at_time_raises(self):
# GH20725
ser = Series("a b c".split())
msg = "Index must be DatetimeIndex"
with pytest.raises(TypeError, match=msg):
ser.at_time("00:00")
|
e3nn/math/_linalg.py
|
ninarina12/e3nn
| 385 |
131050
|
<reponame>ninarina12/e3nn<gh_stars>100-1000
from typing import Tuple
import torch
def direct_sum(*matrices):
    r"""Direct sum of matrices, placing them along the diagonal
"""
front_indices = matrices[0].shape[:-2]
m = sum(x.size(-2) for x in matrices)
n = sum(x.size(-1) for x in matrices)
total_shape = list(front_indices) + [m, n]
out = matrices[0].new_zeros(total_shape)
i, j = 0, 0
for x in matrices:
m, n = x.shape[-2:]
out[..., i: i + m, j: j + n] = x
i += m
j += n
return out
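# Usage sketch (illustration only): stacking a 2x2 and a 1x3 matrix gives a
# 3x5 block-diagonal result with zeros off the blocks.
#   a = torch.ones(2, 2)
#   b = torch.ones(1, 3)
#   direct_sum(a, b).shape  # torch.Size([3, 5])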
@torch.jit.script
def orthonormalize(
original: torch.Tensor,
eps: float = 1e-9
) -> Tuple[torch.Tensor, torch.Tensor]:
    r"""orthonormalize vectors
Parameters
----------
original : `torch.Tensor`
list of the original vectors :math:`x`
eps : float
a small number
Returns
-------
final : `torch.Tensor`
        list of orthonormalized vectors :math:`y`
matrix : `torch.Tensor`
the matrix :math:`A` such that :math:`y = A x`
"""
assert original.dim() == 2
dim = original.shape[1]
final = []
matrix = []
for i, x in enumerate(original):
cx = x.new_zeros(len(original))
cx[i] = 1
for j, y in enumerate(final):
c = torch.dot(x, y)
x = x - c * y
cx = cx - c * matrix[j]
if x.norm() > 2 * eps:
c = 1 / x.norm()
x = c * x
cx = c * cx
x[x.abs() < eps] = 0
cx[cx.abs() < eps] = 0
c = x[x.nonzero()[0, 0]].sign()
x = c * x
cx = c * cx
final += [x]
matrix += [cx]
final = torch.stack(final) if len(final) > 0 else original.new_zeros((0, dim))
matrix = torch.stack(matrix) if len(matrix) > 0 else original.new_zeros((0, len(original)))
return final, matrix
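# Usage sketch (illustration only): the returned matrix maps the original
# vectors onto the orthonormalized ones, i.e. final = matrix @ original.
#   x = torch.tensor([[1., 0., 0.], [1., 1., 0.]])
#   y, a = orthonormalize(x)
#   torch.allclose(y, a @ x)  # -> True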
@torch.jit.script
def complete_basis(
vecs: torch.Tensor,
eps: float = 1e-9
) -> torch.Tensor:
assert vecs.dim() == 2
dim = vecs.shape[1]
base = [x / x.norm() for x in vecs]
expand = []
for x in torch.eye(dim, device=vecs.device, dtype=vecs.dtype):
for y in base + expand:
x -= torch.dot(x, y) * y
if x.norm() > 2 * eps:
x /= x.norm()
x[x.abs() < eps] = x.new_zeros(())
x *= x[x.nonzero()[0, 0]].sign()
expand += [x]
expand = torch.stack(expand) if len(expand) > 0 else vecs.new_zeros(0, dim)
return expand
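# Usage sketch (illustration only): complete_basis returns vectors extending
# the given ones to an orthonormal basis of the ambient space.
#   v = torch.tensor([[1., 0., 0.]])
#   complete_basis(v)  # -> rows [0., 1., 0.] and [0., 0., 1.]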
|
configs/seg/_base_/models/deeplabv3_central_mnb4.py
|
OpenGVLab/gv-benchmark
| 106 |
131053
|
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
# pretrained='checkpoints/metanet-b4/det_fmtb4_v1.pth.tar',
backbone=dict(
type='Central_Model',
backbone_name='MTB4',
task_names=('gv_patch', 'gv_global'),
main_task_name='gv_global',
trans_type='crossconvhrnetlayer',
task_name_to_backbone={
'gv_global': {
'repeats': [2, 3, 6, 6, 6, 12],
'expansion': [1, 4, 6, 3, 2, 5],
'channels': [32, 64, 128, 192, 192, 384],
'final_drop': 0.0,
'block_ops': ['MBConv3x3'] * 4 + ['SABlock'] * 2,
'input_size': 256
},
'gv_patch': {
'repeats': [2, 3, 6, 6, 6, 12],
'expansion': [1, 4, 6, 3, 2, 5],
'channels': [32, 64, 128, 192, 192, 384],
'final_drop': 0.0,
'block_ops': ['MBConv3x3'] * 4 + ['SABlock'] * 2,
'input_size': 256
}
},
layer2channel={
'layer1': 64,
'layer2': 128,
'layer3': 192
},
layer2auxlayers={
'layer1': [
'layer1',
],
'layer2': [
'layer1',
'layer2',
],
'layer3': ['layer1', 'layer2', 'layer3'],
},
trans_layers=['layer1', 'layer2', 'layer3'],
channels=[64, 128, 192],
),
decode_head=dict(type='ASPPHead',
in_channels=384,
in_index=3,
channels=512,
dilations=(1, 12, 24, 36),
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0)),
auxiliary_head=dict(type='FCNHead',
in_channels=192,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
custom_imports = dict(imports=[
'gvbenchmark.seg.models.backbones.central_model',
],
allow_failed_imports=False)
|
modules/adapters/adapter/transports.py
|
timfjord/sublime_debugger
| 225 |
131069
|
<reponame>timfjord/sublime_debugger<filename>modules/adapters/adapter/transports.py
from __future__ import annotations
from ...typecheck import *
from ...import core
from ...dap import Transport
import socket
import os
import subprocess
import threading
class Process:
@staticmethod
async def check_output(command: list[str], cwd: str|None = None) -> bytes:
return await core.run_in_executor(lambda: subprocess.check_output(command, cwd=cwd))
def __init__(self, command: list[str], cwd: str|None = None):
# taken from Default/exec.py
# Hide the console window on Windows
startupinfo = None
if os.name == "nt":
startupinfo = subprocess.STARTUPINFO() #type: ignore
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW #type: ignore
self.process = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=False,
bufsize=0,
startupinfo=startupinfo,
cwd = cwd)
self.stdin = self.process.stdin
self.stderr = self.process.stderr
self.stdout = self.process.stdout
self.closed = False
def _readline(self, pipe) -> bytes:
if l := pipe.readline():
return l
raise EOFError
def _read(self, pipe, n: int) -> bytes:
if l := pipe.read(n):
return l
raise EOFError
async def readline(self, pipe) -> bytes:
return await core.run_in_executor(lambda: self._readline(pipe))
async def read(self, pipe, nbytes) -> bytes:
return await core.run_in_executor(lambda: self._read(pipe, nbytes))
def dispose(self):
self.closed = True
try:
self.process.terminate()
except Exception as e:
core.log_exception()
class StdioTransport(Transport):
def __init__(self, log: core.Logger, command: list[str], cwd: str|None = None, ignore_stderr: bool = False):
log.log('transport', f'⟸ process/starting :: {command}')
self.process = Process(command, cwd)
if ignore_stderr:
thread = threading.Thread(target=self._read, args=(self.process.stderr, print))
else:
thread = threading.Thread(target=self._read, args=(self.process.stderr, log.info))
thread.start()
def _read(self, file: Any, callback: Callable[[str], None]) -> None:
while True:
try:
line = file.read(2**15).decode('UTF-8')
if not line:
core.log_info('Nothing to read from process, closing')
break
core.call_soon_threadsafe(callback, line)
except Exception as e:
core.log_exception()
break
self.process.dispose()
def write(self, message: bytes) -> None:
self.process.stdin.write(message)
self.process.stdin.flush()
def readline(self) -> bytes:
if l := self.process.stdout.readline():
return l
raise EOFError
def read(self, n: int) -> bytes:
if l := self.process.stdout.read(n):
return l
raise EOFError
def dispose(self) -> None:
self.process.dispose()
class SocketTransport(Transport):
def __init__(self, log: core.Logger, host: str, port: int, cwd: str|None = None):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
self.stdin = self.socket.makefile('wb')
self.stdout = self.socket.makefile('rb')
def write(self, message: bytes) -> None:
self.stdin.write(message)
self.stdin.flush()
def readline(self) -> bytes:
if l := self.stdout.readline():
return l
raise EOFError
def read(self, n: int) -> bytes:
if l := self.stdout.read(n):
return l
raise EOFError
def dispose(self) -> None:
try:
self.socket.close()
except:
core.log_exception()
|
funannotate/library.py
|
davised/funannotate
| 199 |
131081
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from Bio import BiopythonWarning
import os
import uuid
import io
import subprocess
import logging
import sys
import csv
import time
import re
import shutil
import platform
import distro
import multiprocessing
import itertools
import hashlib
import math
import gzip
import operator
import textwrap
import errno
import datetime
from natsort import natsorted
import funannotate.resources as resources
from funannotate.interlap import InterLap
from collections import defaultdict
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
try:
from itertools import izip as zip
except ImportError:
pass
import warnings
from Bio import SeqIO
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from Bio import SearchIO
warnings.simplefilter('ignore', BiopythonWarning)
# get the working directory, so you can move back into DB folder to find the files you need
global parentdir
parentdir = os.path.join(os.path.dirname(__file__))
GeneMark2GFF = os.path.join(parentdir, 'aux_scripts', 'genemark_gtf2gff3.pl')
class colr:
GRN = '\033[92m'
END = '\033[0m'
WARN = '\033[93m'
class suppress_stdout_stderr(object):
'''
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
'''
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = (os.dup(1), os.dup(2))
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
os.close(self.null_fds[0])
os.close(self.null_fds[1])
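# Usage sketch (illustration only; `noisy_tool` is a hypothetical callable):
# even fd-level writes from compiled extensions are discarded, while raised
# exceptions still propagate to the caller.
#   with suppress_stdout_stderr():
#       noisy_tool()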
class gzopen(object):
"""Generic opener that decompresses gzipped files
if needed. Encapsulates an open file or a GzipFile.
Use the same way you would use 'open()'.
"""
def __init__(self, fname):
f = open(fname)
# Read magic number (the first 2 bytes) and rewind.
magic_number = f.read(2)
f.seek(0)
# Encapsulated 'self.f' is a file or a GzipFile.
if magic_number == b'\x1f\x8b':
self.f = gzip.GzipFile(fileobj=f)
else:
self.f = f
# Define '__enter__' and '__exit__' to use in
# 'with' blocks. Always close the file and the
# GzipFile if applicable.
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
try:
self.f.fileobj.close()
except AttributeError:
pass
finally:
self.f.close()
# Reproduce the interface of an open file
# by encapsulation.
def __getattr__(self, name):
return getattr(self.f, name)
def __iter__(self):
return iter(self.f)
def __next__(self):
return next(self.f)
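# Usage sketch (illustration only; the file name is hypothetical): gzopen is
# meant to sniff the two-byte gzip magic number and wrap the stream, so the
# caller iterates lines the same way for plain and gzipped input.
#   with gzopen('annotations.gff3.gz') as handle:
#       for line in handle:
#           pass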
def createdir(name):
try:
os.makedirs(name)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
pass
def softwrap(string, every=80):
lines = []
for i in range(0, len(string), every):
lines.append(string[i:i+every])
return '\n'.join(lines)
def len_without_format(text):
try:
return len(remove_formatting(text))
except TypeError:
return len(str(text))
def remove_formatting(text):
return re.sub('\033.*?m', '', text)
def colour(text, text_colour):
bold_text = 'bold' in text_colour
text_colour = text_colour.replace('bold', '')
underline_text = 'underline' in text_colour
text_colour = text_colour.replace('underline', '')
text_colour = text_colour.replace('_', '')
text_colour = text_colour.replace(' ', '')
text_colour = text_colour.lower()
if 'red' in text_colour:
coloured_text = RED
elif 'green' in text_colour:
coloured_text = GREEN
elif 'yellow' in text_colour:
coloured_text = YELLOW
elif 'dim' in text_colour:
coloured_text = DIM
else:
coloured_text = ''
if bold_text:
coloured_text += BOLD
if underline_text:
coloured_text += UNDERLINE
if not coloured_text:
return text
coloured_text += text + END_FORMATTING
return coloured_text
END_FORMATTING = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
RED = '\033[31m'
GREEN = '\033[32m'
MAGENTA = '\033[35m'
YELLOW = '\033[93m'
DIM = '\033[2m'
def green(text):
return GREEN + text + END_FORMATTING
def bold_green(text):
return GREEN + BOLD + text + END_FORMATTING
def red(text):
return RED + text + END_FORMATTING
def magenta(text):
return MAGENTA + text + END_FORMATTING
def bold_red(text):
return RED + BOLD + text + END_FORMATTING
def bold(text):
return BOLD + text + END_FORMATTING
def bold_underline(text):
return BOLD + UNDERLINE + text + END_FORMATTING
def underline(text):
return UNDERLINE + text + END_FORMATTING
def dim(text):
return DIM + text + END_FORMATTING
def dim_underline(text):
return DIM + UNDERLINE + text + END_FORMATTING
def bold_yellow(text):
return YELLOW + BOLD + text + END_FORMATTING
def bold_yellow_underline(text):
return YELLOW + BOLD + UNDERLINE + text + END_FORMATTING
def bold_red_underline(text):
return RED + BOLD + UNDERLINE + text + END_FORMATTING
def print_table(table, alignments='', max_col_width=30, col_separation=3, indent=2,
row_colour=None, sub_colour=None, row_extra_text=None, leading_newline=False,
subsequent_indent='', return_str=False, header_format='underline',
hide_header=False, fixed_col_widths=None, left_align_header=True,
bottom_align_header=True, verbosity=1):
"""
Args:
table: a list of lists of strings (one row is one list, all rows should be the same length)
alignments: a string of L and R, indicating the alignment for each row
max_col_width: values longer than this will be wrapped
col_separation: the number of spaces between columns
indent: the number of spaces between the table and the left side of the terminal
row_colour: a dictionary of row indices and their colour names
sub_colour: a dictionary of values to colour names for which the text colour will be set
row_extra_text: a dictionary of row indices and extra text to display after the row
leading_newline: if True, the function will print a blank line above the table
subsequent_indent: this string will be added to the start of wrapped text lines
return_str: if True, this function will return a string of the table instead of printing it
header_format: the formatting (colour, underline, etc) of the header line
hide_header: if True, the header is not printed
fixed_col_widths: a list to specify exact column widths (automatic if not used)
left_align_header: if False, the header will follow the column alignments
bottom_align_header: if False, the header will align to the top, like other rows
verbosity: the table will only be logged if the logger verbosity is >= this value
"""
# this function is written by <NAME> in Unicycler code
# modified to not support colors
column_count = len(table[0])
table = [x[:column_count] for x in table]
table = [x + [''] * (column_count - len(x)) for x in table]
if row_colour is None:
row_colour = {}
if sub_colour is None:
sub_colour = {}
if row_extra_text is None:
row_extra_text = {}
if leading_newline:
print('')
# Ensure the alignments string is the same length as the column count
alignments += 'L' * (column_count - len(alignments))
alignments = alignments[:column_count]
if fixed_col_widths is not None:
col_widths = fixed_col_widths
else:
col_widths = [0] * column_count
for row in table:
col_widths = [min(max(col_widths[i], len_without_format(x)), max_col_width)
for i, x in enumerate(row)]
separator = ' ' * col_separation
indenter = ' ' * indent
full_table_str = ''
for i, row in enumerate(table):
row = [str(x) for x in row]
if hide_header and i == 0:
continue
if fixed_col_widths is not None:
wrapped_row = []
for col, fixed_width in zip(row, fixed_col_widths):
wrapper = textwrap.TextWrapper(subsequent_indent=subsequent_indent,
width=fixed_width)
wrapped_row.append(wrapper.wrap(col))
else:
wrapper = textwrap.TextWrapper(
subsequent_indent=subsequent_indent, width=max_col_width)
wrapped_row = [wrapper.wrap(x) for x in row]
row_rows = max(len(x) for x in wrapped_row)
if i == 0 and bottom_align_header:
wrapped_row = [[''] * (row_rows - len(x)) + x for x in wrapped_row]
for j in range(row_rows):
row_line = [x[j] if j < len(x) else '' for x in wrapped_row]
aligned_row = []
for value, col_width, alignment in zip(row_line, col_widths, alignments):
if alignment == 'L' or (i == 0 and left_align_header):
aligned_row.append(value.ljust(col_width))
elif alignment == 'C':
aligned_row.append(value.center(col_width))
else:
aligned_row.append(value.rjust(col_width))
row_str = separator.join(aligned_row)
if i in row_extra_text:
row_str += row_extra_text[i]
if i == 0 and header_format:
row_str = colour(row_str, header_format)
if i in row_colour:
row_str = colour(row_str, row_colour[i])
for text, colour_name in list(sub_colour.items()):
row_str = row_str.replace(text, colour(text, colour_name))
if j < row_rows - 1 and UNDERLINE in row_str:
                row_str = re.sub(r'\033\[4m', '', row_str)
if return_str:
full_table_str += indenter + row_str + '\n'
else:
print((indenter + row_str))
if return_str:
return full_table_str
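# Example usage of print_table (illustrative only; the table contents are made up):
#   stats = [['Metric', 'Value'], ['Genes', '10,000'], ['tRNA', '150']]
#   print_table(stats, alignments='LR', max_col_width=40)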
def git_version():
    def _minimal_ext_cmd(cmd):
        # run the command from the funannotate directory, discarding stderr
        out = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                               stderr=subprocess.DEVNULL,
                               cwd=parentdir).communicate()[0]
        return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', '--short', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = False
return GIT_REVISION
def Funzip(input, output, cpus):
    '''
    function to unzip as fast as possible, preferring pigz over gzip
    '''
if which('pigz'):
cmd = ['pigz', '--decompress', '-c', '-p', str(cpus), input]
else:
cmd = ['gzip', '--decompress', '-c', input]
try:
runSubprocess2(cmd, '.', log, output)
except NameError:
with open(output, 'w') as outfile:
subprocess.call(cmd, stdout=outfile)
def Fzip(input, output, cpus):
    '''
    function to zip as fast as possible, preferring pigz over gzip
    '''
if which('pigz'):
cmd = ['pigz', '-c', '-p', str(cpus), input]
else:
cmd = ['gzip', '-c', input]
try:
runSubprocess2(cmd, '.', log, output)
except NameError:
with open(output, 'w') as outfile:
subprocess.call(cmd, stdout=outfile)
def Fzip_inplace(input, cpus):
    '''
    function to zip in place as fast as possible, preferring pigz over gzip
    '''
if which('pigz'):
cmd = ['pigz', '-f', '-p', str(cpus), input]
else:
cmd = ['gzip', '-f', input]
try:
runSubprocess(cmd, '.', log)
except NameError:
subprocess.call(cmd)
# RNA seq mediated modules
def concatenateReads(input, output):
    '''
    Since comma-separated file lists do not work reliably with the subprocess module,
    concatenate the FASTQ files in order and use a single file; input should be a list
    of FASTQ files. System cat is used so that gzipped files are concatenated correctly.
    '''
cmd = ['cat']
cmd = cmd + input
runSubprocess2(cmd, '.', log, output)
def which2(program):
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
def open_pipe(command, mode='r', buff=1024*1024):
import subprocess
import signal
if 'r' in mode:
return subprocess.Popen(command, shell=True, bufsize=buff,
stdout=subprocess.PIPE, universal_newlines=True,
preexec_fn=lambda: signal.signal(
signal.SIGPIPE, signal.SIG_DFL)
).stdout
elif 'w' in mode:
return subprocess.Popen(command, shell=True, bufsize=buff, universal_newlines=True,
stdin=subprocess.PIPE).stdin
return None
NORMAL = 0
PROCESS = 1
PARALLEL = 2
WHICH_BZIP2 = which2("bzip2")
WHICH_PBZIP2 = which2("pbzip2")
def open_bz2(filename, mode='r', buff=1024*1024, external=PARALLEL):
if external is None or external == NORMAL:
import bz2
return bz2.BZ2File(filename, mode, buff)
elif external == PROCESS:
if not WHICH_BZIP2:
return open_bz2(filename, mode, buff, NORMAL)
if 'r' in mode:
return open_pipe("bzip2 -dc " + filename, mode, buff)
elif 'w' in mode:
return open_pipe("bzip2 >" + filename, mode, buff)
elif external == PARALLEL:
if not WHICH_PBZIP2:
return open_bz2(filename, mode, buff, PROCESS)
if 'r' in mode:
return open_pipe("pbzip2 -dc " + filename, mode, buff)
elif 'w' in mode:
return open_pipe("pbzip2 >" + filename, mode, buff)
return None
WHICH_GZIP = which2("gzip")
WHICH_PIGZ = which2("pigz")
def open_gz(filename, mode='r', buff=1024*1024, external=PARALLEL):
if external is None or external == NORMAL:
import gzip
return gzip.GzipFile(filename, mode, buff)
elif external == PROCESS:
if not WHICH_GZIP:
return open_gz(filename, mode, buff, NORMAL)
if 'r' in mode:
return open_pipe("gzip -dc " + filename, mode, buff)
elif 'w' in mode:
return open_pipe("gzip >" + filename, mode, buff)
elif external == PARALLEL:
if not WHICH_PIGZ:
return open_gz(filename, mode, buff, PROCESS)
if 'r' in mode:
return open_pipe("pigz -dc " + filename, mode, buff)
elif 'w' in mode:
return open_pipe("pigz >" + filename, mode, buff)
return None
WHICH_XZ = which2("xz")
def open_xz(filename, mode='r', buff=1024*1024, external=PARALLEL):
if WHICH_XZ:
if 'r' in mode:
return open_pipe("xz -dc " + filename, mode, buff)
elif 'w' in mode:
return open_pipe("xz >" + filename, mode, buff)
return None
def zopen(filename, mode='r', buff=1024*1024, external=PARALLEL):
"""
Open pipe, zipped, or unzipped file automagically
# external == 0: normal zip libraries
# external == 1: (zcat, gzip) or (bzcat, bzip2)
# external == 2: (pigz -dc, pigz) or (pbzip2 -dc, pbzip2)
"""
if 'r' in mode and 'w' in mode:
return None
if filename.startswith('!'):
return open_pipe(filename[1:], mode, buff)
elif filename.endswith('.bz2'):
return open_bz2(filename, mode, buff, external)
elif filename.endswith('.gz'):
return open_gz(filename, mode, buff, external)
elif filename.endswith('.xz'):
return open_xz(filename, mode, buff, external)
else:
return open(filename, mode, buff)
return None
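# Example usage (illustrative; the file names are hypothetical):
#   fh = zopen('annotations.gff3.gz')             # decompressed via pigz/gzip
#   pipe = zopen('!samtools view input.bam')      # leading '!' opens a command pipe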
def execute(cmd):
DEVNULL = open(os.devnull, 'w')
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
universal_newlines=True, stderr=DEVNULL)
for stdout_line in iter(popen.stdout.readline, ""):
yield stdout_line
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
def getDiamondVersion():
vers = subprocess.Popen(['diamond', 'version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True).communicate()
    if vers[1] == '':  # older versions print the version string to stdout
vers = vers[0].split('version ')[-1].rstrip()
else:
vers = vers[1].split()[1].replace('v', '')
return vers
def CheckDiamondDB(database):
diamond_version = getDiamondVersion()
DBvers = None
for line in execute(['diamond', 'dbinfo', '-d', database]):
if 'Database format version' in line:
DBvers = int(line.strip().split()[-1])
if not DBvers:
log.error('Could not determine diamond database version')
return False
runVers = None
    # compare versions numerically (ver_cmp) rather than lexicographically
    if ver_cmp(diamond_version, '0.9.10') < 0:
        return False
    elif ver_cmp(diamond_version, '0.9.25') < 0:
        runVers = 2
    else:
        runVers = 3
if runVers >= DBvers:
return True
else:
return False
def CheckFASTQandFix(forward, reverse, cpus=2):
from Bio.SeqIO.QualityIO import FastqGeneralIterator
# open and check first header, if okay exit, if not fix
file1 = FastqGeneralIterator(zopen(forward, 'rt'))
file2 = FastqGeneralIterator(zopen(reverse, 'rt'))
check = True
for read1, read2 in zip(file1, file2):
if ' ' in read1[0] and ' ' in read2[0]:
# std illumina, exit
if read1[0].split(' ', 1)[1].startswith('1') and read2[0].split(' ', 1)[1].startswith('2'):
break
else:
log.debug("R1 header: {} and R2 header: {} are not 1 and 2 as expected".format(read1[0],read2[0]))
check = False
break
elif read1[0].endswith('/1') and read2[0].endswith('/2'): # also acceptable
break
        else:  # pairing information is missing, which is not okay
            log.debug("R1 header: {} and R2 header: {} are missing the expected pairing information".format(read1[0], read2[0]))
            check = False
            break
file1.close()
file2.close()
if not check:
log.error('ERROR: FASTQ headers are not properly paired, see logfile and reformat your FASTQ headers')
sys.exit(1)
'''
# now need to fix these reads
log.info(
"PE reads do not conform to Trinity naming convention (need either /1 /2 or std illumina), fixing...")
# work on forward reads first
if forward.endswith('.gz'):
Funzip(forward, forward+'.bak', cpus)
SafeRemove(forward)
else:
os.rename(forward, forward+'.bak')
# now add ending to reads
with open(forward+'.fix', 'w') as forwardfix:
for title, seq, qual in FastqGeneralIterator(open(forward+'.bak')):
title = title+'/1'
forwardfix.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
Fzip(forward+'.fix', forward, cpus)
SafeRemove(forward+'.bak')
SafeRemove(forward+'.fix')
# now work on reverse reads
if reverse.endswith('.gz'):
Funzip(reverse, reverse+'.bak', cpus)
else:
os.rename(reverse, reverse+'.bak')
with open(reverse+'.fix', 'w') as reversefix:
for title, seq, qual in FastqGeneralIterator(open(reverse+'.bak')):
title = title+'/2'
reversefix.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
# zip back up to original file
Fzip(reverse+'.fix', reverse, cpus)
SafeRemove(reverse+'.bak')
SafeRemove(reverse+'.fix')
'''
else:
log.debug('FASTQ headers seem compatible with Trinity')
return 0
def SafeRemove(input):
if os.path.isdir(input):
shutil.rmtree(input)
elif os.path.isfile(input):
os.remove(input)
else:
return
def runSubprocess(cmd, dir, logfile):
logfile.debug(' '.join(cmd))
proc = subprocess.Popen(cmd, cwd=dir, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
if stdout:
logfile.error(stdout.decode("utf-8"))
if stderr:
logfile.error(stderr.decode("utf-8"))
sys.exit(1)
else:
if stdout:
logfile.debug(stdout.decode("utf-8"))
if stderr:
logfile.debug(stderr.decode("utf-8"))
def runSubprocess2(cmd, dir, logfile, output):
# function where output of cmd is STDOUT, capture STDERR in logfile
logfile.debug(' '.join(cmd))
with open(output, 'w') as out:
proc = subprocess.Popen(cmd, cwd=dir, stdout=out,
stderr=subprocess.PIPE)
stderr = proc.communicate()
if proc.returncode != 0:
logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
if stderr:
logfile.error(stderr)
sys.exit(1)
else:
if stderr:
if stderr[0] is not None:
logfile.debug(stderr)
def runSubprocess3(cmd, dir, logfile):
# function where STDOUT pipes to FNULL, capture STDERR in logfile
FNULL = open(os.devnull, 'w')
logfile.debug(' '.join(cmd))
proc = subprocess.Popen(cmd, cwd=dir, stdout=FNULL, stderr=subprocess.PIPE)
stderr = proc.communicate()
if stderr:
logfile.debug(stderr)
def runSubprocess4(cmd, dir, logfile):
    # function where STDOUT and STDERR are captured and only printed to the console on error
logfile.debug(' '.join(cmd))
proc = subprocess.Popen(cmd, cwd=dir, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
if stdout:
print(stdout)
if stderr:
print(stderr)
sys.exit(1)
def runSubprocess5(cmd, dir, logfile, input, output):
# function where STDOUT to file, STDIN as input, STDERR pipes to logfile
logfile.debug(' '.join(cmd))
with open(input) as infile:
with open(output, 'w') as out:
proc = subprocess.Popen(cmd, cwd=dir, stdin=infile, stdout=out,
stderr=subprocess.PIPE)
stderr = proc.communicate()
if proc.returncode != 0:
logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
if stderr:
logfile.error(stderr)
sys.exit(1)
else:
if stderr:
if stderr[0] is not None:
logfile.debug(stderr)
def runSubprocess6(cmd, dir, logfile, logfile2):
    # function where cmd is captured in logfile, but both stdout and stderr are piped to an additional logfile
logfile.debug(' '.join(cmd))
with open(logfile2, 'w') as logout:
proc = subprocess.Popen(cmd, cwd=dir, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
if stdout:
logfile.error(stdout)
if stderr:
logfile.error(stderr)
sys.exit(1)
else:
if stdout:
logout.write(stdout)
if stderr:
logout.write(stderr)
def runSubprocess7(cmd, dir, logfile, output):
    # like runSubprocess2, but appends the cmd output to the output file; capture STDERR in logfile
logfile.debug(' '.join(cmd))
with open(output, 'a') as out:
proc = subprocess.Popen(cmd, cwd=dir, stdout=out,
stderr=subprocess.PIPE)
stderr = proc.communicate()
if proc.returncode != 0:
logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
if stderr:
logfile.error(stderr)
sys.exit(1)
else:
if stderr:
if stderr[0] is not None:
logfile.debug(stderr)
def runSubprocess8(cmd, dir, logfile, output):
    # function where output of cmd is written to a file; STDERR is captured and logged only on error
logfile.debug(' '.join(cmd))
with open(output, 'w') as out:
proc = subprocess.Popen(cmd, cwd=dir, stdout=out,
stderr=subprocess.PIPE)
stderr = proc.communicate()
if proc.returncode != 0:
logfile.error('CMD ERROR: {}'.format(' '.join(cmd)))
if stderr:
logfile.error(stderr)
sys.exit(1)
def evmGFFvalidate(input, evmpath, logfile):
Validator = os.path.join(evmpath, 'EvmUtils', 'gff3_gene_prediction_file_validator.pl')
cmd = ['perl', Validator, os.path.realpath(input)]
logfile.debug(' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = proc.communicate()
if not stderr:
return True
else:
logfile.error(stderr.rstrip())
return False
def hashfile(afile, hasher, blocksize=65536):
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.digest()
def sha256_check(file1, file2):
files = [file1, file2]
output = [(fname, hashfile(open(fname, 'rb'), hashlib.sha256()))
for fname in files]
if output[0][1] == output[1][1]:
return True
else:
return False
def readBlocks(source, pattern):
buffer = []
for line in source:
try:
line = line.decode('utf-8')
except AttributeError:
line = line
if line.startswith(pattern):
if buffer:
yield buffer
buffer = [line]
else:
buffer.append(line)
yield buffer
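# Example: split FASTA-style lines into per-record blocks
# (illustrative data; readBlocks yields lists of lines, starting a new block at each pattern match):
#   list(readBlocks(['>a\n', 'AAA\n', '>b\n', 'CCC\n'], '>'))
#   ->  [['>a\n', 'AAA\n'], ['>b\n', 'CCC\n']]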
def readBlocks2(source, startpattern, endpattern):
buffer = []
for line in source:
try:
line = line.decode('utf-8')
except AttributeError:
line = line
if line.startswith(startpattern) or line.endswith(endpattern):
if buffer:
yield buffer
buffer = [line]
else:
buffer.append(line)
yield buffer
def empty_line_sep(line):
return line == '\n'
def get_parent_dir(directory):
return os.path.dirname(directory)
def getSize(filename):
st = os.stat(filename)
return st.st_size
def checkinputs(filename):
if not os.path.isfile(filename):
log.error("%s is not a valid file, exiting" % filename)
sys.exit(1)
size = getSize(filename)
if size < 2: # this is 1 character...
log.error("%s appears to be empty, exiting" % filename)
sys.exit(1)
def make_tarfile(output_filename, source_dir):
import tarfile
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
def multipleReplace(text, wordDict):
for key in wordDict:
text = text.replace(key, wordDict[key])
return text
def which_path(file_name):
for path in os.environ["PATH"].split(os.pathsep):
full_path = os.path.join(path, file_name)
if os.path.exists(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
def which(name):
try:
with open(os.devnull) as devnull:
diff = ['tbl2asn', 'dustmasker', 'mafft', 'signalp',
'proteinortho', 'ete3', 'phyml', 'phobius.pl', 'tantan']
if not any(name in x for x in diff):
subprocess.Popen([name], stdout=devnull,
stderr=devnull, universal_newlines=True).communicate()
else:
if name == 'signalp':
subprocess.Popen([name, '-V'], stdout=devnull,
stderr=devnull, universal_newlines=True).communicate()
elif name == 'dustmasker':
subprocess.Popen(
[name, '-version-full'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
elif name == 'tbl2asn':
subprocess.Popen(
[name, '--help'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
elif name == 'raxmlHPC-PTHREADS':
subprocess.Popen(
[name, '-version'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
elif name == 'ete3':
subprocess.Popen(
[name, 'version'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
elif name == 'phobius.pl':
subprocess.Popen([name, '-h'], stdout=devnull,
stderr=devnull, universal_newlines=True).communicate()
else:
subprocess.Popen(
[name, '--version'], stdout=devnull, stderr=devnull, universal_newlines=True).communicate()
except OSError as e:
if e.errno == errno.ENOENT:
return False
return True
def vers_tblastn():
p1 = subprocess.Popen(['tblastn', '-version'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
vers = p1.communicate()[0].split('+')[0]
vers = vers.split(' ')[-1]
return vers
def CheckDependencies(input):
missing = []
for p in input:
if which(p) is False:
missing.append(p)
if missing != []:
error = ", ".join(missing)
try:
log.error(
"Missing Dependencies: %s. Please install missing dependencies and re-run script" % (error))
except NameError:
print("Missing Dependencies: %s. Please install missing dependencies and re-run script" % (error))
sys.exit(1)
def checkannotations(input):
if input and os.path.isfile(input):
filesize = getSize(input)
if int(filesize) < 1:
return False
else:
return True
elif input and os.path.islink(input):
return True
else:
return False
def line_count(fname):
with open(fname) as f:
i = -1
for i, l in enumerate(f):
pass
return i + 1
def countfasta(input):
count = 0
with open(input, 'r') as f:
for line in f:
if line.startswith(">"):
count += 1
return count
def getGeneBasename(fastafile):
bases = []
with open(fastafile, 'r') as input:
for line in input:
line = line.replace('\n', '')
if line.startswith('>'):
line = line.replace('>', '')
transcript, gene = line.split(' ')
if '_' in gene:
Base = line.split('_')[0]+'_'
elif '-' in gene:
Base = line.split('-')[0]
else:
Base = gene
if not Base in bases:
bases.append(Base)
return bases
def get_version():
from pkg_resources import get_distribution
__version__ = get_distribution('funannotate').version
return __version__
def ver_tuple(z):
return tuple([int(x) for x in z.split('.') if x.isdigit()])
def cmp(a, b):
return (a > b) - (a < b)
def ver_cmp(a, b):
return cmp(ver_tuple(a), ver_tuple(b))
def versionCheck(a, b):
if ver_cmp(a, b) == -1:
return False
else:
return True
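# Illustration of the numeric version comparison helpers:
#   ver_tuple('0.9.24')               ->  (0, 9, 24)
#   versionCheck('0.9.24', '0.9.10')  ->  True   (0.9.24 >= 0.9.10)
#   versionCheck('0.9.9', '0.9.10')   ->  False  (a plain string compare would get this wrong)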
def checkAugustusFunc():
    '''
    function to test that the Augustus installation is working; note that a
    segmentation fault still results in a pass
    '''
functional = False
p1 = subprocess.Popen(['augustus', '--version'], stderr=subprocess.STDOUT,
stdout=subprocess.PIPE, universal_newlines=True).communicate()
stdout, stderr = p1
if isinstance(stdout, str):
try:
stdout = stdout.decode('ascii', 'ignore').encode('ascii')
except AttributeError:
pass
version = stdout.split(' is ')[0]
model = os.path.join(parentdir, 'config', 'EOG092C0B3U.prfl')
if not os.path.isfile(model):
log.error("Testing Augustus Error: installation seems wrong, can't find prfl model")
sys.exit(1)
profile = '--proteinprofile='+model
proc = subprocess.Popen(['augustus', '--species=anidulans', profile, os.path.join(parentdir, 'config', 'busco_test.fa')],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = proc.communicate()
stderr = stderr.strip()
if isinstance(stdout, str):
try:
stdout = stdout.decode('ascii', 'ignore').encode('ascii')
except AttributeError:
pass
stdout = stdout.strip().split('\n')
if stderr.startswith('augustus: ERROR'):
print(stderr)
return version, functional
else:
for line in stdout:
line = line.strip()
if line.startswith('# start gene g1'):
functional = True
return version, functional
def maker2evm(inputfile, outputdir):
tr = os.path.join(outputdir, 'transcript_alignments.gff3')
pr = os.path.join(outputdir, 'protein_alignments.gff3')
gr = os.path.join(outputdir, 'gene_predictions.gff3')
with open(tr, 'w') as trout:
with open(pr, 'w') as prout:
with open(gr, 'w') as grout:
with open(inputfile, 'r') as input:
for line in input:
if line.startswith('#'):
continue
if 'trnascan' in line:
continue
cols = line.split('\t')
if 'maker' in cols[1]:
grout.write(line)
elif 'protein2genome' in cols[1]:
if 'match_part' in cols[2]:
cols[2] = 'nucleotide_to_protein_match'
cols[5] = '.'
prout.write('\t'.join(cols))
elif 'est2genome' in cols[1]:
if 'match_part' in cols[2]:
cols[2] = 'EST_match'
cols[5] = '.'
trout.write('\t'.join(cols))
elif 'cdna2genome' in cols[1]:
if 'match_part' in cols[2]:
cols[2] = 'EST_match'
cols[5] = '.'
trout.write('\t'.join(cols))
elif 'pred_gff' in cols[1]:
if 'match_part' in cols[2]:
cols[1] = cols[1].replace('pred_gff:', '')
cols[2] = 'EST_match'
cols[5] = '100.0'
trout.write('\t'.join(cols))
def flatten(l):
flatList = []
for elem in l:
# if an element of a list is a list
# iterate over this list and add elements to flatList
if type(elem) == list:
for e in elem:
flatList.append(e)
else:
flatList.append(elem)
return flatList
def fmtcols(mylist, cols):
justify = []
for i in range(0, cols):
length = max([len(x) for x in mylist[i::cols]])
length += 2
ljust = [x.ljust(length) for x in mylist[i::cols]]
justify.append(ljust)
justify = flatten(justify)
    num_lines = len(mylist) // cols
    lines = (' '.join(justify[i::num_lines])
             for i in range(0, num_lines))
return "\n".join(lines)
def list_columns(obj, cols=4, columnwise=True, gap=4):
"""
Print the given list in evenly-spaced columns.
Parameters
----------
obj : list
The list to be printed.
cols : int
The number of columns in which the list should be printed.
columnwise : bool, default=True
If True, the items in the list will be printed column-wise.
If False the items in the list will be printed row-wise.
gap : int
The number of spaces that should separate the longest column
item/s from the next column. This is the effective spacing
between columns based on the maximum len() of the list items.
"""
sobj = [str(item) for item in obj]
if cols > len(sobj):
cols = len(sobj)
max_len = max([len(item) for item in sobj])
if columnwise:
cols = int(math.ceil(float(len(sobj)) / float(cols)))
plist = [sobj[i: i+cols] for i in range(0, len(sobj), cols)]
if columnwise:
if not len(plist[-1]) == cols:
plist[-1].extend(['']*(len(sobj) - len(plist[-1])))
plist = list(zip(*plist))
printer = '\n'.join([
''.join([c.ljust(max_len + gap) for c in p])
for p in plist])
return printer
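# Example usage (illustrative): format a list of IDs into a 4-column string
#   print(list_columns(['EOG1', 'EOG2', 'EOG3', 'EOG4', 'EOG5'], cols=4, gap=2))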
def roundup(x):
return x if x % 100 == 0 else x + 100 - x % 100
def maxabs(a, axis=None):
    """Return slice of a, keeping only those values that are furthest away
    from 0 along axis"""
    import numpy as np
maxa = a.max(axis=axis)
mina = a.min(axis=axis)
p = abs(maxa) > abs(mina) # bool, or indices where +ve values win
n = abs(mina) > abs(maxa) # bool, or indices where -ve values win
if axis is None:
if p:
return maxa
else:
return mina
shape = list(a.shape)
shape.pop(axis)
out = np.zeros(shape, dtype=a.dtype)
out[p] = maxa[p]
out[n] = mina[n]
return out
def setupLogging(LOGNAME):
global log
if 'darwin' in sys.platform:
stdoutformat = logging.Formatter(
colr.GRN+'%(asctime)s'+colr.END+': %(message)s', datefmt='[%b %d %I:%M %p]')
else:
stdoutformat = logging.Formatter(
'%(asctime)s: %(message)s', datefmt='[%b %d %I:%M %p]')
fileformat = logging.Formatter(
'%(asctime)s: %(message)s', datefmt='[%x %H:%M:%S]')
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
sth = logging.StreamHandler()
sth.setLevel(logging.INFO)
sth.setFormatter(stdoutformat)
log.addHandler(sth)
fhnd = logging.FileHandler(LOGNAME)
fhnd.setLevel(logging.DEBUG)
fhnd.setFormatter(fileformat)
log.addHandler(fhnd)
def renameGFF(input, newname, output):
contigs = set()
with open(output, 'w') as outfile:
with open(input, 'r') as infile:
for line in infile:
if line.startswith('>'): # remove any fasta sequences
continue
if line.startswith('#'):
outfile.write(line)
else:
cols = line.split('\t')
# make sure it has correct columns to be GFF
if len(cols) == 9:
contigs.add(cols[0])
outfile.write('{}\t{}\t{}'.format(cols[0], newname,
'\t'.join(cols[2:])))
return contigs
def countGFFgenes(input):
count = 0
if os.path.exists(input):
with open(input, 'r') as f:
for line in f:
if "\tgene\t" in line:
count += 1
return count
def countEVMpredictions(input):
Counts = {'total': 0}
with open(input, 'r') as f:
for line in f:
if line.startswith('\n') or line.startswith('#'):
continue
line = line.strip()
contig, source, feature, start, end, blank, strand, score, info = line.split(
'\t')
if feature == 'gene':
Counts['total'] += 1
if not source in Counts:
Counts[source] = 1
else:
Counts[source] += 1
return Counts
def countGMAPtranscripts(input):
count = 0
with open(input, 'r') as f:
for line in f:
if line.startswith('###'):
count += 1
return count
def runMultiProgress(function, inputList, cpus, progress=True):
# setup pool
p = multiprocessing.Pool(cpus)
# setup results and split over cpus
tasks = len(inputList)
results = []
for i in inputList:
results.append(p.apply_async(function, [i]))
    # poll results every second and refresh the progress display
if progress:
while True:
incomplete_count = sum(1 for x in results if not x.ready())
if incomplete_count == 0:
break
sys.stdout.write(" Progress: %.2f%% \r" %
(float(tasks - incomplete_count) / tasks * 100))
sys.stdout.flush()
time.sleep(1)
p.close()
p.join()
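# Example usage (illustrative sketch; 'run_hmmscan' and 'protein_chunks' are hypothetical —
# any picklable single-argument function and list of inputs will work):
#   runMultiProgress(run_hmmscan, protein_chunks, 8)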
def runMultiNoProgress(function, inputList, cpus):
# setup pool
p = multiprocessing.Pool(cpus)
# setup results and split over cpus
results = []
for i in inputList:
results.append(p.apply_async(function, [i]))
p.close()
p.join()
def cleanProteins(inputList, output):
# expecting a list of protein fasta files for combining/cleaning headers
    # make sure sequence names are not duplicated
    # drop proteins shorter than 50 amino acids
seen = set()
with open(output, 'w') as out:
for x in inputList:
with open(x, 'r') as input:
for rec in SeqIO.parse(input, 'fasta'):
if len(rec.seq) < 50:
continue
# explicitly check for swissprot and jgi
if rec.id.startswith('sp|') or rec.id.startswith('jgi|'):
ID = rec.id.split('|')[-1]
else:
ID = rec.id
                    # now strip problematic characters from the ID
badshit = [':', ';', '/', '\\', '.', ',', '%']
for i in badshit:
if i in ID:
ID = ID.replace(i, '_')
if not ID in seen:
seen.add(ID)
else:
# means that ID has already been used, so add a number to it, auto increment
counter = 1
while ID in seen:
oldnum = counter-1
ID = ID.replace('_'+str(oldnum),
'') + '_'+str(counter)
counter += 1
seen.add(ID)
out.write('>%s\n%s\n' % (ID, rec.seq))
def genemark2busco(genemark, bedfile, output):
    # load the BED coordinates into an InterLap object, then write out the
    # GeneMark gene models (EVM GFF3 format) that intersect those regions
counter = 0
inter = bed2interlap(bedfile)
with open(output, 'w') as outfile:
with open(genemark, 'r') as infile:
for gene_model in readBlocks(infile, '\n'):
if len(gene_model) < 2:
continue
if gene_model[0] == '\n':
cols = gene_model[1].split('\t')
else:
cols = gene_model[0].split('\t')
coords = [int(cols[3]), int(cols[4])]
chr = cols[0]
if interlapIntersect(coords, chr, inter):
counter += 1
outfile.write('{}'.format(''.join(gene_model)))
return counter
def evidence2busco(evidence, bedfile, output):
counter = 0
inter = bed2interlap(bedfile)
with open(output, 'w') as outfile:
with open(evidence, 'r') as infile:
for hit in readBlocks(infile, '\n'):
hit = [x for x in hit if x != '\n']
if len(hit) == 1:
start = int(hit[0].split('\t')[3])
end = int(hit[0].split('\t')[4])
coords = [start, end]
chr = hit[0].split('\t')[0]
elif len(hit) > 1:
start = int(hit[0].split('\t')[3])
end = int(hit[-1].split('\t')[4])
chr = hit[0].split('\t')[0]
if start < end:
coords = [start, end]
else:
coords = [end, start]
else:
continue
if interlapIntersect(coords, chr, inter):
counter += 1
outfile.write('{}\n'.format(''.join(hit)))
return counter
def fix_busco_naming(busco_infile, genome, augustus, gffout, ploidy=1,
proteins=False):
def group_separator(line):
return line == '\n'
# parse the busco table into dictionary format
busco_complete = {}
passing = ['Complete']
if ploidy > 1:
passing.append('Duplicated')
with open(busco_infile, 'r') as buscoinput:
for line in buscoinput:
if line.startswith('#'):
continue
cols = line.split('\t')
if cols[1] in passing:
if not cols[0] in busco_complete:
busco_complete[cols[0]] = cols[2]+':'+cols[3]+'-'+cols[4]
# now parse the augustus input file where gene numbers are likely repeated.
results = []
with open(augustus) as f:
for key, group in itertools.groupby(f, group_separator):
if not key:
results.append(list(group))
    # loop through each gene model, look up the BUSCO name, then rename the model using a counter-based ID plus the BUSCO model name
tmpOut = augustus+'.intermediate'
counter = 0
inverse_busco = {v: k for k, v in list(busco_complete.items())}
with open(tmpOut, 'w') as output:
for i in results:
counter += 1
cols = i[0].split('\t')
lookup = cols[0]+':'+cols[3]+'-'+cols[4]
if lookup in inverse_busco:
name = inverse_busco.get(lookup)
else:
name = 'unknown_model'
ID = cols[8].split(';')[0]
ID = ID.replace('ID=', '')
newID = 'gene'+str(counter)
newblock = ''.join(i)
newblock = newblock.replace('Augustus%20prediction', name)
newblock = newblock.replace(ID, newID)
output.write(newblock+'\n')
#write to GFF3 properly and fix CDS
Genes = {}
Genes = gff2dict(tmpOut, genome, Genes)
dict2gff3(Genes, gffout)
if proteins:
dict2proteins(Genes, proteins)
def gb2output(input, output1, output2, output3):
with open(output1, 'w') as proteins:
with open(output2, 'w') as transcripts:
with open(output3, 'w') as scaffolds:
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
scaffolds.write(">%s\n%s\n" % (record.id, record.seq))
for f in record.features:
if f.type == "CDS":
proteins.write(">%s\n%s\n" % (f.qualifiers['locus_tag'][0], softwrap(
f.qualifiers['translation'][0].rstrip('*'))))
if f.type == "mRNA":
feature_seq = f.extract(record.seq)
transcripts.write(">%s\n%s\n" % (
f.qualifiers['locus_tag'][0], softwrap(feature_seq)))
def sortGFF(input, output, order):
cmd = ['bedtools', 'sort', '-header', '-faidx', order, '-i', input]
with open(output, 'w') as out:
        proc = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE,
                                universal_newlines=True)
        stderr = proc.communicate()[1]
        if stderr:
            log.error(
                "Sort GFF failed, unreferenced scaffold present in gene predictions, check logfile")
            sys.exit(1)
def sortBedproper(input, output):
# sort BED file same as GFF3 files
data = []
with open(input, 'r') as infile:
for line in infile:
if line.startswith('\n'):
continue
line = line.rstrip()
cols = line.split('\t')
data.append(cols)
# we can now sort
sort_data = natsorted(data, key=lambda x: (x[0], int(x[1])))
# now we can write back out to file
with open(output, 'w') as outfile:
for x in sort_data:
outfile.write('{}\n'.format('\t'.join(x)))
def sortGFFproper(input, output):
# function to sort GFF3 file but maintain gene, mrna, exon, cds order
data = []
features = set()
comments = []
with open(input, 'r') as infile:
for line in infile:
if line.startswith('\n'):
continue
if line.startswith('#'):
comments.append(line)
continue
line = line.rstrip()
cols = line.split('\t')
data.append(cols)
features.add(cols[2])
# build sort order dictionary for features
order_map = {'gene': 0, 'mRNA': 1, 'transcript': 2, 'tRNA': 3, 'ncRNA': 4,
'rRNA': 5, 'pseudogene': 6, 'five_prime_utr': 7,
'five_prime_UTR': 8, 'exon': 9, 'CDS': 10,
'three_prime_utr': 11, 'three_prime_UTR': 12}
idx = len(order_map)
for x in features:
if x not in order_map:
order_map[x] = idx
idx += 1
# we can now sort
sort_data = natsorted(data, key=lambda x: (x[0], int(x[3]), order_map[x[2]]))
# now we can write back out to file
with open(output, 'w') as outfile:
for y in comments:
outfile.write(y)
for x in sort_data:
outfile.write('{}\n'.format('\t'.join(x)))
def checkGenBank(input):
count = 0
with open(input, 'r') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
for f in record.features:
if f.type == 'CDS':
count += 1
if count == 0:
return False
else:
return True
def countGenBank(input):
cds = 0
trna = 0
dnas = 0
with open(input, 'r') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
dnas += 1
for f in record.features:
if f.type == 'CDS':
cds += 1
elif f.type == 'tRNA':
trna += 1
return dnas, cds, trna
def checkFastaHeaders(input, limit):
length = 0
names = []
with open(input, 'r') as fasta:
for line in fasta:
if line.startswith('>'):
line = line.replace('\n', '')
ID = line.replace('>', '').strip()
names.append(ID)
                # subtract one character for the fasta '>' caret
headlen = len(line) - 1
if headlen > length:
length = headlen
if length > int(limit):
return (False, names)
else:
return (True, names)
def analyzeAssembly(input, header_max=16):
from Bio.SeqIO.FastaIO import SimpleFastaParser
bad_names = []
IUPAC = {'A', 'C', 'G', 'T', 'R', 'Y',
'S', 'W', 'K', 'M', 'B', 'D',
'H', 'V', 'N'}
nuc_errors = {}
suspect = {}
with open(input, 'r') as infile:
for title, seq in SimpleFastaParser(infile):
if len(title) > header_max:
bad_names.append(title)
            # count occurrences of each nucleotide character in the contig
characters = {}
for nuc in seq:
nuc = nuc.upper()
if not nuc in characters:
characters[nuc] = 1
else:
characters[nuc] += 1
# check for non IUPAC characters
errors = []
for k, v in characters.items():
if k not in IUPAC:
errors.append((k, v))
if len(errors) > 0:
nuc_errors[title] = errors
            # if there are fewer than 4 distinct characters in the scaffold, it's suspect
if len(characters) < 4:
suspect[title] = characters
return bad_names, nuc_errors, suspect
def BamHeaderTest(genome, mapping):
# get list of fasta headers from genome
genome_headers = []
with open(genome, 'r') as input:
for rec in SeqIO.parse(input, 'fasta'):
if rec.id not in genome_headers:
genome_headers.append(rec.id)
# get list of fasta headers from BAM
bam_headers = []
cmd = ['samtools', 'idxstats', os.path.realpath(mapping)]
for line in execute(cmd):
line = line.rstrip()
chr, length, mapped, unmapped = line.split('\t')[:4]
if chr != '*':
bam_headers.append(chr)
    # compare the lists; if any BAM header is not in the genome headers, log the bad names and return False
genome_headers = set(genome_headers)
diffs = [x for x in bam_headers if x not in genome_headers]
if len(diffs) > 0:
log.debug(
"ERROR: These BAM headers not found in genome FASTA headers\n%s" % ','.join(diffs))
return False
else:
return True
def mapCount(input, location_dict, output):
Counts = {}
for aln in execute(['samtools', 'view', os.path.realpath(input)]):
cols = aln.split('\t')
if not cols[2] in Counts:
Counts[cols[2]] = 1
else:
Counts[cols[2]] += 1
with open(output, 'w') as outfile:
outfile.write("#mRNA-ID\tgene-ID\tLocation\tTPM\n")
for k, v in natsorted(list(location_dict.items())):
if k in Counts:
tpm = Counts.get(k)
else:
tpm = 0
geneID = v[0]
location = v[1]
outfile.write('{:}\t{:}\t{:}\t{:.2f}\n'.format(
k, geneID, location, float(tpm)))
def tokenizeString(aString, separators):
    # separators is an array of strings used to split the string
    # sort separators by ascending length so the longest matching separator wins (it is assigned last)
separators.sort(key=len)
listToReturn = []
i = 0
while i < len(aString):
theSeparator = ""
for current in separators:
if current == aString[i:i+len(current)]:
theSeparator = current
if theSeparator != "":
listToReturn += [theSeparator]
i = i + len(theSeparator)
else:
if listToReturn == []:
listToReturn = [""]
if(listToReturn[-1] in separators):
listToReturn += [""]
listToReturn[-1] += aString[i]
i += 1
return listToReturn
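# Example: tokenize a minimap2 cs tag into operator/value tokens (illustrative data):
#   tokenizeString(':32+ag:10', [':', '*', '+', '-', '~'])
#   ->  [':', '32', '+', 'ag', ':', '10']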
def bam2gff3(input, output):
count = 0
with open(output, 'w') as gffout:
gffout.write('##gff-version 3\n')
for aln in execute(['samtools', 'view', os.path.realpath(input)]):
cols = aln.split('\t')
if cols[1] == '0':
strand = '+'
elif cols[1] == '16':
strand = '-'
else:
continue
cs = None
nm = None
tags = cols[11:]
if not tags:
continue
for x in tags:
if x.startswith('cs:'):
cs = x.replace('cs:Z:', '')
if x.startswith('NM:'):
nm = int(x.split(':')[-1])
if nm is None or cs is None:
continue
matches = 0
ProperSplice = True
splitter = []
exons = [int(cols[3])]
position = int(cols[3])
query = [1]
querypos = 0
num_exons = 1
gaps = 0
splitter = tokenizeString(cs, [':', '*', '+', '-', '~'])
for i, x in enumerate(splitter):
if x == ':':
matches += int(splitter[i+1])
position += int(splitter[i+1])
querypos += int(splitter[i+1])
elif x == '-':
gaps += 1
elif x == '+':
gaps += 1
querypos += len(splitter[i+1])
elif x == '~':
if cols[1] == '0':
if splitter[i+1].startswith('gt') and splitter[i+1].endswith('ag'):
ProperSplice = True
elif splitter[i+1].startswith('at') and splitter[i+1].endswith('ac'):
ProperSplice = True
else:
ProperSplice = False
elif cols[1] == '16':
if splitter[i+1].startswith('ct') and splitter[i+1].endswith('ac'):
ProperSplice = True
elif splitter[i+1].startswith('gt') and splitter[i+1].endswith('at'):
ProperSplice = True
else:
ProperSplice = False
num_exons += 1
exons.append(position)
query.append(querypos)
query.append(querypos+1)
intronLen = int(splitter[i+1][2:-2])
position += intronLen
exons.append(position)
# add last Position
exons.append(position)
query.append(len(cols[9]))
# convert exon list into list of exon tuples
exons = list(zip(exons[0::2], exons[1::2]))
queries = list(zip(query[0::2], query[1::2]))
if ProperSplice:
mismatches = nm - gaps
pident = 100 * (matches / (matches + mismatches))
if pident < 80:
continue
count += 1
for i, exon in enumerate(exons):
start = exon[0]
end = exon[1]-1
if strand == '+':
qstart = queries[i][0]
qend = queries[i][1]
else:
qstart = len(cols[9]) - queries[i][1] + 1
qend = len(cols[9]) - queries[i][0] + 1
gffout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:.2f}\t{:}\t{:}\tID={:};Target={:} {:} {:}\n'.format(
cols[2], 'genome', 'cDNA_match', start, end, pident, strand, '.', cols[0], cols[0], qstart, qend))
return count
def bam2ExonsHints(input, gff3, hints):
count = 0
with open(gff3, 'w') as gffout:
gffout.write('##gff-version 3\n')
with open(hints, 'w') as hintsout:
num = -1
for aln in execute(['samtools', 'view', os.path.realpath(input)]):
num += 1
cols = aln.split('\t')
if cols[1] == '0':
strand = '+'
elif cols[1] == '16':
strand = '-'
else:
continue
cs = None
nm = None
tags = cols[11:]
for x in tags:
if x.startswith('cs:'):
cs = x.replace('cs:Z:', '')
if x.startswith('NM:'):
nm = int(x.split(':')[-1])
if nm is None or cs is None:
continue
matches = 0
ProperSplice = True
splitter = []
exons = [int(cols[3])]
position = int(cols[3])
query = [1]
querypos = 0
num_exons = 1
gaps = 0
splitter = tokenizeString(cs, [':', '*', '+', '-', '~'])
for i, x in enumerate(splitter):
if x == ':':
matches += int(splitter[i+1])
position += int(splitter[i+1])
querypos += int(splitter[i+1])
elif x == '-':
gaps += 1
elif x == '+':
gaps += 1
querypos += len(splitter[i+1])
elif x == '~':
                        if cols[1] == '0':
if splitter[i+1].startswith('gt') and splitter[i+1].endswith('ag'):
ProperSplice = True
elif splitter[i+1].startswith('at') and splitter[i+1].endswith('ac'):
ProperSplice = True
else:
ProperSplice = False
break
                        elif cols[1] == '16':
if splitter[i+1].startswith('ct') and splitter[i+1].endswith('ac'):
ProperSplice = True
elif splitter[i+1].startswith('gt') and splitter[i+1].endswith('at'):
ProperSplice = True
else:
ProperSplice = False
break
num_exons += 1
exons.append(position)
query.append(querypos)
query.append(querypos+1)
intronLen = int(splitter[i+1][2:-2])
position += intronLen
exons.append(position)
# add last Position
exons.append(position)
query.append(len(cols[9]))
# convert exon list into list of exon tuples
exons = list(zip(exons[0::2], exons[1::2]))
queries = list(zip(query[0::2], query[1::2]))
introns = []
if len(exons) > 1:
for x, y in enumerate(exons):
try:
introns.append((y[1], exons[x+1][0]-1))
except IndexError:
pass
if ProperSplice:
mismatches = nm - gaps
pident = 100 * (matches / (matches + mismatches))
if pident < 80:
continue
feature = 'EST_match'
if pident > 95:
feature = 'cDNA_match'
count += 1
for i, exon in enumerate(exons):
start = exon[0]
end = exon[1]-1
qstart = queries[i][0]
qend = queries[i][1]
if i == 0 or i == len(exons)-1:
gffout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:.2f}\t{:}\t{:}\tID=minimap2_{:};Target={:} {:} {:} {:}\n'.format(
cols[2], 'genome', feature, start, end, pident, strand, '.', num+1, cols[0], qstart, qend, strand))
hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp=minimap2_{:};pri=4;src=E\n'.format(
cols[2], 'b2h', 'ep', start, end, 0, strand, '.', num+1, cols[0]))
else:
gffout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:.2f}\t{:}\t{:}\tID=minimap2_{:};Target={:} {:} {:} {:}\n'.format(
cols[2], 'genome', feature, start, end, pident, strand, '.', num+1, cols[0], qstart, qend, strand))
hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp=minimap2_{:};pri=4;src=E\n'.format(
cols[2], 'b2h', 'exon', start, end, 0, strand, '.', num+1, cols[0]))
if len(introns) > 0:
for z in introns:
hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp=minimap2_{:};pri=4;src=E\n'.format(
cols[2], 'b2h', 'intron', z[0], z[1], 1, strand, '.', num+1, cols[0]))
return count
def combineTranscripts(minimap, gmap, output):
    '''
    function to combine minimap2 GFF3 and gmap GFF3 files;
    GMAP alignments are renamed while looping because the GFF3 produced by gmap
    is not well formed.
    '''
with open(output, 'w') as out:
if minimap:
with open(minimap, 'r') as mini:
for line in mini:
out.write(line)
else:
out.write('##gff-version 3\n')
with open(gmap, 'r') as gmap_in:
for i, aln in enumerate(readBlocks(gmap_in, '###')):
for x in aln:
if not x.startswith('#'):
contig, source, feature, start, end, score, strand, phase, attributes = x.split(
'\t')
info = attributes.split(';')
for y in info:
if y.startswith('Target='):
Target = y
out.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID=gmap_{:};{:}\n'.format(
contig, source, feature, start, end, score, strand, phase, i+1, Target))
def RevComp(s):
rev_comp_lib = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'U': 'A', 'M': 'K', 'R': 'Y', 'W': 'W',
'S': 'S', 'Y': 'R', 'K': 'M', 'V': 'B', 'H': 'D', 'D': 'H', 'B': 'V', 'X': 'X', 'N': 'N'}
cseq = ''
n = len(s)
s = s.upper()
for i in range(0, n):
c = s[n-i-1]
cseq += rev_comp_lib[c]
return cseq
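# Quick illustration of RevComp (IUPAC-aware reverse complement):
#   RevComp('ATGCN')  ->  'NGCAT'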
def translate(cDNA, strand, phase):
'''
translate cDNA into protein sequence
trying to see if I can speed this up over Biopython
'''
def _split(str, num):
return [str[start:start+num] for start in range(0, len(str), num)]
codon_table = {'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', 'TAA': '*', 'TAG': '*', 'TGA': '*'}
if strand == '-' or strand == -1:
seq = RevComp(cDNA)
else:
seq = cDNA
seq = seq[phase:]
# map seq to proteins
protSeq = []
for i in _split(seq, 3):
if len(i) == 3:
iSeq = i.upper()
if iSeq in codon_table:
aa = codon_table[iSeq]
protSeq.append(aa)
else:
protSeq.append('X')
return ''.join(protSeq)
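# Quick illustration of translate (phase shifts the reading frame before translation):
#   translate('ATGGCCTAA', '+', 0)  ->  'MA*'
#   translate('ATGGCCTAA', '+', 1)  ->  'WP'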
def extend2stop(seqDict, header, coordinates, strand, phase, protLen):
'''
try to extend a CDS lacking a stop to find a stop codon
it will extend a CDS up to 20 codons (60 bp) from the current
frame to find a stop codon, if none is found it will return
the original coordinates
'''
sorted_coordinates = sorted(coordinates, key=lambda tup: tup[0])
if strand == '+':
newStop = sorted_coordinates[-1][1]+60
if newStop > len(seqDict[header]):
newStop = len(seqDict[header])
lastTup = (sorted_coordinates[-1][0], newStop)
if len(sorted_coordinates) > 1:
newCoords = sorted_coordinates[:-1]
newCoords.append(lastTup)
else:
newCoords = [lastTup]
updateCDS = getSeqRegions(seqDict, header, newCoords)
updateProt = translate(updateCDS, strand, phase)
if '*' in updateProt:
num = (updateProt.find('*') - protLen + 1) * 3
finalTup = (sorted_coordinates[-1][0],
sorted_coordinates[-1][1]+num)
if len(sorted_coordinates) > 1:
finalCoords = sorted_coordinates[:-1]
finalCoords.append(finalTup)
else:
finalCoords = [finalTup]
return True, finalCoords
else:
return False, coordinates
else:
newStop = sorted_coordinates[0][0]-60
if newStop < 1:
newStop = 1
lastTup = (newStop, sorted_coordinates[0][1])
newCoords = [lastTup]
if len(sorted_coordinates) > 1:
newCoords += sorted_coordinates[1:]
updateCDS = getSeqRegions(seqDict, header, newCoords)
updateProt = translate(updateCDS, strand, phase)
if '*' in updateProt:
num = (updateProt.find('*') - protLen + 1) * 3
finalTup = (sorted_coordinates[0][0]-num, sorted_coordinates[0][1])
finalCoords = [finalTup]
if len(sorted_coordinates) > 1:
finalCoords += sorted_coordinates[1:]
finalSort = sorted(
finalCoords, key=lambda tup: tup[0], reverse=True)
return True, finalSort
else:
return False, coordinates
def getSeqRegions(SeqRecordDict, header, coordinates):
# takes SeqRecord dictionary or Index, returns sequence string
# coordinates is a list of tuples [(1,10), (20,30)]
result = ''
sorted_coordinates = sorted(coordinates, key=lambda tup: tup[0])
for x in sorted_coordinates:
partial = SeqRecordDict[header][x[0]-1:x[1]]
result += str(partial.seq)
return result
def convertgff2tbl(gff, prefix, fasta, prots, trans, tblout, external=False):
    '''
    function to convert directly from gff to tbl
    '''
    from collections import OrderedDict
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# load GFF annotations into funannotate dictionary
Genes = {}
Genes = gff2dict(gff, fasta, Genes)
# get scaffold names/lengths
scaffLen = {}
with open(fasta, 'r') as seqin:
for record in SeqIO.parse(seqin, 'fasta'):
if not record.id in scaffLen:
scaffLen[record.id] = len(record.seq)
# get partialStart/stop info and load scaffold dictionary with coordinates of Genes
sGenes = sorted(iter(Genes.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
renamedGenes = {}
scaff2genes = {}
counter = 1
for k, v in list(sortedGenes.items()):
if not prefix:
locusTag = k
else:
locusTag = prefix+'_'+str(counter).zfill(6)
if not locusTag in renamedGenes:
renamedGenes[locusTag] = v
if not v['contig'] in scaff2genes:
scaff2genes[v['contig']] = [locusTag]
else:
scaff2genes[v['contig']].append(locusTag)
counter += 1
if external:
log.info('Found {:,} gene models from GFF3 annotation'.format(len(sortedGenes)))
dicts2tbl(renamedGenes, scaff2genes, scaffLen, 'CFMR', '12345', [],
tblout, external=external)
# transcript to geneID dictionary
geneDB = {}
for k, v in list(renamedGenes.items()):
for x in v['ids']:
if not x in geneDB:
geneDB[x] = k
# write to protein and transcripts
with open(prots, 'w') as protout:
with open(trans, 'w') as tranout:
for k, v in natsorted(list(Genes.items())):
if v['pseudo'] and v['pseudo'] is True:
continue
for i, x in enumerate(v['ids']):
try:
Transcript = str(v['transcript'][i])
except IndexError:
print((k, v))
if v['strand'] == '-':
Transcript = RevComp(Transcript)
tranout.write('>%s %s\n%s\n' % (x, k, softwrap(Transcript)))
if v['type'] == 'mRNA':
Prot = v['protein'][i]
if Prot.endswith('*'):
Prot = Prot.rstrip('*')
protout.write('>%s %s\n%s\n' % (x, k, softwrap(Prot)))
return len(Genes), geneDB
def tblfilter(input, remove, output):
'''
function to take an NCBI tbl file and drop gene models present in remove file
'''
# get items to remove list
removeModels = []
with open(remove, 'r') as file:
for line in file:
if line.startswith('#') or line.startswith('\n'):
continue
line = line.strip()
if not line in removeModels:
removeModels.append(line)
# now loop through tbl file and get line positions of gene models
found = []
with open(output, 'w') as outfile:
with open(input, 'r') as infile:
for gene in readBlocks2(infile, '>Feature', '\tgene\n'):
if gene[0].startswith('>Feature'):
outfile.write(''.join(gene))
else:
locusTag = None
for x in gene:
if x.startswith('\t\t\tlocus_tag\t'):
locusTag = x.split('\t')[-1].rstrip()
if locusTag and not locusTag in removeModels:
outfile.write(''.join(gene))
else:
if not locusTag:
log.debug(
'LocusTag not found parsing NCBI Tbl file (this should not happen)')
print(gene)
else:
found.append(locusTag)
log.debug("Removed %i out of %i gene models from annotation" %
(len(found), len(removeModels)))
s = set(found)
diff = [x for x in removeModels if x not in s]
if len(diff) > 0:
log.debug('Could not find %i gene models:\n %s' %
(len(diff), ','.join(diff)))
def annotations2dict(input, geneDB={}, custom=False):
Annotations = {}
with open(input, 'r') as all_annots:
for line in all_annots:
line = line.replace('\n', '')
ID, refDB, description = line.split('\t')
if description == '': # there is nothing here, so skip
continue
if refDB == 'name' or refDB == 'product':
if len(geneDB) == 0:
if '-T' in ID:
geneID = ID.split('-T')[0]
else:
geneID = ID
else:
if ID in geneDB:
geneID = geneDB[ID]
else:
geneID = ID
else:
geneID = ID
if not geneID in Annotations:
Annotations[geneID] = {refDB: [description]}
else:
if not refDB in Annotations[geneID]:
Annotations[geneID][refDB] = [description]
else:
Annotations[geneID][refDB].append(description)
if custom:
log.info("Parsing custom annotations from {:}".format(custom))
with open(custom, 'r') as custom_annots:
for line in custom_annots:
line = line.rstrip()
try:
if line.count('\t') != 2:
continue
except UnicodeDecodeError:
log.error('Error parsing the custom annotations:')
print(line)
sys.exit(1)
ID, refDB, description = line.split('\t')
if description == '':
continue
if refDB in ['name', 'product', 'gene_synonym']:
if len(geneDB) == 0:
if '-T' in ID:
geneID = ID.split('-T')[0]
else:
geneID = ID
else:
if ID in geneDB:
geneID = geneDB[ID]
else:
geneID = ID
else:
geneID = ID
if not geneID in Annotations:
Annotations[geneID] = {refDB: [description]}
else:
if not refDB in Annotations[geneID]:
Annotations[geneID][refDB] = [description]
elif refDB == 'name':
previousNames = Annotations[geneID][refDB]
if not 'gene_synonym' in Annotations[geneID]:
Annotations[geneID]['gene_synonym'] = previousNames
else:
Annotations[geneID]['gene_synonym'] += previousNames
Annotations[geneID][refDB] = [description]
elif refDB == 'product':
Annotations[geneID][refDB] = [description]
else:
Annotations[geneID][refDB].append(description)
# make sure no synonyms are repeated
for k, v in natsorted(Annotations.items()):
if 'gene_synonym' in v and 'name' in v:
synonym_set = set(v['gene_synonym'])
cleaned = [x for x in synonym_set if x not in v['name']]
Annotations[k]['gene_synonym'] = cleaned
        elif 'gene_synonym' in v:
synonym_set = set(v['gene_synonym'])
Annotations[k]['gene_synonym'] = list(synonym_set)
return Annotations
def updateTBL(input, annotDict, output, prefix=False, newtag=False):
'''
general function to parse ncbi tbl format and add functional annotation
'''
log.debug('Parsing tbl file: {:}'.format(os.path.abspath(input)))
tmpoutput = output+'.tmp'
with open(input, 'r') as infile:
with open(tmpoutput, 'w') as outfile:
for gene in readBlocks2(infile, '>Feature', '\tgene\n'):
transcriptsSeen = []
# transcriptNum = 0
if gene[0].startswith('>Feature'):
outfile.write(''.join(gene))
else:
                    locusTag, locusTagIndex, locusType, geneAnnot, transcriptAnnot = (
                        None,)*5
                    for i, x in enumerate(gene):
                        if x.startswith('\t\t\tlocus_tag\t'):
                            locusTag = x.split('\t')[-1].rstrip()
                            locusTagIndex = i
                    if locusTagIndex is None:
                        outfile.write(''.join(gene))
                        continue
                    try:
                        locusType = gene[locusTagIndex+1].split('\t')[-1].rstrip()
                    except (IndexError, TypeError):
                        print(gene)
if locusType in ['tRNA', 'ncRNA', 'rRNA']:
outfile.write(''.join(gene))
elif locusType == 'mRNA':
if locusTag in annotDict:
geneAnnot = annotDict.get(locusTag)
else:
geneAnnot = {}
for line in gene:
if line.startswith('\t\t\tlocus_tag\t'):
if 'name' in geneAnnot:
outfile.write('\t\t\tgene\t%s\n' %
geneAnnot['name'][0])
if 'gene_synonym' in geneAnnot:
for z in set(geneAnnot['gene_synonym']):
outfile.write('\t\t\tgene_synonym\t%s\n' % z)
outfile.write(line)
elif line.startswith('\t\t\tproduct\t'):
if not 'product' in geneAnnot:
outfile.write(line)
elif line.startswith('\t\t\ttranscript_id\t'):
ID = line.split('|')[-1]
ID = ID.split('_mrna')[0]
if not ID in transcriptsSeen:
transcriptsSeen.append(ID)
transcriptNum = len(transcriptsSeen)
if ID in annotDict:
transcriptAnnot = annotDict.get(ID)
if 'product' in geneAnnot:
Description = geneAnnot['product'][0]
if transcriptNum > 1:
Description = Description + ', variant {:}'.format(transcriptNum)
outfile.write('\t\t\tproduct\t%s\n' % Description)
outfile.write(line)
elif line.startswith('\t\t\tcodon_start\t'):
outfile.write(line)
if transcriptAnnot:
for item in transcriptAnnot:
if item in ['name', 'product', 'gene_synonym']:
continue
for x in set(transcriptAnnot[item]):
outfile.write('\t\t\t%s\t%s\n' % (item, x))
else:
outfile.write(line)
if newtag:
with open(output, 'w') as outfile:
with open(tmpoutput, 'r') as infile:
for line in infile:
if line.startswith('\t\t\tlocus_tag\t'):
line = line.replace('\t'+prefix, '\t'+newtag)
elif line.startswith('\t\t\ttranscript_id\t') or line.startswith('\t\t\tprotein_id\t'):
line = line.replace('|'+prefix, '|'+newtag)
outfile.write(line)
os.remove(tmpoutput)
else:
os.rename(tmpoutput, output)
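# Illustrative usage of updateTBL (paths and locus-tag prefixes are hypothetical);
# annotDict is the Annotations dictionary built above, keyed by gene/transcript ID:
#   updateTBL('genome.tbl', annotations, 'genome.annotated.tbl')
#   # optionally swap the locus_tag prefix in locus_tag/transcript_id/protein_id lines
#   updateTBL('genome.tbl', annotations, 'genome.annotated.tbl',
#             prefix='FUN_', newtag='ABC_')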
def bed2gff3(input, output):
'''
convert repeats bed file into GFF3 format
Contig245 36 69 Repeat_1
Contig245 265 288 Repeat_2
Contig245 477 493 Repeat_3
Contig245 780 797 Repeat_4
Contig245 997 1016 Repeat_5
'''
with open(output, 'w') as outfile:
outfile.write("##gff-version 3\n")
with open(input, 'r') as bedfile:
for line in bedfile:
line = line.strip()
                if not line:
continue
contig, start, end, name = line.split('\t')
start = int(start) + 1 # bed is 0-based, gff 1-based
outfile.write(
'{:}\tRepeatMasker\tdispersed_repeat\t{:}\t{:}\t.\t+\t.\tID={:}\n'.format(contig, start, end, name))
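# A minimal sketch of the conversion above, using the first docstring record: the
# 0-based, half-open BED line "Contig245  36  69  Repeat_1" becomes the 1-based GFF3
# line "Contig245  RepeatMasker  dispersed_repeat  37  69  .  +  .  ID=Repeat_1".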
def findUTRs(cds, mrna, strand):
import numpy
FiveUTR = []
ThreeUTR = []
if cds != mrna:
inter = InterLap()
inter.add(cds)
for i, x in enumerate(mrna):
if not x in inter:
loc = (list(inter)[0][0], list(inter)[-1][1])
diff = numpy.subtract(x, loc)
if diff[0] < 0 and diff[1] < 0:
if strand == '+':
FiveUTR.append(x)
else:
ThreeUTR.append(x)
elif diff[0] > 0 and diff[1] > 0:
if strand == '+':
ThreeUTR.append(x)
else:
FiveUTR.append(x)
else:
hit = list(inter.find(x))
if x == hit[0]:
continue
else:
diff = numpy.subtract(x, hit[0])
if strand == '+':
if int(diff[0]) < 1 and int(diff[1]) == 0:
FiveUTR.append((x[0], hit[0][0]-1))
elif int(diff[1]) > 1 and int(diff[0]) == 0:
ThreeUTR.append((hit[0][1]+1, x[1]))
elif int(diff[0]) < 1 and int(diff[1]) > 1:
FiveUTR.append((x[0], hit[0][0]-1))
ThreeUTR.append((hit[0][1]+1, x[1]))
else:
if diff[0] == 0 and diff[1] > 0:
FiveUTR.append((hit[0][1]+1, x[1]))
elif diff[0] < 0 and diff[1] == 0:
ThreeUTR.append((x[0], hit[0][0]-1))
elif diff[0] < 0 and diff[1] > 0:
FiveUTR.append((hit[0][1]+1, x[1]))
ThreeUTR.append((x[0], hit[0][0]-1))
return FiveUTR, ThreeUTR
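# Worked example (illustrative coordinates): for a plus-strand model with
# mrna = [(100, 150), (180, 500)] and cds = [(201, 500)],
# findUTRs(cds, mrna, '+') returns FiveUTR = [(100, 150), (180, 200)] and
# ThreeUTR = [], i.e. the exon portions upstream of the CDS.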
def dict2nucleotides2(input, prots, trans, cdstrans):
'''
function to generate protein and transcripts from dictionary
'''
# write to protein and transcripts
with open(prots, 'w') as protout:
with open(trans, 'w') as tranout:
with open(cdstrans, 'w') as cdsout:
for k, v in natsorted(list(input.items())):
if 'pseudo' in v:
if v['pseudo']:
continue
if v['type'] == 'mRNA' and not v['CDS']:
continue
if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
continue
for i, x in enumerate(v['ids']):
try:
Transcript = str(v['transcript'][i])
if v['strand'] == '-':
Transcript = RevComp(Transcript)
tranout.write('>{:} {:}\n{:}\n'.format(
x, k, softwrap(Transcript)))
except IndexError:
pass
try:
CDStranscript = str(v['cds_transcript'][i])
if v['strand'] == '-':
CDStranscript = RevComp(CDStranscript)
cdsout.write('>{:} {:}\n{:}\n'.format(
x, k, softwrap(CDStranscript)))
except IndexError:
pass
if v['type'] == 'mRNA':
try:
Prot = v['protein'][i]
except IndexError:
print(('ERROR', k, v))
sys.exit(1)
if Prot.endswith('*'):
Prot = Prot[:-1]
protout.write('>{:} {:}\n{:}\n'.format(
x, k, softwrap(Prot)))
def simpleFastaStats(fasta):
from Bio.SeqUtils import GC
from Bio.SeqIO.FastaIO import SimpleFastaParser
contigs = []
with open(fasta, 'r') as infile:
for header, seq in SimpleFastaParser(infile):
contigs.append(seq)
contigs = sorted(contigs, key=lambda x: len(x), reverse=True)
lengths = [len(x) for x in contigs]
pctGC = round(GC(''.join(contigs)), 2)
#N50
totalLen = sum(lengths)
n50_len = totalLen*.50
n90_len = totalLen*.90
n50 = None
n90 = None
runningSum = 0
for y, c in enumerate(lengths):
runningSum += c
if not n50 and runningSum >= n50_len:
n50 = c
if not n90 and runningSum >= n90_len:
n90 = c
l50 = lengths.index(n50) + 1
l90 = lengths.index(n90) + 1
numContigs = len(contigs)
avg_length = '{:.2f}'.format(totalLen / float(len(contigs)))
return numContigs, totalLen, pctGC, float(avg_length), n50, l50, n90, l90
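# Worked example of the N50/L50 arithmetic above (illustrative): for contig lengths
# [8, 5, 4, 3] (total 20), the running sum first reaches 50% (10) at the second
# contig, so N50 = 5 and L50 = 2; it first reaches 90% (18) at the fourth contig,
# so N90 = 3 and L90 = 4.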
def databases2json(FUNDB):
resources = {}
dbfile = os.path.join(FUNDB, 'funannotate-db-info.txt')
if not os.path.isfile(dbfile):
return resources
else:
with open(dbfile, 'r') as infile:
for line in infile:
line = line.rstrip()
cols = line.split('\t')
resources[cols[0]] = {
'type': cols[1],
'version': cols[3],
'date': cols[4],
'num-records': cols[5]
}
return resources
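# The expected funannotate-db-info.txt is tab-delimited with at least six columns
# (name, type, <unused>, version, date, record count). A hypothetical line such as
#   pfam<TAB>hmmer3<TAB>...<TAB>35.0<TAB>2023-01-01<TAB>19632
# would be parsed into:
#   {'pfam': {'type': 'hmmer3', 'version': '35.0', 'date': '2023-01-01',
#             'num-records': '19632'}}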
def annotation_summary(fasta, output, gff=False, tbl=False, pct=0.90,
transcripts=False, proteins=False, previous=False,
database='.', command='', organism=''):
'''
function to output annotation stats from GFF3 or TBL files
'''
import json
stats = {
'format': 'annotation',
'command': command,
'organism': organism,
'software':{
'name': 'funannotate',
'version': get_version(),
'date': datetime.datetime.today().strftime('%Y-%m-%d'),
'resources': databases2json(database)
},
'assembly': {
'num_contigs': 0,
'length': 0,
'mean_length': 0,
'N50': 0,
'L50': 0,
'N90': 0,
'L90': 0,
'GC_content': 0,
},
'annotation': {
'genes': 0,
'common_name': 0,
'mRNA': 0,
'tRNA': 0,
'ncRNA': 0,
'rRNA': 0,
'avg_gene_length': 0,
'transcript-level': {
'CDS_transcripts': 0,
'CDS_five_utr': 0,
'CDS_three_utr': 0,
'CDS_no_utr': 0,
'CDS_five_three_utr': 0,
'CDS_complete': 0,
'CDS_no-start': 0,
'CDS_no-stop': 0,
'CDS_no-start_no-stop': 0,
'total_exons': 0,
'total_cds_exons': 0,
'multiple_exon_transcript': 0,
'single_exon_transcript': 0,
'avg_exon_length': 0,
'avg_protein_length': 0,
'functional': {
'go_terms': 0,
'interproscan': 0,
'eggnog': 0,
'pfam': 0,
'cazyme': 0,
'merops': 0,
'busco': 0,
'secretion': 0
}
}
}
}
    if previous:  # load stats that cannot be recalculated from the annotation
with open(previous, 'r') as infile:
previousStats = json.load(infile)
try:
stats['annotation']['transcript-level']['pct_exon_overlap_protein_evidence'] = previousStats['annotation']['transcript-level']['pct_exon_overlap_protein_evidence']
except KeyError:
pass
try:
stats['annotation']['transcript-level']['pct_exon_overlap_transcript_evidence'] = previousStats['annotation']['transcript-level']['pct_exon_overlap_transcript_evidence']
except KeyError:
pass
num, tot, gc, avg, n50, l50, n90, l90 = simpleFastaStats(fasta)
stats['assembly']['num_contigs'] = num
stats['assembly']['length'] = tot
stats['assembly']['GC_content'] = gc
stats['assembly']['mean_length'] = avg
stats['assembly']['N50'] = n50
stats['assembly']['L50'] = l50
stats['assembly']['N90'] = n90
stats['assembly']['L90'] = l90
Genes = {}
if tbl:
Genes = tbl2dict(tbl, fasta, Genes)
elif gff:
Genes = gff2dict(gff, fasta, Genes)
if len(Genes) > 0:
protLengths = []
geneLengths = []
exonLengths = []
for k, v in Genes.items():
stats['annotation']['genes'] += 1
gLength = v['location'][1] - v['location'][0]
geneLengths.append(gLength)
if v['type'] == 'tRNA':
stats['annotation']['tRNA'] += 1
elif v['type'] == 'rRNA':
stats['annotation']['rRNA'] += 1
elif v['type'] == 'ncRNA':
stats['annotation']['ncRNA'] += 1
if v['name']:
stats['annotation']['common_name'] += 1
for i in range(0, len(v['ids'])):
if v['type'] == 'mRNA':
stats['annotation']['mRNA'] += 1
stats['annotation']['transcript-level']['CDS_transcripts'] += 1
pLen = len(v['protein'][i])
if v['protein'][i].endswith('*'):
pLen -= 1
protLengths.append(pLen)
if len(v['mRNA'][i]) > 1:
stats['annotation']['transcript-level']['multiple_exon_transcript'] += 1
for y in v['mRNA'][i]:
exon_length = y[1] - y[0]
exonLengths.append(exon_length)
else:
stats['annotation']['transcript-level']['single_exon_transcript'] += 1
stats['annotation']['transcript-level']['total_exons'] += len(v['mRNA'][i])
stats['annotation']['transcript-level']['total_exons'] += len(v['5UTR'][i])
stats['annotation']['transcript-level']['total_exons'] += len(v['3UTR'][i])
stats['annotation']['transcript-level']['total_cds_exons'] += len(v['CDS'][i])
if v['partialStart'][i] and v['partialStop'][i]:
stats['annotation']['transcript-level']['CDS_no-start_no-stop'] += 1
elif v['partialStart'][i]:
stats['annotation']['transcript-level']['CDS_no-start'] += 1
elif v['partialStop'][i]:
stats['annotation']['transcript-level']['CDS_no-stop'] += 1
else:
stats['annotation']['transcript-level']['CDS_complete'] += 1
if len(v['5UTR'][i]) > 0 and len(v['3UTR'][i]) > 0:
stats['annotation']['transcript-level']['CDS_five_three_utr'] += 1
elif len(v['3UTR'][i]) > 0:
stats['annotation']['transcript-level']['CDS_three_utr'] += 1
elif len(v['5UTR'][i]) > 0:
                        stats['annotation']['transcript-level']['CDS_five_utr'] += 1
else:
stats['annotation']['transcript-level']['CDS_no_utr'] += 1
if v['go_terms'][i]:
stats['annotation']['transcript-level']['functional']['go_terms'] += 1
if any(s.startswith('PFAM:') for s in v['db_xref'][i]):
stats['annotation']['transcript-level']['functional']['pfam'] += 1
if any(s.startswith('InterPro:') for s in v['db_xref'][i]):
stats['annotation']['transcript-level']['functional']['interproscan'] += 1
if any(s.startswith('EggNog:') for s in v['note'][i]):
stats['annotation']['transcript-level']['functional']['eggnog'] += 1
if any(s.startswith('CAZy:') for s in v['note'][i]):
stats['annotation']['transcript-level']['functional']['cazyme'] += 1
if any(s.startswith('MEROPS:') for s in v['note'][i]):
stats['annotation']['transcript-level']['functional']['merops'] += 1
if any(s.startswith('BUSCO:') for s in v['note'][i]):
stats['annotation']['transcript-level']['functional']['busco'] += 1
if any(s.startswith('SECRETED:') for s in v['note'][i]):
stats['annotation']['transcript-level']['functional']['secretion'] += 1
stats['annotation']['avg_gene_length'] = round(sum(geneLengths) / float(len(geneLengths)), 2)
stats['annotation']['transcript-level']['avg_protein_length'] = round(sum(protLengths) / float(len(protLengths)), 2)
stats['annotation']['transcript-level']['avg_exon_length'] = round(sum(exonLengths) / float(len(exonLengths)), 2)
exonBED = 'tmp.exon.{}.bed'.format(os.getpid())
if transcripts or proteins:
exonCount = 0
bedtools_cmd = ['bedtools', 'intersect', '-a', exonBED,
'-u', '-f', str(pct), '-s', '-b']
with open(exonBED, 'w') as outfile:
for k, v in Genes.items():
for i in range(0, len(v['ids'])):
for z, x in enumerate(v['mRNA'][i]):
exonCount += 1
outfile.write('{}\t{}\t{}\t{}.exon{}\t.\t{}\n'.format(
v['contig'], x[0]-1, x[1], v['ids'][i], z+1, v['strand']))
if transcripts: # calculate exons covered by transcripts
cmd = bedtools_cmd + [transcripts]
overlapCount = 0
for line in execute(cmd):
overlapCount += 1
pctOverlap = '{:.2f}'.format(overlapCount/exonCount*100)
stats['annotation']['transcript-level']['pct_exon_overlap_transcript_evidence'] = float(pctOverlap)
if proteins: # calculate exons covered by proteins
cmd = bedtools_cmd + [proteins]
overlapCount = 0
for line in execute(cmd):
overlapCount += 1
pctOverlap = '{:.2f}'.format(overlapCount/exonCount*100)
stats['annotation']['transcript-level']['pct_exon_overlap_protein_evidence'] = float(pctOverlap)
if os.path.isfile(exonBED):
os.remove(exonBED)
# write to json format
with open(output, 'w') as outfile:
json.dump(stats, outfile, indent=4)
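# Illustrative call (file names hypothetical): summarize an annotation from GFF3
# and, optionally, report exon coverage by evidence alignments (any file type that
# bedtools intersect accepts for -b, e.g. BED/GFF of aligned transcripts/proteins):
#   annotation_summary('genome.fasta', 'stats.json', gff='genome.gff3',
#                      transcripts='transcript_alignments.gff3',
#                      proteins='protein_alignments.gff3',
#                      database='/path/to/funannotate_db', organism='Genus species')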
def tbl2allout(input, fasta, GFF, Proteins, Transcripts, cdsTranscripts, DNA):
'''
function to convert NCBI tbl format directly to other formats; this will be a replacement
    for GenBank-derived output files and will correctly parse/print the transcripts/proteins
'''
Genes = {}
Genes = tbl2dict(input, fasta, Genes)
# write GFF
dict2gff3(Genes, GFF)
# write to protein and transcripts
dict2nucleotides2(Genes, Proteins, Transcripts, cdsTranscripts)
# copy over DNA fasta file
shutil.copyfile(fasta, DNA)
def tbl2dict(input, fasta, Genes):
'''
    convert directly from NCBI tbl format into the funannotate standardized dictionary,
    avoiding the conversion problems seen with GBK files that have multiple transcripts.
    Once the dictionary is loaded from the tbl format, the other output formats can be
    written directly.
'''
with open(input, 'r') as infile:
contig = ''
for item in readBlocks2(infile, '>Feature', '\tgene\n'):
if item[0].startswith('>Feature'): # this will be contig header block
contig = item[0].rstrip().split(' ')[-1]
else: # these are all gene model blocks
geneID, Name, type, start, end, fivepartial, threepartial, strand, location = (
None,)*9
codon_start = []
transcriptID = []
proteinID = []
synonyms = []
product = []
first, firstpartial, second, secondpartial = (False,)*4
position = None
# check number of transcripts
tNum = 0
for z in item:
if z.startswith('\t\t\ttranscript_id'):
tNum += 1
if tNum > 0:
tNum = int(tNum / 2)
else:
tNum = 1
# setup lists for transcripts
mRNA = [[] for y in range(tNum)]
CDS = [[] for y in range(tNum)]
note = [[] for y in range(tNum)]
dbxref = [[] for y in range(tNum)]
ECnum = [[] for y in range(tNum)]
go_terms = [[] for y in range(tNum)]
fivepartial = [False, ]*tNum
threepartial = [False, ]*tNum
currentNum = 0
for x in item:
exonF, exonR, cdsF, cdsR, cols = (None,)*5
if x.endswith('\tgene\n') and not position:
cols = x.strip().split('\t')
position = 'gene'
if cols[0].startswith('<'):
first = int(cols[0].split('<')[-1])
else:
first = int(cols[0])
if cols[1].startswith('>'):
second = int(cols[1].split('>')[-1])
else:
second = int(cols[1])
if first < second:
start = first
end = second
strand = '+'
else:
start = second
end = first
strand = '-'
location = (start, end)
elif x.startswith('\t\t\tgene\t'):
Name = x.strip().split('\t')[-1]
elif x.startswith('\t\t\tlocus_tag\t'):
geneID = x.strip().split('\t')[-1]
elif x.endswith('\ttRNA\n') and x.count('\t') == 2 and position == 'gene':
type = 'tRNA'
position = 'tRNA'
cols = x.strip().split('\t')
exonF = int(cols[0].replace('<', ''))
exonR = int(cols[1].replace('>', ''))
if strand == '+':
mRNA[currentNum].append((exonF, exonR))
else:
mRNA[currentNum].append((exonR, exonF))
elif x.endswith('\tncRNA\n') and x.count('\t') == 2 and position == 'gene':
type = 'ncRNA'
position = 'ncRNA'
cols = x.strip().split('\t')
exonF = int(cols[0].replace('<', ''))
exonR = int(cols[1].replace('>', ''))
if strand == '+':
mRNA[currentNum].append((exonF, exonR))
else:
mRNA[currentNum].append((exonR, exonF))
elif x.endswith('\trRNA\n') and x.count('\t') == 2 and position == 'gene':
type = 'rRNA'
position = 'rRNA'
cols = x.strip().split('\t')
exonF = int(cols[0].replace('<', ''))
exonR = int(cols[1].replace('>', ''))
if strand == '+':
mRNA[currentNum].append((exonF, exonR))
else:
mRNA[currentNum].append((exonR, exonF))
elif x.endswith('\tmRNA\n') and x.count('\t') == 2:
if position == 'CDS':
currentNum += 1
elif position == 'gene':
type = 'mRNA'
position = 'mRNA'
cols = x.strip().split('\t')
exonF = int(cols[0].replace('<', ''))
exonR = int(cols[1].replace('>', ''))
if strand == '+':
mRNA[currentNum].append((exonF, exonR))
else:
mRNA[currentNum].append((exonR, exonF))
elif x.endswith('\tCDS\n') and x.count('\t') == 2:
position = 'CDS'
cols = x.strip().split('\t')
cdsF = int(cols[0].replace('<', ''))
cdsR = int(cols[1].replace('>', ''))
if strand == '+':
CDS[currentNum].append((cdsF, cdsR))
else:
CDS[currentNum].append((cdsR, cdsF))
elif x.startswith('\t\t\tcodon_start\t'):
cNum = int(x.strip().split('\t')[-1])
codon_start.append(cNum)
elif x.startswith('\t\t\tproduct\t') and position != 'mRNA':
product.append(x.strip().split('\t')[-1])
elif x.startswith('\t\t\ttranscript_id\t'):
tID = x.strip().split('|')[-1]
if '_mrna' in tID:
tID = tID.replace('_mrna', '')
if not tID in transcriptID:
transcriptID.append(tID)
elif x.startswith('\t\t\tprotein_id\t'):
pID = x.strip().split('|')[-1]
if not pID in proteinID:
proteinID.append(pID)
elif x.startswith('\t\t\tgene_synonym\t'):
synonyms.append(x.strip().split('\t')[-1])
elif x.startswith('\t\t\tgo_'): # go terms
go_terms[currentNum].append(
'GO:{:}'.format(x.strip().split('|')[1]))
elif x.startswith('\t\t\tnote\t'):
note[currentNum].append(x.strip().split('\t')[-1])
elif x.startswith('\t\t\tdb_xref\t'):
dbxref[currentNum].append(x.strip().split('\t')[-1])
elif x.startswith('\t\t\tEC_number\t'):
ECnum[currentNum].append(x.strip().split('\t')[-1])
elif position == 'mRNA' and x.count('\t') == 1:
cols = x.strip().split('\t')
exonF = int(cols[0].replace('<', ''))
exonR = int(cols[1].replace('>', ''))
if strand == '+':
mRNA[currentNum].append((exonF, exonR))
else:
mRNA[currentNum].append((exonR, exonF))
elif position in ['tRNA', 'ncRNA', 'rRNA'] and x.count('\t') == 1:
cols = x.strip().split('\t')
exonF = int(cols[0].replace('<', ''))
exonR = int(cols[1].replace('>', ''))
if strand == '+':
mRNA[currentNum].append((exonF, exonR))
else:
mRNA[currentNum].append((exonR, exonF))
elif position == 'CDS' and x.count('\t') == 1:
cols = x.strip().split('\t')
cdsF = int(cols[0].replace('<', ''))
cdsR = int(cols[1].replace('>', ''))
if strand == '+':
CDS[currentNum].append((cdsF, cdsR))
else:
CDS[currentNum].append((cdsR, cdsF))
if not geneID in Genes:
if type in ['tRNA', 'ncRNA', 'rRNA']:
Genes[geneID] = {'name': Name, 'type': type,
'transcript': [],
'cds_transcript': [],
'protein': [], '5UTR': [[]],
'3UTR': [[]],
'codon_start': codon_start,
'ids': [geneID+'-T1'], 'CDS': CDS,
'mRNA': mRNA, 'strand': strand,
'gene_synonym': synonyms,
'location': location,
'contig': contig,
'product': product,
'source': 'funannotate', 'phase': [],
'db_xref': dbxref,
'go_terms': go_terms,
'EC_number': ECnum, 'note': note,
'partialStart': [True],
'partialStop': [True],
'pseudo': False
}
else:
Genes[geneID] = {'name': Name, 'type': type,
'transcript': [], 'cds_transcript': [],
'protein': [], '5UTR': [], '3UTR': [],
'codon_start': codon_start,
'ids': proteinID, 'CDS': CDS,
'mRNA': mRNA, 'strand': strand,
'gene_synonym': synonyms,
'location': location,
'contig': contig, 'product': product,
'source': 'funannotate', 'phase': [],
'db_xref': dbxref,
'go_terms': go_terms,
'EC_number': ECnum, 'note': note,
'partialStart': fivepartial,
'partialStop': threepartial,
'pseudo': False
}
# now we need to sort coordinates, get protein/transcript sequences and capture UTRs
SeqRecords = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
for k, v in list(Genes.items()):
# @nextgenusfs we should clarify or rename this variable to indicate
# i is the i-th transcript, right??
for i in range(0, len(v['ids'])):
if v['type'] in ['mRNA', 'tRNA', 'ncRNA']:
if v['strand'] == '+':
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
else:
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0],
reverse=True)
Genes[k]['mRNA'][i] = sortedExons
mrnaSeq = getSeqRegions(SeqRecords, v['contig'], sortedExons)
Genes[k]['transcript'].append(mrnaSeq)
if v['type'] == 'mRNA':
if v['strand'] == '+':
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
else:
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0],
reverse=True)
cdsSeq = getSeqRegions(SeqRecords, v['contig'], sortedCDS)
# If the translation starts in middle of a codon,
# we need to truncate the CDS seq either at start or end
# depending on strand.
if v['codon_start'][i] > 1:
if v['strand'] == "+":
# drop first N bases based on codon_start
# to reflect the translation frame
cdsSeq = cdsSeq[v['codon_start'][i]-1:]
elif v['strand'] == "-":
# drop last N bases based on codon_start
# to reflect the translation frame (because this is
                            # a reverse-strand gene)
endTrunc = len(cdsSeq) - (v['codon_start'][i] - 1)
cdsSeq = cdsSeq[0:endTrunc]
else:
# could trigger more of a warning/error
print("ERROR strand (%s) is nonsensical for %s"%(v['strand'],k))
Genes[k]['cds_transcript'].append(cdsSeq)
Genes[k]['CDS'][i] = sortedCDS
protSeq = translate(cdsSeq, v['strand'],0)
if protSeq:
Genes[k]['protein'].append(protSeq)
if protSeq.endswith('*'):
Genes[k]['partialStop'][i] = False
else:
Genes[k]['partialStop'][i] = True
if v['codon_start'][i] == 1 and protSeq.startswith('M'):
Genes[k]['partialStart'][i] = False
else:
Genes[k]['partialStart'][i] = True
# get UTRs
try:
FiveUTR, ThreeUTR = findUTRs(sortedCDS, sortedExons,
v['strand'])
Genes[k]['5UTR'].append(FiveUTR)
Genes[k]['3UTR'].append(ThreeUTR)
except ValueError:
print(('ERROR', k, v))
return Genes
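# Illustrative round trip (paths hypothetical): load a tbl file into the
# standardized dictionary and write it back out in other formats:
#   Genes = tbl2dict('genome.tbl', 'genome.fasta', {})
#   dict2gff3(Genes, 'genome.gff3')          # as done in tbl2allout above
#   dict2nucleotides2(Genes, 'proteins.fa', 'transcripts.fa', 'cds.fa')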
def dicts2tbl(genesDict, scaff2genes, scaffLen, SeqCenter, SeqRefNum, skipList,
output, annotations=False, external=False):
'''
function to take funannotate annotation dictionaries and convert to NCBI tbl output
'''
duplicates = 0
pseudo = 0
nocds = 0
# to parse annotations, will need to have access to GO OBO dictionary
goDict = {}
if annotations:
from goatools import obo_parser
# location of go.obo
for item in obo_parser.OBOReader(os.path.join(os.environ["FUNANNOTATE_DB"], 'go.obo')):
goDict[item.id] = {'name': item.name, 'namespace': item.namespace}
def _goFormat(id, goDict=goDict):
# go_function serine-type endopeptidase activity|0004252||IEA
# go_process proteolysis|0006508||IEA
# go_component nucleus|0005634||IEA
if id in goDict:
if goDict[id]['namespace'] == 'biological_process':
base = 'go_process'
elif goDict[id]['namespace'] == 'molecular_function':
base = 'go_function'
elif goDict[id]['namespace'] == 'cellular_component':
base = 'go_component'
reformatted = '\t\t\t{:}\t{:}|{:}||IEA'.format(
base, goDict[id]['name'], id.replace('GO:', ''))
return reformatted
else:
return False
with open(output, 'w') as tbl:
for k, v in natsorted(list(scaff2genes.items())):
tbl.write('>Feature %s\n' % k)
tbl.write('1\t%s\tREFERENCE\n' % scaffLen.get(k))
tbl.write('\t\t\t%s\t%s\n' % (SeqCenter, SeqRefNum))
for genes in v: # now loop through each gene on the scaffold
if genes in skipList:
continue
# single funannotate standard dictionary
geneInfo = genesDict.get(genes)
if 'pseudo' in geneInfo:
if geneInfo['pseudo']:
try:
log.debug('{:} is pseudo, skipping'.format(genes))
except NameError:
print(('{:} is pseudo, skipping'.format(genes)))
pseudo += 1
continue
if geneInfo['type'] == 'mRNA' and not geneInfo['CDS']:
try:
log.debug(
'Skipping {:} because no CDS found.'.format(genes))
except NameError:
print((
'Skipping {:} because no CDS found.'.format(genes)))
pseudo += 1
continue
if geneInfo['type'] == 'mRNA' and not len(geneInfo['ids']) == len(geneInfo['mRNA']) == len(geneInfo['CDS']):
try:
log.debug('Incompatible annotation found: {:}\n{:}'.format(
genes, geneInfo))
except NameError:
print(('Incompatible annotation found: {:}\n{:}'.format(
genes, geneInfo)))
duplicates += 1
continue
if geneInfo['type'] == 'mRNA' and len(geneInfo['CDS']) == 0:
nocds += 1
continue
if geneInfo['type'] is None:
continue
# check for partial models
if True in geneInfo['partialStart']:
ps = '<'
else:
ps = ''
if True in geneInfo['partialStop']:
pss = '>'
else:
pss = ''
# now write gene model
if geneInfo['strand'] == '+':
tbl.write('%s%i\t%s%i\tgene\n' % (
ps, geneInfo['location'][0], pss, geneInfo['location'][1]))
if annotations:
if geneInfo['name']:
tbl.write('\t\t\tgene\t%s\n' % geneInfo['name'])
if geneInfo['gene_synonym']:
for alias in geneInfo['gene_synonym']:
tbl.write('\t\t\tgene_synonym\t%s\n' % alias)
tbl.write('\t\t\tlocus_tag\t%s\n' % genes)
else:
tbl.write('%s%i\t%s%i\tgene\n' % (
ps, geneInfo['location'][1], pss, geneInfo['location'][0]))
if annotations:
if geneInfo['name']:
tbl.write('\t\t\tgene\t%s\n' % geneInfo['name'])
if geneInfo['gene_synonym']:
for alias in geneInfo['gene_synonym']:
tbl.write('\t\t\tgene_synonym\t%s\n' % alias)
tbl.write('\t\t\tlocus_tag\t%s\n' % genes)
                # output the gene models with -T1, -T2, -T3 suffixes ordered by expression values,
                # so first determine the transcript order
order = []
# multiple transcripts, so get order of highest TPM
if len(geneInfo['ids']) > 1:
tpms = []
for num, tpm in enumerate(geneInfo['note']):
for item in tpm:
if item.startswith('TPM:'):
value = float(item.split(':')[-1])
tpms.append((value, num))
if len(tpms) > 0:
for x in sorted(tpms, reverse=True):
order.append(x[1])
else:
order = list(range(0, len(geneInfo['ids'])))
else:
order.append(0)
for num, i in enumerate(order): # now write mRNA and CDS features
# if geneInfo['ids'][i].startswith('evm.model'): #if from predict, rename to match locus_tag
# protein_id = genes+'-T'+str(num+1)
# else:
# protein_id = geneInfo['ids'][i]
if external:
protein_id = geneInfo['ids'][i]
else:
protein_id = genes+'-T'+str(num+1)
if geneInfo['type'] == 'mRNA':
if geneInfo['partialStart'][i] is False:
ps = ''
else:
ps = '<'
if geneInfo['partialStop'][i] is False:
pss = ''
else:
pss = '>'
if geneInfo['strand'] == '+':
for num, exon in enumerate(geneInfo['mRNA'][i]):
                                # single exon, so slightly different method
if num == 0 and num == len(geneInfo['mRNA'][i]) - 1:
tbl.write('%s%s\t%s%s\tmRNA\n' %
(ps, exon[0], pss, exon[1]))
elif num == 0:
tbl.write('%s%s\t%s\tmRNA\n' %
(ps, exon[0], exon[1]))
# this is last one
elif num == len(geneInfo['mRNA'][i]) - 1:
tbl.write('%s\t%s%s\n' %
(exon[0], pss, exon[1]))
else:
tbl.write('%s\t%s\n' % (exon[0], exon[1]))
tbl.write('\t\t\tproduct\t%s\n' %
geneInfo['product'][i])
tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' %
(protein_id))
for num, cds in enumerate(geneInfo['CDS'][i]):
                                # single exon, so slightly different method
if num == 0 and num == len(geneInfo['CDS'][i]) - 1:
tbl.write('%s%s\t%s%s\tCDS\n' %
(ps, cds[0], pss, cds[1]))
elif num == 0:
tbl.write('%s%s\t%s\tCDS\n' %
(ps, cds[0], cds[1]))
# this is last one
elif num == len(geneInfo['CDS'][i]) - 1:
tbl.write('%s\t%s%s\n' %
(cds[0], pss, cds[1]))
else:
tbl.write('%s\t%s\n' % (cds[0], cds[1]))
tbl.write('\t\t\tcodon_start\t%i\n' %
geneInfo['codon_start'][i])
if annotations: # write functional annotation
if geneInfo['EC_number'][i]:
for EC in geneInfo['EC_number'][i]:
tbl.write('\t\t\tEC_number\t%s\n' % EC)
if geneInfo['db_xref'][i]:
for xref in geneInfo['db_xref'][i]:
tbl.write('\t\t\tdb_xref\t%s\n' % xref)
if geneInfo['go_terms'][i]:
for go in geneInfo['go_terms'][i]:
goLine = _goFormat(go)
if goLine:
tbl.write('{:}\n'.format(goLine))
if geneInfo['note'][i]:
for item in geneInfo['note'][i]:
tbl.write('\t\t\tnote\t%s\n' % item)
tbl.write('\t\t\tproduct\t%s\n' %
geneInfo['product'][i])
tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' %
(protein_id))
else: # means this is on crick strand
for num, exon in enumerate(geneInfo['mRNA'][i]):
                                # single exon, so slightly different method
if num == 0 and num == len(geneInfo['mRNA'][i]) - 1:
tbl.write('%s%s\t%s%s\tmRNA\n' %
(ps, exon[1], pss, exon[0]))
elif num == 0:
tbl.write('%s%s\t%s\tmRNA\n' %
(ps, exon[1], exon[0]))
# this is last one
elif num == len(geneInfo['mRNA'][i]) - 1:
tbl.write('%s\t%s%s\n' %
(exon[1], pss, exon[0]))
else:
tbl.write('%s\t%s\n' % (exon[1], exon[0]))
tbl.write('\t\t\tproduct\t%s\n' %
geneInfo['product'][i])
tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' %
(protein_id))
for num, cds in enumerate(geneInfo['CDS'][i]):
                                # single exon, so slightly different method
if num == 0 and num == len(geneInfo['CDS'][i]) - 1:
tbl.write('%s%s\t%s%s\tCDS\n' %
(ps, cds[1], pss, cds[0]))
elif num == 0:
tbl.write('%s%s\t%s\tCDS\n' %
(ps, cds[1], cds[0]))
# this is last one
elif num == (len(geneInfo['CDS'][i]) - 1):
tbl.write('%s\t%s%s\n' %
(cds[1], pss, cds[0]))
else:
tbl.write('%s\t%s\n' % (cds[1], cds[0]))
tbl.write('\t\t\tcodon_start\t%i\n' %
geneInfo['codon_start'][i])
if annotations: # write functional annotation
if geneInfo['EC_number'][i]:
for EC in geneInfo['EC_number'][i]:
tbl.write('\t\t\tEC_number\t%s\n' % EC)
if geneInfo['db_xref'][i]:
for xref in geneInfo['db_xref'][i]:
tbl.write('\t\t\tdb_xref\t%s\n' % xref)
if geneInfo['go_terms'][i]:
for go in geneInfo['go_terms'][i]:
goLine = _goFormat(go)
if goLine:
tbl.write('{:}\n'.format(goLine))
if geneInfo['note'][i]:
for item in geneInfo['note'][i]:
tbl.write('\t\t\tnote\t%s\n' % item)
tbl.write('\t\t\tproduct\t%s\n' %
geneInfo['product'][i])
tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' %
(protein_id))
elif geneInfo['type'] == 'tRNA':
if geneInfo['strand'] == '+':
for num, exon in enumerate(geneInfo['mRNA'][i]):
if num == 0:
tbl.write('%s\t%s\t%s\n' % (
exon[0], exon[1], geneInfo['type']))
else:
tbl.write('%s\t%s\n' % (exon[0], exon[1]))
tbl.write('\t\t\tproduct\t%s\n' %
geneInfo['product'][i])
                            if geneInfo['product'][i] == 'tRNA-Xxx':
                                tbl.write('\t\t\tpseudo\n')
else:
for num, exon in enumerate(geneInfo['mRNA'][i]):
if num == 0:
tbl.write('%s\t%s\t%s\n' % (
exon[1], exon[0], geneInfo['type']))
else:
tbl.write('%s\t%s\n' % (exon[1], exon[0]))
tbl.write('\t\t\tproduct\t%s\n' %
geneInfo['product'][i])
                            if geneInfo['product'][i] == 'tRNA-Xxx':
                                tbl.write('\t\t\tpseudo\n')
elif geneInfo['type'] in ['rRNA', 'ncRNA']:
if geneInfo['strand'] == '+':
tbl.write('%s\t%s\t%s\n' % (
geneInfo['location'][0], geneInfo['location'][1], geneInfo['type']))
tbl.write('\t\t\tproduct\t%s\n' %
geneInfo['product'][i])
else:
tbl.write('%s\t%s\t%s\n' % (
geneInfo['location'][1], geneInfo['location'][0], geneInfo['type']))
tbl.write('\t\t\tproduct\t%s\n' %
geneInfo['product'][i])
if any(i > 0 for i in [duplicates, pseudo, nocds]):
try:
            log.info('Skipped {:,} annotations: {:,} pseudo genes; {:,} no CDS; {:,} duplicated features'.format(
                sum([pseudo, nocds, duplicates]), pseudo, nocds, duplicates))
except NameError:
print(('Skipped {:,} annotations: {:,} pseudo genes; {:,} no CDS; {:,} duplicated features'.format(
sum([pseudo, nocds, duplicates]), pseudo, nocds, duplicates)))
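# Illustrative usage (values hypothetical): gb2parts() below calls this with
# placeholder sequencing-center metadata; a standalone call looks like:
#   dicts2tbl(Genes, {'scaffold_1': ['FUN_000001']}, {'scaffold_1': 150000},
#             'CFMR', '12345', [], 'genome.tbl', annotations=True)
# Note that annotations=True requires goatools and a go.obo under $FUNANNOTATE_DB.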
def GFF2tbl(evm, trnascan, fasta, scaffLen, prefix, Numbering, SeqCenter,
SeqRefNum, tblout):
from collections import OrderedDict
'''
function to take EVM protein models and tRNA scan GFF to produce a GBK tbl file as well
as a new GFF3 file. The function will also rename locus_id if passed.
'''
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][1])
# load GFF into dictionary
Genes = {}
Genes = gff2dict(evm, fasta, Genes)
Genes = gff2dict(trnascan, fasta, Genes)
# now sort dictionary by contig and location, rename using prefix, translate to protein space to get proper start/stop info
sGenes = natsorted(iter(Genes.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
renamedGenes = {}
scaff2genes = {}
count = Numbering
for k, v in list(sortedGenes.items()):
if prefix:
locusTag = prefix+'_'+str(count).zfill(6)
else:
locusTag = k
renamedGenes[locusTag] = v
if not v['contig'] in scaff2genes:
scaff2genes[v['contig']] = [locusTag]
else:
scaff2genes[v['contig']].append(locusTag)
count += 1
# write tbl outputfile
dicts2tbl(renamedGenes, scaff2genes, scaffLen,
SeqCenter, SeqRefNum, [], tblout)
def checkRefSeq(input):
refseq = False
with open(input, 'r') as infile:
for record in SeqIO.parse(infile, 'genbank'):
if 'RefSeq' in record.annotations['keywords']:
refseq = True
break
return refseq
def getGBKinfo(input):
accession = None
organism = None
strain = None
isolate = None
gb_gi = None
WGS_accession = None
version = None
with open(input, 'r') as infile:
for record in SeqIO.parse(infile, 'genbank'):
try:
WGS_accession = 'WGS:' + \
record.annotations['contig'].split(
':')[0].replace('join(', '')[:4]
except KeyError:
pass
try:
accession = record.annotations['accessions'][0]
except KeyError:
pass
try:
organism = record.annotations['organism'].replace(
'Unclassified.', '').rstrip()
except KeyError:
pass
try:
gb_gi = record.annotations['gi']
except KeyError:
pass
try:
version = record.annotations['sequence_version']
except KeyError:
pass
for f in record.features:
if f.type == "source":
isolate = f.qualifiers.get("isolate", [None])[0]
strain = f.qualifiers.get("strain", [None])[0]
break
return organism, strain, isolate, accession, WGS_accession, gb_gi, version
def getGBKLocusTag(input):
LocusTags = []
with open(input, 'r') as infile:
for record in SeqIO.parse(infile, 'genbank'):
for f in record.features:
if f.type == 'gene':
ID = f.qualifiers['locus_tag'][0]
if not ID in LocusTags:
LocusTags.append(ID)
lastTag = natsorted(LocusTags)[-1]
if not '_' in lastTag:
print('ERROR: underscore "_" not found in locus_tag, exiting.')
sys.exit(1)
tag, count = lastTag.rsplit('_', 1)
justify = len(count)
return tag, count, justify
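# Worked example (illustrative): if the highest locus_tag in the GenBank file is
# 'FUN_000245', getGBKLocusTag() returns ('FUN', '000245', 6), i.e. the prefix,
# the last counter as a string, and its zero-padded width.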
def gb2dna(input, output):
with open(output, 'w') as outfile:
with open(input, 'r') as infile:
for record in SeqIO.parse(infile, 'genbank'):
outfile.write(">%s\n%s\n" %
(record.id, softwrap(str(record.seq))))
def getID(input, type):
# function to get ID from genbank record.features
locusTag = None
ID = None
Parent = None
if type == 'gene':
try:
locusTag = input.qualifiers['locus_tag'][0]
except KeyError:
pass
if not locusTag:
try:
locusTag = input.qualifiers['gene'][0]
except KeyError:
pass
else:
try:
ID = input.qualifiers['gene'][0]
except KeyError:
pass
return locusTag, ID, locusTag
elif type in ['mRNA', 'tRNA', 'ncRNA', 'rRNA', 'misc_RNA', 'exon']:
try:
locusTag = input.qualifiers['locus_tag'][0]
Parent = locusTag
except KeyError:
pass
if not locusTag:
try:
locusTag = input.qualifiers['gene'][0]
except KeyError:
pass
if locusTag:
Parent = locusTag
try:
ID = input.qualifiers['transcript_id'][0]
except KeyError:
pass
else:
try:
locusTag = input.qualifiers['transcript_id'][0]
Parent = locusTag
except KeyError:
pass
else:
try:
ID = input.qualifiers['transcript_id'][0]
except KeyError:
pass
if ID:
if ':' in ID:
ID = ID.split(':')[-1]
else:
try:
ID = input.qualifiers['standard_name'][0]
except KeyError:
pass
return locusTag, ID, Parent
elif type == 'CDS':
try:
locusTag = input.qualifiers['locus_tag'][0]
Parent = locusTag
except KeyError:
pass
if not locusTag:
try:
locusTag = input.qualifiers['gene'][0]
except KeyError:
pass
if locusTag:
Parent = locusTag
try:
ID = input.qualifiers['protein_id'][0]
except KeyError:
pass
else:
try:
locusTag = input.qualifiers['protein_id'][0]
Parent = locusTag
except KeyError:
pass
else:
try:
ID = input.qualifiers['protein_id'][0]
except KeyError:
ID = locusTag
if ID:
if ':' in ID:
ID = ID.split(':')[-1]
else:
try:
ID = input.qualifiers['standard_name'][0]
except KeyError:
pass
return locusTag, ID, Parent
def gb2nucleotides(input, prots, trans, dna):
'''
function to generate protein, transcripts, and contigs from genbank file
'''
genes = {}
with open(dna, 'w') as dnaout:
with open(input, 'r') as filein:
for record in SeqIO.parse(filein, 'genbank'):
dnaout.write(">%s\n%s\n" %
(record.id, softwrap(str(record.seq))))
for f in record.features:
gb_feature_add2dict(f, record, genes)
# write to protein and transcripts
dict2nucleotides(genes, prots, trans)
return len(genes)
def dict2proteins(input, prots):
with open(prots, 'w') as protout:
for k, v in natsorted(list(input.items())):
if 'pseudo' in v:
if v['pseudo']:
continue
if v['type'] == 'mRNA' and not v['CDS']:
continue
if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
continue
for i, x in enumerate(v['ids']):
if v['type'] == 'mRNA':
Prot = v['protein'][i]
protout.write('>{:} {:}\n{:}\n'.format(
x, k, softwrap(Prot)))
def dict2nucleotides(input, prots, trans):
'''
function to generate protein and transcripts from dictionary
'''
# write to protein and transcripts
with open(prots, 'w') as protout:
with open(trans, 'w') as tranout:
for k, v in natsorted(list(input.items())):
if 'pseudo' in v:
if v['pseudo']:
continue
if v['type'] == 'mRNA' and not v['CDS']:
continue
if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
continue
for i, x in enumerate(v['ids']):
try:
Transcript = str(v['transcript'][i])
tranout.write('>{:} {:}\n{:}\n'.format(
x, k, softwrap(Transcript)))
except IndexError:
pass
if v['type'] == 'mRNA':
Prot = v['protein'][i]
protout.write('>{:} {:}\n{:}\n'.format(
x, k, softwrap(Prot)))
def gb2gffnuc(input, gff, prots, trans, dna):
'''
function to generate protein, transcripts, and contigs from genbank file
'''
genes = {}
with open(dna, 'w') as dnaout:
with open(input, 'r') as filein:
for record in SeqIO.parse(filein, 'genbank'):
dnaout.write(">{:}\n{:}\n".format(
record.id, softwrap(str(record.seq))))
for f in record.features:
gb_feature_add2dict(f, record, genes)
# write gff3 output
dict2gff3(genes, gff)
# write to protein and transcripts
dict2nucleotides(genes, prots, trans)
return len(genes)
def gb2parts(input, tbl, gff, prots, trans, dna):
'''
    function returns a dictionary of all gene models from a genbank file; this function
can handle multiple transcripts per locus/gene
'''
genes = {}
scaff2genes = {}
scaffLen = {}
with open(dna, 'w') as dnaout:
with open(input, 'r') as filein:
for record in SeqIO.parse(filein, 'genbank'):
dnaout.write(">{:}\n{:}\n".format(
record.id, softwrap(str(record.seq))))
Contig = record.id
if not Contig in scaffLen:
scaffLen[Contig] = len(record.seq)
for f in record.features:
if f.type == 'gene':
locusTag, ID, Parent = getID(f, f.type)
if not Contig in scaff2genes:
scaff2genes[Contig] = [locusTag]
else:
scaff2genes[Contig].append(locusTag)
gb_feature_add2dict(f, record, genes)
# write tbl output
dicts2tbl(genes, scaff2genes, scaffLen, 'CFMR', '12345', [], tbl)
# write gff3 output
dict2gff3_old(genes, gff)
# write to protein and transcripts
dict2nucleotides(genes, prots, trans)
return len(genes)
def gb_feature_add2dict(f, record, genes):
'''
general function to take a genbank feature from flat file and add to funannotate standardized dictionary
locustag: {
'contig': contigName
'type': mRNA/rRNA/tRNA/ncRNA
'location': (start, end) #integer tuple
'strand': +/-
'ids': [transcript/protein IDs] #list
'mRNA':[[(ex1,ex1),(ex2,ex2)]] #list of lists of tuples (start, end)
'CDS':[[(cds1,cds1),(cds2,cds2)]] #list of lists of tuples (start, end)
    'transcript': [seq1, seq2] #list of mRNA transcripts
    'cds_transcript': [seq1, seq2] #list of CDS transcripts (no UTRs)
'protein': [protseq1,protseq2] #list of CDS translations
'protein_id': [id,id] #from NCBI
'codon_start': [1,1] #codon start for translations
'note': [[first note, second note], [first, second, etc]] #list of lists
'name': genename
'product': [hypothetical protein, velvet complex] #list of product definitions
'go_terms': [[GO:0000001,GO:0000002]] #list of lists
'db_xref': [[InterPro:IPR0001,PFAM:004384]] #list of lists
'partialStart': True/False
'partialStop': True/False
'source': annotation source
'pseudo': True/False
}
'''
# get info from features, if there is no locusTag then exit
if f.type and f.type in ['gene', 'mRNA', 'CDS', 'tRNA', 'rRNA', 'ncRNA', 'exon', 'misc_RNA']:
try:
locusTag, ID, Parent = getID(f, f.type)
except TypeError:
print('ERROR parsing GBK record')
print(f)
sys.exit(1)
if not locusTag:
return genes
else:
return genes
# check for mismatching funannotate ID locus tag basename
if ID and '-T' in ID: # then this is from funannotate, okay to modify - this is to capture apparent tbl2asn local error
# there is a problem, update locusTag with basename of ID
if ID.split('-T')[0] != locusTag:
locusTag = ID.split('-T')[0]
# standard information from every feature
strand = f.location.strand
if strand == 1:
strand = '+'
elif strand == -1:
strand = '-'
start = f.location.nofuzzy_start + 1
end = f.location.nofuzzy_end
chr = record.id
num_parts = len(f.location.parts)
name, Product = (None,)*2
Fivepartial, Threepartial = (False,)*2
DBxref = []
Note = []
GO = []
EC = []
synonyms = []
pseudo = False
if 'pseudo' in f.qualifiers:
pseudo = True
# parse each type somewhat differently
if f.type == 'gene':
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
if 'gene_synonym' in f.qualifiers:
for z in f.qualifiers['gene_synonym']:
synonyms.append(z)
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': None, 'transcript': [],
'cds_transcript': [], 'protein': [],
'source': 'GenBank', '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [], 'CDS': [],
'mRNA': [], 'strand': strand,
'location': (int(start), int(end)),
'contig': chr, 'product': [],
'gene_synonym': synonyms, 'EC_number': [],
'db_xref': [], 'go_terms': [], 'note': [],
'partialStart': [], 'partialStop': [],
'protein_id': [], 'pseudo': pseudo}
else:
genes[locusTag]['location'] = (int(start), int(end))
genes[locusTag]['strand'] = strand
genes[locusTag]['gene_synonym'] = synonyms
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
elif f.type in ['tRNA', 'rRNA', 'ncRNA', 'misc_RNA']:
feature_seq = f.extract(record.seq)
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
try:
Product = f.qualifiers['product'][0]
if Product == 'tRNA-OTHER':
Product = 'tRNA-Xxx'
except KeyError:
Product = None
exonTuples = []
if num_parts < 2: # only single exon
exonTuples.append((int(start), int(end)))
else: # more than 1 exon, so loop through
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
exonTuples.append((int(ex_start), int(ex_end)))
        # sort exon coordinates: ascending for + strand, descending for - strand
if strand == '+':
sortedExons = sorted(exonTuples, key=lambda tup: tup[0])
if str(f.location.start).startswith('<'):
Fivepartial = True
if str(f.location.end).startswith('>'):
Threepartial = True
else:
sortedExons = sorted(
exonTuples, key=lambda tup: tup[0], reverse=True)
if str(f.location.start).startswith('<'):
Threepartial = True
if str(f.location.end).startswith('>'):
Fivepartial = True
# update positions
if f.type == 'misc_RNA':
feature = 'ncRNA'
else:
feature = f.type
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': feature,
'transcript': [feature_seq],
'cds_transcript': [], 'protein': [],
'source': 'GenBank', '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [locusTag+'-T1'],
'CDS': [], 'mRNA': [sortedExons],
'strand': strand,
'location': (int(start), int(end)),
'contig': chr, 'product': [Product],
'protein_id': [], 'pseudo': pseudo,
'gene_synonym': synonyms, 'EC_number': [EC],
'db_xref': [DBxref], 'go_terms': [GO],
'note': [Note], 'partialStart': [Fivepartial],
'partialStop': [Threepartial]}
else:
genes[locusTag]['mRNA'].append(sortedExons)
genes[locusTag]['type'] = feature
genes[locusTag]['transcript'].append(feature_seq)
genes[locusTag]['cds_transcript'].append(None)
genes[locusTag]['protein'].append(None)
genes[locusTag]['ids'].append(
locusTag+'-T'+str(len(genes[locusTag]['ids'])+1))
genes[locusTag]['db_xref'].append(DBxref)
genes[locusTag]['note'].append(Note)
genes[locusTag]['go_terms'].append(GO)
genes[locusTag]['EC_number'].append(EC)
genes[locusTag]['product'].append(Product)
genes[locusTag]['partialStart'].append(Fivepartial)
genes[locusTag]['partialStop'].append(Threepartial)
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
elif f.type == 'mRNA':
feature_seq = f.extract(record.seq)
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
exonTuples = []
if num_parts < 2: # only single exon
exonTuples.append((int(start), int(end)))
else: # more than 1 exon, so loop through
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
exonTuples.append((int(ex_start), int(ex_end)))
        # sort exon coordinates: ascending for + strand, descending for - strand
if strand == '+':
sortedExons = sorted(exonTuples, key=lambda tup: tup[0])
if str(f.location.start).startswith('<'):
Fivepartial = True
if str(f.location.end).startswith('>'):
Threepartial = True
else:
sortedExons = sorted(
exonTuples, key=lambda tup: tup[0], reverse=True)
if str(f.location.start).startswith('<'):
Threepartial = True
if str(f.location.end).startswith('>'):
Fivepartial = True
# update positions
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': f.type,
'transcript': [feature_seq],
'cds_transcript': [], 'protein': [],
'source': 'GenBank', '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [], 'CDS': [],
'mRNA': [sortedExons], 'strand': strand,
'location': (int(start), int(end)),
'contig': chr, 'product': [], 'protein_id': [],
'pseudo': pseudo, 'gene_synonym': synonyms,
'EC_number': [],
'db_xref': [], 'go_terms': [],
'note': [], 'partialStart': [Fivepartial],
'partialStop': [Threepartial]}
else:
genes[locusTag]['mRNA'].append(sortedExons)
genes[locusTag]['type'] = f.type
genes[locusTag]['transcript'].append(feature_seq)
genes[locusTag]['partialStart'].append(Fivepartial)
genes[locusTag]['partialStop'].append(Threepartial)
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
elif f.type == 'exon': # assuming need to overwrite mRNA feature then?
if len(genes[locusTag]['mRNA']) == 0:
genes[locusTag]['mRNA'] = []
genes[locusTag]['transcript'] = []
feature_seq = f.extract(record.seq)
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
exonTuples = []
if num_parts < 2: # only single exon
exonTuples.append((int(start), int(end)))
else: # more than 1 exon, so loop through
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
exonTuples.append((int(ex_start), int(ex_end)))
        # sort exon coordinates: ascending for + strand, descending for - strand
if strand == '+':
sortedExons = sorted(exonTuples, key=lambda tup: tup[0])
if str(f.location.start).startswith('<'):
Fivepartial = True
if str(f.location.end).startswith('>'):
Threepartial = True
else:
sortedExons = sorted(
exonTuples, key=lambda tup: tup[0], reverse=True)
if str(f.location.start).startswith('<'):
Threepartial = True
if str(f.location.end).startswith('>'):
Fivepartial = True
# update positions
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': f.type,
'transcript': [feature_seq],
'cds_transcript': [], 'protein': [],
'source': 'GenBank', '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [], 'CDS': [],
'mRNA': [sortedExons], 'strand': strand,
'location': (int(start), int(end)),
'contig': chr, 'product': [], 'protein_id': [],
'db_xref': [], 'go_terms': [], 'note': [],
'gene_synonym': synonyms, 'EC_number': [],
'partialStart': [Fivepartial],
'partialStop': [Threepartial], 'pseudo': pseudo}
else:
genes[locusTag]['mRNA'].append(sortedExons)
genes[locusTag]['transcript'].append(feature_seq)
genes[locusTag]['partialStart'].append(Fivepartial)
genes[locusTag]['partialStop'].append(Threepartial)
elif f.type == 'CDS' and 'codon_start' in f.qualifiers:
feature_seq = f.extract(record.seq)
if not ID:
try:
log.info("putative transcript from %s has no ID\n(%s %s %s)" % (
locusTag, locusTag, ID, Parent))
except NameError:
print(("putative transcript from %s has no ID\n(%s %s %s)" %
(locusTag, locusTag, ID, Parent)))
return genes
try:
protSeq = f.qualifiers['translation'][0]
except KeyError:
try:
log.debug("%s has no translation" % ID)
except NameError:
print(("%s has no translation" % ID))
protSeq = ''
cdsTuples = []
phase = int(f.qualifiers['codon_start'][0])
if num_parts < 2: # only single CDS
cdsTuples.append((int(start), int(end)))
else:
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
cdsTuples.append((int(ex_start), int(ex_end)))
if strand == '+':
sortedCDS = sorted(cdsTuples, key=lambda tup: tup[0])
else:
sortedCDS = sorted(cdsTuples, key=lambda tup: tup[0], reverse=True)
# check for annotations
try:
Product = f.qualifiers['product'][0]
except KeyError:
Product = 'hypothetical protein'
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
# note and dbxref are in a dictionary
for key, value in list(f.qualifiers.items()):
if key == 'note':
notes = value[0].split('; ')
for n in notes:
if n.startswith('GO'):
GO.append(n)
else:
Note.append(n)
elif key == 'db_xref':
for ref in value:
DBxref.append(ref)
elif key == 'EC_number':
for x in value:
EC.append(x)
# update dictionary
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': 'mRNA',
'transcript': [], '5UTR': [[]], '3UTR': [[]],
'cds_transcript': [feature_seq],
'protein': [], 'source': 'GenBank',
'codon_start': [phase], 'ids': [locusTag+'-T1'],
'CDS': [sortedCDS], 'mRNA': [],
'strand': strand,
'location': (int(start), int(end)),
'contig': chr, 'product': [Product],
'gene_synonym': synonyms, 'EC_number': [EC],
'protein_id': [ID],
'db_xref': [DBxref], 'go_terms': [GO],
'note': [Note], 'partialStart': [],
'partialStop': [], 'pseudo': pseudo}
else:
genes[locusTag]['protein_id'].append(ID)
genes[locusTag]['ids'].append(
locusTag+'-T'+str(len(genes[locusTag]['ids'])+1))
genes[locusTag]['CDS'].append(sortedCDS)
genes[locusTag]['5UTR'].append([])
genes[locusTag]['3UTR'].append([])
genes[locusTag]['product'].append(Product)
genes[locusTag]['protein'].append(protSeq)
genes[locusTag]['cds_transcript'].append(feature_seq)
genes[locusTag]['codon_start'].append(phase)
genes[locusTag]['db_xref'].append(DBxref)
genes[locusTag]['note'].append(Note)
genes[locusTag]['go_terms'].append(GO)
genes[locusTag]['EC_number'].append(EC)
if not genes[locusTag]['type']:
genes[locusTag]['type'] = 'mRNA'
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
return genes
def bed2interlapNames(bedfile):
# load interlap object from a bed file
inter = defaultdict(InterLap)
with open(bedfile, 'r') as infile:
for line in infile:
line = line.strip()
chr, start, end, name = line.split('\t')[:4]
inter[chr].add((int(start), int(end), name))
return inter
def bed2interlap(bedfile):
# load interlap object from a bed file
inter = defaultdict(InterLap)
with open(bedfile, 'r') as infile:
for line in infile:
line = line.strip()
chr, start, end = line.split('\t')[:3]
inter[chr].add((int(start), int(end)))
return inter
def interlapIntersect(coords, contig, interObj):
# return interlap coords of an intersection
if coords in interObj[contig]:
return True
else:
return False
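# Illustrative usage (file name hypothetical): load repeat intervals from BED and
# test whether a coordinate pair overlaps any of them on a given contig:
#   repeats = bed2interlap('repeats.bed')
#   interlapIntersect((5000, 6200), 'scaffold_1', repeats)  # True on any overlap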
def gff2interlap(input, fasta):
'''
function to parse GFF3 file, construct scaffold/gene interlap dictionary and funannotate standard annotation dictionary
'''
inter = defaultdict(InterLap)
Genes = {}
Genes = gff2dict(input, fasta, Genes)
for k, v in natsorted(list(Genes.items())):
inter[v['contig']].add((v['location'][0], v['location'][1], k))
return inter, Genes
def gff2interlapDict(input, fasta, inter, Dict):
'''
function to parse GFF3 file, construct scaffold/gene interlap dictionary and funannotate standard annotation dictionary
'''
Genes = {}
Genes = gff2dict(input, fasta, Genes, gap_filter=True)
for k, v in natsorted(list(Genes.items())):
inter[v['contig']].add(
(v['location'][0], v['location'][1], v['strand'], k))
# merge dictionary and return
Dict = merge_dicts(Dict, Genes)
return inter, Dict
def merge_dicts(x, y):
"""Given two dicts, merge them into a new dict as a shallow copy."""
z = x.copy()
z.update(y)
return z
def exonerate2hints(file, outfile):
# mimic exonerate2hints from GFF3 exonerate file
# CDSpart +/- 15 bp to each match
# intron as is
'''
#gff3 via EVM
scaffold_20 exonerate nucleotide_to_protein_match 225035 225823 82.13 + . ID=match.11677.2;Target=VC83_07547 1 96
scaffold_20 exonerate nucleotide_to_protein_match 53957 54342 92.93 + . ID=match.11677.3;Target=VC83_02595 1 129
scaffold_20 exonerate nucleotide_to_protein_match 54397 54904 92.93 + . ID=match.11677.3;Target=VC83_02595 130 299
scaffold_107 exonerate nucleotide_to_protein_match 77634 78119 89.95 - . ID=match.11677.5;Target=VC83_08471 1 163
scaffold_107 exonerate nucleotide_to_protein_match 77501 77546 89.95 - . ID=match.11677.5;Target=VC83_08471 163 178
scaffold_107 exonerate nucleotide_to_protein_match 77385 77422 89.95 - . ID=match.11677.5;Target=VC83_08471 179 191
#corresponding exonerate2hints
scaffold_20 xnt2h CDSpart 225050 225808 . + . src=XNT;grp=VC83_07547;pri=4
scaffold_20 xnt2h CDSpart 53972 54327 . + . src=XNT;grp=VC83_02595;pri=4
scaffold_20 xnt2h intron 54343 54396 . + . src=XNT;grp=VC83_02595;pri=4
scaffold_20 xnt2h CDSpart 54412 54889 . + . src=XNT;grp=VC83_02595;pri=4
scaffold_107 xnt2h CDSpart 77649 78104 . - . src=XNT;grp=VC83_08471;pri=4
scaffold_107 xnt2h intron 77547 77633 . - . src=XNT;grp=VC83_08471;pri=4
scaffold_107 xnt2h CDSpart 77516 77531 . - . src=XNT;grp=VC83_08471;pri=4
scaffold_107 xnt2h intron 77423 77500 . - . src=XNT;grp=VC83_08471;pri=4
scaffold_107 xnt2h CDSpart 77400 77407 . - . src=XNT;grp=VC83_08471;pri=4
'''
Genes = {}
with open(file, 'r') as input:
for line in input:
if line.startswith('\n') or line.startswith('#'):
continue
line = line.rstrip()
contig, source, feature, start, end, score, strand, phase, attributes = line.split(
'\t')
start = int(start)
end = int(end)
ID, Target = (None,)*2
info = attributes.split(';')
for x in info:
if x.startswith('ID='):
ID = x.replace('ID=', '')
elif x.startswith('Target='):
Target = x.replace('Target=', '').split(' ')[0]
if not ID in Genes:
Genes[ID] = {'id': ID, 'target': Target, 'loc': [
(start, end)], 'strand': strand, 'contig': contig}
else:
Genes[ID]['loc'].append((start, end))
# now lets sort through and write hints file
with open(outfile, 'w') as output:
for k, v in natsorted(list(Genes.items())):
if v['strand'] == '+':
sortedCDS = sorted(v['loc'], key=lambda tup: tup[0])
for i, x in enumerate(sortedCDS): # loop through tuples
output.write('{:}\txnt2h\tCDSpart\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(
v['contig'], x[0]-15, x[1]+15, v['strand'], v['target']))
if len(sortedCDS) > 1:
try:
output.write('{:}\txnt2h\tintron\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(
v['contig'], x[1]+1, sortedCDS[i+1][0]-1, v['strand'], v['target']))
except IndexError:
pass
else:
sortedCDS = sorted(
v['loc'], key=lambda tup: tup[0], reverse=True)
for i, x in enumerate(sortedCDS): # loop through tuples
output.write('{:}\txnt2h\tCDSpart\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(
v['contig'], x[0]+15, x[1]-15, v['strand'], v['target']))
if len(sortedCDS) > 1:
try:
output.write('{:}\txnt2h\tintron\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(
v['contig'], sortedCDS[i+1][1]+1, x[0]-1, v['strand'], v['target']))
except IndexError:
pass
def alignments2dict(input, Genes):
'''
function to take a transcript_alignments file and create dictionary
structure for each alignment
'''
with open(input, 'r') as infile:
for line in infile:
if line.startswith('\n') or line.startswith('#'):
continue
line = line.rstrip()
contig, source, feature, start, end, score, strand, phase, attributes = line.split(
'\t')
start = int(start)
end = int(end)
ID, Target, Extra = (None,)*3
for x in attributes.split(';'):
if x.startswith('ID='):
ID = x.replace('ID=', '')
elif x.startswith('Target='):
Target, Extra = x.split(' ', 1)
Target = Target.replace('Target=', '')
if not ID:
continue
if not ID in Genes:
Genes[ID] = {'mRNA': [(start, end)], 'strand': strand, 'pident': [score],
'location': (start, end), 'contig': contig, 'extra': [Extra]}
else:
if contig != Genes[ID]['contig']:
log.debug('ERROR: {:} mapped to multiple contigs: {:} and {:}'.format(ID, contig, Genes[ID]['contig']))
continue
elif strand != Genes[ID]['strand']:
                    log.debug('ERROR: {:} mapped to different strands'.format(ID))
continue
else:
Genes[ID]['mRNA'].append((start, end))
Genes[ID]['pident'].append(score)
Genes[ID]['extra'].append(Extra)
# double check mRNA features are contained in gene coordinates
if start < Genes[ID]['location'][0]:
Genes[ID]['location'] = (
start, Genes[ID]['location'][1])
if end > Genes[ID]['location'][1]:
Genes[ID]['location'] = (
Genes[ID]['location'][0], end)
return Genes
def introns_from_exons(input):
introns = []
if len(input) > 1:
for x, y in enumerate(input):
try:
introns.append((y[1]+1, input[x+1][0]-1))
except IndexError:
pass
return introns
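# Worked example (illustrative): sorted exons [(100, 200), (300, 400), (500, 600)]
# yield introns_from_exons(...) == [(201, 299), (401, 499)].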
def dict2hints(input, hints):
from collections import OrderedDict
'''
    function to take a simple alignments dictionary and output an Augustus hints file
'''
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# sort the annotations by contig and start location
sGenes = natsorted(iter(input.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
with open(hints, 'w') as hintsout:
for k, v in list(sortedGenes.items()):
sortedExons = sorted(v['mRNA'], key=lambda tup: tup[0])
introns = introns_from_exons(sortedExons)
for i, exon in enumerate(sortedExons):
if i == 0 or i == len(sortedExons)-1:
hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp={:};pri=4;src=E\n'.format(
v['contig'], 'b2h', 'ep', exon[0], exon[1], 0, v['strand'], '.', k))
else:
hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp={:};pri=4;src=E\n'.format(
v['contig'], 'b2h', 'exon', exon[0], exon[1], 0, v['strand'], '.', k))
if len(introns) > 0:
for z in introns:
hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp={:};pri=4;src=E\n'.format(
v['contig'], 'b2h', 'intron', z[0], z[1], 1, v['strand'], '.', k))
def dict2transcriptgff3(input, output):
from collections import OrderedDict
'''
    function to take a simple alignments dictionary and output a GFF3 transcripts file
'''
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# sort the annotations by contig and start location
sGenes = natsorted(iter(input.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
with open(output, 'w') as outfile:
outfile.write('##gff-version 3\n')
for k, v in list(sortedGenes.items()):
for i, exon in enumerate(v['mRNA']):
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Target={:} {:}\n'.format(
v['contig'], 'genome', 'cDNA_match', exon[0], exon[1], v['pident'][i], v['strand'], '.',
k, k, v['extra'][i]))
def harmonize_transcripts(genome, alignments, gfffile, hintsfile, evidence=None, tmpdir='.', cpus=1, maxintron=3000):
from Bio.SeqIO.FastaIO import SimpleFastaParser
'''
function to check if evidence transcripts are missing from existing alignments and/or
write the augustus hints file
'''
Genes = {}
Genes = alignments2dict(alignments, Genes)
log.info('Parsed {:,} transcript alignments from: {:}'.format(len(Genes), alignments))
if evidence: # if nothing here then just move on
uniqueTranscripts = os.path.join(tmpdir, 'transcript_evidence_unique.fasta')
seqcount = 0
with open(uniqueTranscripts, 'w') as fasta_outfile:
for file in evidence:
with open(file, 'r') as fasta_infile:
for title, seq in SimpleFastaParser(fasta_infile):
if ' ' in title:
id = title.split(' ')[0]
else:
id = title
if not id in Genes:
fasta_outfile.write('>{:}\n{:}\n'.format(title, softwrap(seq)))
seqcount += 1
if seqcount > 0:
            log.info('Aligning {:,} unique transcripts [not found in existing alignments] with minimap2'.format(seqcount))
minimapBAM = os.path.join(tmpdir, 'transcript_evidence_unique.bam')
minimapGFF = os.path.join(tmpdir, 'transcript_evidence_unique.gff3')
minimap2Align(uniqueTranscripts, genome, cpus, maxintron, minimapBAM)
mappedReads = bam2gff3(str(minimapBAM), minimapGFF)
if mappedReads > 0:
log.info('Mapped {:,} of these transcripts to the genome, adding to alignments'.format(mappedReads))
Genes = alignments2dict(minimapGFF, Genes)
else:
log.info('Mapped 0 of these transcripts to the genome')
log.info('Creating transcript EVM alignments and Augustus transcripts hintsfile')
dict2transcriptgff3(Genes, gfffile)
dict2hints(Genes, hintsfile)
def gff2dict(file, fasta, Genes, debug=False, gap_filter=False):
'''
general function to take a GFF3 file and return a funannotate standardized dictionary
locustag: {
'contig': contigName
'type': mRNA/rRNA/tRNA/ncRNA
'location': (start, end) #integer tuple
'strand': +/-
'ids': [transcript/protein IDs] #list
'mRNA':[[(ex1,ex1),(ex2,ex2)]] #list of lists of tuples (start, end)
'CDS':[[(cds1,cds1),(cds2,cds2)]] #list of lists of tuples (start, end)
        'transcript': [seq1, seq2] #list of mRNA transcripts
        'cds_transcript': [seq1, seq2] #list of CDS transcripts (no UTRs)
'protein': [protseq1,protseq2] #list of CDS translations
'codon_start': [1,1] #codon start for translations
'note': [[first note, second note], [first, second, etc]] #list of lists
'name': genename
'product': [hypothetical protein, velvet complex] #list of product definitions
'gene_synonym': Aliases
'EC_number': [[ec number]]
'go_terms': [[GO:0000001,GO:0000002]] #list of lists
'db_xref': [[InterPro:IPR0001,PFAM:004384]] #list of lists
'partialStart': True/False
'partialStop': True/False
'source': annotation source
'phase': [[0,2,1]] list of lists
'5UTR': [[(),()]] #list of lists of tuples (start, end)
'3UTR': [[(),()]] #list of lists of tuples (start, end)
}
'''
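    # Usage sketch (hypothetical paths; the returned dictionary follows the layout above):
    #   Genes = gff2dict('annotation.gff3', 'genome.fasta', {})
    #   for locus, info in Genes.items():
    #       exons = info['mRNA'][0]        # (start, end) tuples of the first transcript
    #       protein = info['protein'][0]   # translated CDS of the first transcript
    #       print(locus, info['contig'], info['location'], info['strand'])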
idParent = {}
SeqRecords = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
with open(file, 'r') as input:
for line in input:
if line.startswith('\n') or line.startswith('#'):
continue
line = line.rstrip()
contig, source, feature, start, end, score, strand, phase, attributes = line.split('\t')
if feature not in ['gene', 'mRNA', 'exon', 'CDS', 'tRNA',
'ncRNA', 'rRNA', 'pseudogene', 'five_prime_UTR',
'five_prime_utr', 'three_prime_UTR',
'three_prime_utr', 'transcript']:
continue
if not contig in SeqRecords:
continue
start = int(start)
end = int(end)
ID, Parent, Name, Product, GeneFeature, gbkey = (None,)*6
            # use independent lists here; ([],)*5 would alias the same list object five times
            Note, DBxref, GO, synonyms, ECnum = [], [], [], [], []
info = attributes.split(';')
for x in info:
if x.startswith('ID='):
ID = x.replace('ID=', '')
elif x.startswith('Parent='):
Parent = x.replace('Parent=', '')
elif x.startswith('Name='):
Name = x.replace('Name=', '')
elif x.startswith('Note=') or x.startswith('note='):
Note = x.split('ote=')[-1]
if ',' in Note:
Note = Note.split(',')
else:
Note = [Note]
elif x.startswith('Dbxref='):
DBxref = x.replace('Dbxref=', '')
if ',' in DBxref:
DBxref = DBxref.split(',')
else:
DBxref = [DBxref]
elif x.startswith('Ontology_term='):
GO = x.replace('Ontology_term=', '')
if ',' in GO:
GO = GO.split(',')
else:
GO = [GO]
elif x.startswith('EC_number='):
ECnum = x.split('=',1)[-1]
if ',' in ECnum:
ECnum = ECnum.split(',')
else:
ECnum = [ECnum]
elif x.startswith('Product=') or x.startswith('product='):
Product = unquote(x.split('roduct=')[-1])
elif x.startswith('description='):
Product = unquote(x.replace('description=', ''))
elif x.startswith('Alias='):
synonyms = x.replace('Alias=', '')
synonyms = synonyms.split(',')
elif x.startswith('gbkey='): # genbank uses
gbkey = x.split('=', 1)[-1]
if feature == 'gene' or feature == 'pseudogene':
if not ID in Genes:
if feature == 'pseudogene':
pseudoFlag = True
else:
pseudoFlag = False
Genes[ID] = {'name': Name, 'type': None, 'transcript': [],
'cds_transcript': [], 'protein': [], '5UTR': [],
'3UTR': [], 'gene_synonym': synonyms,
'codon_start': [], 'ids': [], 'CDS': [],
'mRNA': [], 'strand': strand,
'EC_number': [],
'location': (start, end), 'contig': contig,
'product': [], 'source': source, 'phase': [],
'db_xref': [], 'go_terms': [], 'note': [],
'partialStart': [], 'partialStop': [],
'pseudo': pseudoFlag}
else:
if start < Genes[ID]['location'][0]:
Genes[ID]['location'] = (
start, Genes[ID]['location'][1])
if end > Genes[ID]['location'][1]:
Genes[ID]['location'] = (Genes[ID]['location'][0], end)
else:
if not ID or not Parent:
sys.stderr.write("Error, can't find ID or Parent. Malformed GFF file.\n")
sys.stderr.write(line)
sys.exit(1)
if feature in ['mRNA', 'transcript', 'tRNA', 'ncRNA', 'rRNA']:
if gbkey and gbkey == 'misc_RNA':
feature = 'ncRNA'
if not Product:
if feature in ['mRNA', 'transcript']:
Product = 'hypothetical protein'
if not Parent in Genes:
Genes[Parent] = {'name': Name, 'type': feature,
'transcript': [], 'cds_transcript': [],
'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [[]], 'ids': [ID],
'CDS': [[]], 'mRNA': [[]], 'strand': strand,
'location': (start, end), 'contig': contig,
'product': [Product], 'source': source,
'phase': [[]], 'gene_synonym': synonyms,
'db_xref': [DBxref], 'go_terms': [GO],
'EC_number': [ECnum],
'note': [Note], 'partialStart': [False],
'partialStop': [False], 'pseudo': False}
else:
Genes[Parent]['ids'].append(ID)
Genes[Parent]['mRNA'].append([])
Genes[Parent]['CDS'].append([])
Genes[Parent]['phase'].append([])
Genes[Parent]['5UTR'].append([])
Genes[Parent]['3UTR'].append([])
Genes[Parent]['codon_start'].append([])
Genes[Parent]['partialStart'].append(False)
Genes[Parent]['partialStop'].append(False)
Genes[Parent]['product'].append(Product)
Genes[Parent]['db_xref'].append(DBxref)
Genes[Parent]['EC_number'].append(ECnum)
Genes[Parent]['gene_synonym'] += synonyms
Genes[Parent]['go_terms'].append(GO)
Genes[Parent]['note'].append(Note)
Genes[Parent]['type'] = feature
# double check mRNA features are contained in gene coordinates
if start < Genes[Parent]['location'][0]:
# print('{:} update start: {:} to {:}'.format(Parent, Genes[Parent]['location'][0],start))
Genes[Parent]['location'] = (
start, Genes[Parent]['location'][1])
if end > Genes[Parent]['location'][1]:
# print('{:} update stop: {:} to {:}'.format(Parent, Genes[Parent]['location'][1],end))
Genes[Parent]['location'] = (
Genes[Parent]['location'][0], end)
if not ID in idParent:
idParent[ID] = Parent
elif feature == 'exon':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None,
'transcript': [], 'cds_transcript': [],
'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [[]], 'ids': [p],
'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
'location': None, 'contig': contig,
'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [],
'EC_number': [],
'note': [], 'partialStart': [False],
'partialStop': [False], 'pseudo': False,
'gene_synonym': synonyms}
else:
# determine which transcript this is get index from id
i = Genes[GeneFeature]['ids'].index(p)
Genes[GeneFeature]['mRNA'][i].append(
(start, end))
elif feature == 'CDS':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None,
'transcript': [], 'cds_transcript': [],
'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [[]], 'ids': [p],
'CDS': [[(start, end)]], 'mRNA': [], 'strand': strand,
'location': None, 'contig': contig,
'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [],
'EC_number': [],
'note': [], 'partialStart': [False],
'partialStop': [False], 'pseudo': False,
'gene_synonym': synonyms}
else:
# determine which transcript this is get index from id
i = Genes[GeneFeature]['ids'].index(p)
Genes[GeneFeature]['CDS'][i].append(
(start, end))
# add phase
try:
Genes[GeneFeature]['phase'][i].append(int(phase))
except ValueError:
Genes[GeneFeature]['phase'][i].append('?')
elif feature == 'five_prime_UTR' or feature == 'five_prime_utr':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None,
'transcript': [], 'cds_transcript': [],
'protein': [], '5UTR': [[(start, end)]], '3UTR': [[]],
'codon_start': [[]], 'ids': [p],
'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
'location': None, 'contig': contig,
'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [],
'EC_number': [],
'note': [], 'partialStart': [False],
'partialStop': [False], 'pseudo': False,
'gene_synonym': synonyms,}
else:
# determine which transcript this is get index from id
i = Genes[GeneFeature]['ids'].index(p)
Genes[GeneFeature]['5UTR'][i].append(
(start, end))
elif feature == 'three_prime_UTR' or feature == 'three_prime_utr':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None,
'transcript': [], 'cds_transcript': [],
'protein': [], '5UTR': [[]], '3UTR': [[(start, end)]],
'codon_start': [[]], 'ids': [p],
'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
'location': None, 'contig': contig,
'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [],
'EC_number': [],
'note': [], 'partialStart': [False],
'partialStop': [False], 'pseudo': False,
'gene_synonym': synonyms}
else:
# determine which transcript this is get index from id
i = Genes[GeneFeature]['ids'].index(p)
Genes[GeneFeature]['3UTR'][i].append(
(start, end))
# loop through and make sure CDS and exons are properly sorted and codon_start is correct, translate to protein space
for k, v in list(Genes.items()):
for i in range(0, len(v['ids'])):
if v['type'] in ['mRNA', 'tRNA', 'ncRNA', 'rRNA']:
if v['strand'] == '+':
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
else:
sortedExons = sorted(
v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
Genes[k]['mRNA'][i] = sortedExons
mrnaSeq = getSeqRegions(SeqRecords, v['contig'], sortedExons)
if gap_filter:
mrnaSeq, Genes[k]['mRNA'][i] = start_end_gap(mrnaSeq, Genes[k]['mRNA'][i])
v['transcript'].append(mrnaSeq)
if v['type'] == 'mRNA':
if not v['CDS'][i]:
sys.stderr.write('ERROR: ID={:} has no CDS features, removing gene model\n'.format(k))
del Genes[k]
                        # gene model was just deleted; break so the remaining transcripts
                        # of this gene do not trigger a KeyError on Genes[k] below
                        break
if v['strand'] == '+':
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
else:
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0], reverse=True)
#get the codon_start by getting first CDS phase + 1
indexStart = [x for x, y in enumerate(v['CDS'][i]) if y[0] == sortedCDS[0][0]]
cdsSeq = getSeqRegions(SeqRecords, v['contig'], sortedCDS)
if gap_filter:
cdsSeq, v['CDS'][i] = start_end_gap(cdsSeq, v['CDS'][i])
protSeq, codon_start = (None,)*2
try:
currentphase = v['phase'][i]
except IndexError:
pass
if '?' in v['phase'][i]: #dont know the phase -- malformed GFF3, try to find best CDS
translateResults = []
for y in [1,2,3]:
protSeq = translate(cdsSeq, v['strand'], y-1)
if not protSeq:
log.debug('Translation of {:} using {:} phase failed'.format(v['ids'][i], y-1))
continue
numStops = protSeq.count('*')
if protSeq[-1] == '*':
numStops -= 1
translateResults.append((y, numStops, protSeq))
sortedResults = sorted(translateResults, key=lambda tup: tup[1])
codon_start = sortedResults[0][0]
protSeq = sortedResults[0][2]
else:
try:
codon_start = int(v['phase'][i][indexStart[0]]) + 1
except IndexError:
pass
#translate and get protein sequence
protSeq = translate(cdsSeq, v['strand'], codon_start-1)
Genes[k]['codon_start'][i] = codon_start
if codon_start > 1:
if v['strand'] == '+':
cdsSeq = cdsSeq[codon_start - 1:]
elif v['strand'] == '-':
                            # trim codon_start - 1 bases (mirrors the '+' strand slice above)
                            endTrunc = len(cdsSeq) - (codon_start - 1)
                            cdsSeq = cdsSeq[0:endTrunc]
else:
print("ERROR nonsensical strand (%s) for gene %s"%([v['strand'],k]))
Genes[k]['cds_transcript'].append(cdsSeq)
Genes[k]['CDS'][i] = sortedCDS
v['protein'].append(protSeq)
if protSeq:
if protSeq.endswith('*'):
v['partialStop'][i] = False
else:
v['partialStop'][i] = True
if v['codon_start'][i] == 1 and v['protein'][i].startswith('M'):
v['partialStart'][i] = False
else:
v['partialStart'][i] = True
# since its possible updated the mRNA/CDS fields, double check that gene coordinates are ok
if k not in Genes:
continue
all_mRNA_coords = [item for sublist in v['mRNA'] for item in sublist]
try:
Genes[k]['location'] = (min(all_mRNA_coords, key=lambda item: item[0])[0], max(all_mRNA_coords, key=lambda item: item[1])[1])
except ValueError:
continue
# clean up any repeated synonym
if len(v['gene_synonym']) > 1:
uniqueSynonyms = set(v['gene_synonym'])
Genes[k]['gene_synonym'] = list(uniqueSynonyms)
return Genes
def start_end_gap(seq, coords):
if seq.startswith('N'):
oldLen = len(seq)
seq = seq.lstrip('N')
numLeftStripped = oldLen - len(seq)
coords[0] = (coords[0][0]+numLeftStripped, coords[0][1])
if seq.endswith('N'):
oldLen = len(seq)
seq = seq.rstrip('N')
numRightStripped = oldLen - len(seq)
coords[-1] = (coords[-1][0], coords[-1][1]-numRightStripped)
return seq, coords
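# Worked example of start_end_gap's coordinate adjustment (leading Ns are trimmed and
# the first exon start is shifted by the number of stripped bases):
#   start_end_gap('NNNATGCCG', [(101, 109)])  # -> ('ATGCCG', [(104, 109)])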
def simplifyGO(inputList):
simple = []
for x in inputList:
if x.startswith('GO:'):
simple.append(x.strip())
elif ' ' in x:
simple.append(x.split(' ')[1])
return simple
def dict2gff3(input, output, debug=False):
from collections import OrderedDict
'''
function to convert funannotate gene dictionary to gff3 output
'''
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# sort the annotations by contig and start location
sGenes = natsorted(iter(input.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
# then loop through and write GFF3 format
with open(output, 'w') as gffout:
gffout.write("##gff-version 3\n")
for k, v in list(sortedGenes.items()):
if 'pseudo' in v:
if v['pseudo']:
continue
if v['type'] == 'mRNA' and not v['CDS']:
continue
if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
continue
if v['type'] == 'mRNA' and len(v['CDS']) == 0:
continue
if v['type'] is None:
continue
if v['name']:
if 'gene_synonym' in v and len(v['gene_synonym']) > 0:
gffout.write(
"{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Name={:};Alias={:};\n".format(
v['contig'], v['source'],v['location'][0],
v['location'][1], v['strand'], k, v['name'],
','.join(v['gene_synonym'])))
else:
gffout.write(
"{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Name={:};\n".format(
v['contig'], v['source'], v['location'][0],
v['location'][1], v['strand'], k, v['name']))
else:
if 'gene_synonym' in v and len(v['gene_synonym']) > 0:
gffout.write(
"{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Alias={:};\n".format(
v['contig'], v['source'], v['location'][0],
v['location'][1], v['strand'], k,
','.join(v['gene_synonym'])))
else:
gffout.write(
"{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(
v['contig'], v['source'], v['location'][0],
v['location'][1], v['strand'], k))
for i in range(0, len(v['ids'])):
# make sure coordinates are sorted
if v['strand'] == '+':
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
if '5UTR' in v and v['5UTR'][i]:
sortedFive = sorted(
v['5UTR'][i], key=lambda tup: tup[0])
if '3UTR' in v and v['3UTR'][i]:
sortedThree = sorted(
v['3UTR'][i], key=lambda tup: tup[0])
else:
sortedExons = sorted(
v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
sortedCDS = sorted(
v['CDS'][i], key=lambda tup: tup[0], reverse=True)
if '5UTR' in v and v['5UTR'][i]:
sortedFive = sorted(
v['5UTR'][i], key=lambda tup: tup[0], reverse=True)
if '3UTR' in v and v['3UTR'][i]:
sortedThree = sorted(
v['3UTR'][i], key=lambda tup: tup[0], reverse=True)
# build extra annotations for each transcript if applicable
extraAnnotations = ''
if 'gene_synonym' in v and len(v['gene_synonym']) > 0:
extraAnnotations = extraAnnotations + \
'Alias={:};'.format(','.join(v['gene_synonym']))
if len(v['go_terms'][i]) > 0:
go_annotations = simplifyGO(v['go_terms'][i])
extraAnnotations = extraAnnotations + \
'Ontology_term={:};'.format(','.join(go_annotations))
if len(v['db_xref'][i]) > 0:
extraAnnotations = extraAnnotations + \
'Dbxref={:};'.format(','.join(v['db_xref'][i]))
if 'EC_number' in v and len(v['EC_number'][i]) > 0:
extraAnnotations = extraAnnotations + \
'EC_number={:};'.format(','.join(v['EC_number'][i]))
if len(v['note'][i]) > 0:
CleanedNote = [] # need to make sure no commas or semi-colons in these data else will cause problems in parsing GFF3 output downstream
for x in v['note'][i]:
if ';' in x:
x = x.replace(';', '.')
if ':' in x:
base, values = x.split(':', 1)
if not ',' in values:
CleanedNote.append(base+':'+values)
else:
for y in values.split(','):
CleanedNote.append(base+':'+y)
else:
CleanedNote.append(x.replace(',', ''))
extraAnnotations = extraAnnotations + \
'note={:};'.format(','.join(CleanedNote))
# now write mRNA feature
gffout.write(
"{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};{:}\n".format(
v['contig'], v['source'], v['type'], v['location'][0],
v['location'][1], v['strand'], v['ids'][i], k,
v['product'][i], extraAnnotations))
if v['type'] in ['mRNA', 'tRNA', 'ncRNA']:
if '5UTR' in v and v['5UTR'][i]:
# if 5'UTR then write those first
num_5utrs = len(v['5UTR'][i])
if num_5utrs > 0:
for z in range(0, num_5utrs):
u_num = z + 1
gffout.write("{:}\t{:}\tfive_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr5p{:};Parent={:};\n".format(
v['contig'], v['source'], sortedFive[z][0], sortedFive[z][1], v['strand'], v['ids'][i],
u_num, v['ids'][i]))
# write the exons
num_exons = len(v['mRNA'][i])
for x in range(0, num_exons):
ex_num = x + 1
gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
v['contig'], v['source'], sortedExons[x][0], sortedExons[x][1], v['strand'],
v['ids'][i], ex_num, v['ids'][i]))
# if 3'UTR then write
if '3UTR' in v and v['3UTR'][i]:
num_3utrs = len(v['3UTR'][i])
if num_3utrs > 0:
for z in range(0, num_3utrs):
u_num = z + 1
gffout.write("{:}\t{:}\tthree_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr3p{:};Parent={:};\n".format(
v['contig'], v['source'], sortedThree[z][0], sortedThree[z][1], v['strand'],
v['ids'][i], u_num, v['ids'][i]))
if v['type'] == 'mRNA':
num_cds = len(v['CDS'][i])
# GFF3 phase is 1 less than flat file
current_phase = v['codon_start'][i] - 1
for y in range(0, num_cds):
gffout.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(
v['contig'], v['source'], sortedCDS[y][0], sortedCDS[y][1], v['strand'],
current_phase, v['ids'][i], v['ids'][i]))
current_phase = (
current_phase - (int(sortedCDS[y][1]) - int(sortedCDS[y][0]) + 1)) % 3
if current_phase == 3:
current_phase = 0
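# Note on the CDS phase arithmetic used in dict2gff3 above: the phase written for the
# first CDS segment is codon_start - 1, and each subsequent phase is
# (current_phase - segment_length) % 3, which is equivalent to the GFF3 definition.
# For example, codon_start=1 with CDS segments of length 10 and 20 yields phases 0 and 2.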
def dict2gff3_old(input, output):
from collections import OrderedDict
'''
function to convert funannotate gene dictionary to gff3 output
'''
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# sort the annotations by contig and start location
sGenes = sorted(iter(input.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
# then loop through and write GFF3 format
with open(output, 'w') as gffout:
gffout.write("##gff-version 3\n")
for k, v in list(sortedGenes.items()):
if 'pseudo' in v:
if v['pseudo']:
continue
if v['type'] == 'mRNA' and not v['CDS']:
continue
if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
continue
if v['type'] == 'mRNA' and len(v['CDS']) == 0:
continue
if v['type'] is None:
continue
if v['name']:
gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Name={:};\n".format(
v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k, v['name']))
else:
gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(
v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
for i in range(0, len(v['ids'])):
# build extra annotations for each transcript if applicable
extraAnnotations = ''
if len(v['go_terms'][i]) > 0:
extraAnnotations = extraAnnotations + \
'Ontology_term={:};'.format(','.join(v['go_terms'][i]))
if len(v['db_xref'][i]) > 0:
extraAnnotations = extraAnnotations + \
'Dbxref={:};'.format(','.join(v['db_xref'][i]))
if len(v['note'][i]) > 0:
extraAnnotations = extraAnnotations + \
'note={:};'.format(','.join(v['note'][i]))
# now write mRNA feature
gffout.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};{:}\n".format(
v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['product'][i], extraAnnotations))
if v['type'] == 'mRNA' or v['type'] == 'tRNA':
if '5UTR' in v:
# if 5'UTR then write those first
num_5utrs = len(v['5UTR'][i])
if num_5utrs > 0:
for z in range(0, num_5utrs):
u_num = z + 1
gffout.write("{:}\t{:}\tfive_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr5p{:};Parent={:};\n".format(
v['contig'], v['source'], v['5UTR'][i][z][0], v['5UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
# write the exons
num_exons = len(v['mRNA'][i])
for x in range(0, num_exons):
ex_num = x + 1
gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
# if 3'UTR then write
if '3UTR' in v:
num_3utrs = len(v['3UTR'][i])
if num_3utrs > 0:
for z in range(0, num_3utrs):
u_num = z + 1
gffout.write("{:}\t{:}\tthree_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr3p{:};Parent={:};\n".format(
v['contig'], v['source'], v['3UTR'][i][z][0], v['3UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
if v['type'] == 'mRNA':
num_cds = len(v['CDS'][i])
# GFF3 phase is 1 less than flat file
current_phase = v['codon_start'][i] - 1
for y in range(0, num_cds):
gffout.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(
v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
current_phase = (
current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
if current_phase == 3:
current_phase = 0
def dict2gff3noUTRs(input, output):
from collections import OrderedDict
'''
function to convert funannotate gene dictionary to gff3 output, no UTRs!
'''
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# sort the annotations by contig and start location
sGenes = sorted(iter(input.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
# then loop through and write GFF3 format
with open(output, 'w') as gffout:
gffout.write("##gff-version 3\n")
for k, v in list(sortedGenes.items()):
if v['name']:
gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Name={:};\n".format(
v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k, v['name']))
else:
gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(
v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
for i in range(0, len(v['ids'])):
# build extra annotations for each transcript if applicable
extraAnnotations = ''
if len(v['go_terms'][i]) > 0:
extraAnnotations = extraAnnotations + \
'Ontology_term={:};'.format(','.join(v['go_terms'][i]))
if len(v['db_xref'][i]) > 0:
extraAnnotations = extraAnnotations + \
'Dbxref={:};'.format(','.join(v['db_xref'][i]))
if len(v['note'][i]) > 0:
extraAnnotations = extraAnnotations + \
'note={:};'.format(','.join(v['note'][i]))
# now write mRNA feature
gffout.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};{:}\n".format(
v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['product'][i], extraAnnotations))
if v['type'] == 'tRNA':
# write the exons and CDS features
num_exons = len(v['mRNA'][i])
for x in range(0, num_exons):
ex_num = x + 1
gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
elif v['type'] == 'mRNA':
num_cds = len(v['CDS'][i])
# GFF3 phase is 1 less than flat file
current_phase = v['codon_start'][i] - 1
for y in range(0, num_cds):
ex_num = y + 1
gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
gffout.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(
v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
current_phase = (
current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
if current_phase == 3:
current_phase = 0
def gtf2dict(input):
Genes = {}
with open(input, 'r') as inFile:
for line in inFile:
if line.startswith('\n') or line.startswith('#'):
continue
line = line.rstrip()
# CM002242 StringTie transcript 4198460 4199001 1000 + . gene_id "STRG.18087"; transcript_id "STRG.18087.2"; cov "5.905163"; FPKM "3.279455"; TPM "9.789504";
# CM002242 StringTie exon 4198460 4198609 1000 + . gene_id "STRG.18087"; transcript_id "STRG.18087.2"; exon_number "1"; cov "6.999466";
contig, source, feature, start, end, score, strand, phase, attributes = line.split(
'\t')
start = int(start)
end = int(end)
ID, transcriptID, TPM = (None,)*3
info = attributes.split(';')
for x in info:
x = x.strip()
x = x.replace('"', '')
if x.startswith('gene_id '):
ID = x.replace('gene_id ', '')
elif x.startswith('transcript_id '):
transcriptID = x.replace('transcript_id ', '')
elif x.startswith('TPM '):
TPM = x.replace('TPM ', '')
if feature == 'transcript':
if not ID in Genes:
Genes[ID] = {'type': 'mRNA', 'codon_start': [1], 'ids': [transcriptID], 'CDS': [[]], 'mRNA': [[]], 'strand': strand,
'location': (start, end), 'contig': contig, 'source': source, 'tpm': [TPM]}
else:
if start < Genes[ID]['location'][0]:
Genes[ID]['location'] = (
start, Genes[ID]['location'][1])
if end > Genes[ID]['location'][1]:
Genes[ID]['location'] = (Genes[ID]['location'][0], end)
Genes[ID]['ids'].append(transcriptID)
Genes[ID]['mRNA'].append([])
Genes[ID]['CDS'].append([])
Genes[ID]['codon_start'].append(1)
Genes[ID]['tpm'].append(TPM)
else:
if not ID or not transcriptID:
print(
"Error, can't find geneID or transcriptID. Malformed GTF file.")
print(line)
sys.exit(1)
if feature == 'exon':
if not ID in Genes:
Genes[ID] = {'type': 'mRNA', 'codon_start': [1], 'ids': [transcriptID], 'CDS': [[(start, end)]], 'mRNA': [[(start, end)]], 'strand': strand,
'location': (start, end), 'contig': contig, 'source': source, 'tpm': []}
else:
if transcriptID in Genes[ID]['ids']: # then add exon
i = Genes[ID]['ids'].index(transcriptID)
Genes[ID]['mRNA'][i].append((start, end))
Genes[ID]['CDS'][i].append((start, end))
# loop through dictionary and make sure properly sorted exons
for k, v in list(Genes.items()):
for i in range(0, len(v['ids'])):
if v['strand'] == '+':
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
else:
sortedExons = sorted(
v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
sortedCDS = sorted(
v['CDS'][i], key=lambda tup: tup[0], reverse=True)
Genes[k]['mRNA'][i] = sortedExons
Genes[k]['CDS'][i] = sortedCDS
return Genes
def Stringtie_dict2gff3(input, output):
from collections import OrderedDict
'''
function to convert funannotate gene dictionary to gff3 output
'''
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# sort the annotations by contig and start location
sGenes = sorted(iter(input.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
# then loop through and write GFF3 format
with open(output, 'w') as outfile:
outfile.write("##gff-version 3\n")
for k, v in list(sortedGenes.items()):
outfile.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(
v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
for i in range(0, len(v['ids'])):
# build extra annotations for each transcript if applicable
# now write mRNA feature
outfile.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};TPM={:}\n".format(
v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['tpm'][i]))
if v['type'] == 'mRNA':
if '5UTR' in v:
# if 5'UTR then write those first
num_5utrs = len(v['5UTR'][i])
if num_5utrs > 0:
for z in range(0, num_5utrs):
u_num = z + 1
outfile.write("{:}\t{:}\tfive_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr5p{:};Parent={:};\n".format(
v['contig'], v['source'], v['5UTR'][i][z][0], v['5UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
# write the exons
num_exons = len(v['mRNA'][i])
for x in range(0, num_exons):
ex_num = x + 1
outfile.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(
v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
# if 3'UTR then write
if '3UTR' in v:
num_3utrs = len(v['3UTR'][i])
if num_3utrs > 0:
for z in range(0, num_3utrs):
u_num = z + 1
outfile.write("{:}\t{:}\tthree_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr3p{:};Parent={:};\n".format(
v['contig'], v['source'], v['3UTR'][i][z][0], v['3UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
if v['type'] == 'mRNA':
num_cds = len(v['CDS'][i])
# GFF3 phase is 1 less than flat file
current_phase = v['codon_start'][i] - 1
for y in range(0, num_cds):
outfile.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(
v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
current_phase = (
current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
if current_phase == 3:
current_phase = 0
def Quarry2GFF3(input, output):
with open(output, 'w') as outfile:
outfile.write(("##gff-version 3\n"))
exonCounts = {}
GeneCount = 1
with open(input, 'r') as infile:
for line in infile:
line = line.strip()
contig, source, feature, start, end, score, strand, phase, attributes = line.split(
'\t')
source = 'CodingQuarry'
ID, Parent, Name = (None,)*3
info = attributes.split(';')
for x in info:
if x.startswith('ID='):
ID = x.replace('ID=', '')
elif x.startswith('Parent='):
Parent = x.replace('Parent=', '')
if ID and ' ' in ID:
ID = ID.split(' ')[0]
if Parent and ' ' in Parent:
Parent = Parent.split(' ')[0]
if feature == 'gene':
geneID = 'gene_'+str(GeneCount)
transID = 'transcript_'+str(GeneCount)+'-T1'
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Name={:};Alias={:};\n'.format(
contig, source, feature, start, end, score, strand, phase, geneID, geneID, ID))
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Parent={:};Alias={:};\n'.format(
contig, source, 'mRNA', start, end, '.', strand, '.', transID, geneID, ID))
GeneCount += 1
elif feature == 'CDS':
if not transID in exonCounts:
exonCounts[transID] = 1
else:
exonCounts[transID] += 1
num = exonCounts.get(transID)
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:}.exon{:};Parent={:};\n'.format(
contig, source, 'exon', start, end, '.', strand, '.', transID, num, transID))
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:}.cds;Parent={:};\n'.format(
contig, source, feature, start, end, score, strand, phase, transID, transID))
def runStringtie(bamfile, cpus, output):
'''
Function to run stringtie from bamfile
    Note that given only a BAM file there is no way to determine strandedness, so StringTie is run unstranded
'''
cmd = ['stringtie', '-p', str(cpus), os.path.realpath(bamfile)]
runSubprocess2(cmd, '.', log, os.path.abspath(output))
def runCodingQuarry(genome, stringtie, cpus, output):
'''
run CodingQuarry from stringtie GTF input file
'''
# first get basename directory as need to create tmp CodingQuarry dir
basedir = os.path.dirname(genome)
tmpdir = os.path.join(basedir, 'CodingQuarry')
if not os.path.isdir(tmpdir):
os.makedirs(tmpdir)
# convert GTF to GFF3 file
stringtieGFF3 = os.path.join(basedir, 'stringtie.gff3')
Genes = gtf2dict(stringtie)
Stringtie_dict2gff3(Genes, stringtieGFF3)
# now setup command and run from tmpdir folder
cmd = ['CodingQuarry', '-p',
str(cpus), '-f', os.path.realpath(genome), '-t', os.path.realpath(stringtieGFF3)]
runSubprocess(cmd, tmpdir, log)
# capture results and reformat to proper GFF3
result = os.path.join(tmpdir, 'out', 'PredictedPass.gff3')
if not checkannotations(result):
log.error('CodingQuarry failed, moving on without result, check logfile')
return False
else:
Quarry2GFF3(result, output)
return True
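# Pipeline sketch combining runStringtie and runCodingQuarry (hypothetical paths;
# assumes stringtie, CodingQuarry and samtools are on the PATH and rnaseq.bam is a
# coordinate-sorted RNA-seq alignment against genome.fasta):
#   runStringtie('rnaseq.bam', 8, 'stringtie.gtf')
#   if runCodingQuarry('genome.fasta', 'stringtie.gtf', 8, 'codingquarry.gff3'):
#       log.info('CodingQuarry predictions written to codingquarry.gff3')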
def runCodingQuarryTrained(genome, species, tmpdir, cpus, output):
# now setup command and run from tmpdir folder
log.info(
        'CodingQuarry prediction is running using {:} parameters'.format(species))
cmd = ['CodingQuarry', '-p',
str(cpus), '-f', os.path.realpath(genome), '-s', species]
log.debug(' '.join(cmd))
myENV = os.environ
if 'QUARRY_PATH' in myENV:
del myENV['QUARRY_PATH']
FNULL = open(os.devnull, 'w')
p1 = subprocess.Popen(cmd, stdout=FNULL, stderr=FNULL,
cwd=tmpdir, env=dict(myENV))
p1.communicate()
# capture results and reformat to proper GFF3
result = os.path.join(tmpdir, 'out', 'PredictedPass.gff3')
if not checkannotations(result):
log.error('CodingQuarry failed, moving on without result, check logfile')
return False
else:
Quarry2GFF3(result, output)
return True
def dict2gtf(input, output):
from collections import OrderedDict
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# sort the annotations by contig and start location
sGenes = sorted(iter(input.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
with open(output, 'w') as gtfout:
for k, v in list(sortedGenes.items()):
if v['type'] != 'mRNA':
continue
if 'pseudo' in v:
if v['pseudo']:
continue
if v['type'] == 'mRNA' and not v['CDS']:
continue
if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
continue
for i in range(0, len(v['ids'])):
# create attributes string
attributes = 'gene_id "{:}"; transcript_id "{:}";'.format(
k, v['ids'][i])
# if v['name']:
# attributes = attributes + ' Name "{:}";'.format(v['name'])
if len(v['5UTR'][i]) > 0:
for utr in v['5UTR'][i]:
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
v['contig'], v['source'], '5UTR', utr[0], utr[1], 0, v['strand'], 0, attributes))
if not v['partialStart'][i]:
if v['strand'] == '+':
startCodon = (v['CDS'][i][0][0], v['CDS'][i][0][0]+2)
else:
startCodon = (v['CDS'][i][0][1]-2, v['CDS'][i][0][1])
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
v['contig'], v['source'], 'start_codon', startCodon[0], startCodon[1], 0, v['strand'], 0, attributes))
for x, cds in enumerate(v['CDS'][i]):
if v['partialStop'][i]: # then just write the whole CDS as no reason to move codon back
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
v['contig'], v['source'], 'CDS', cds[0], cds[1], 0, v['strand'], v['phase'][i][x], attributes))
else:
if v['strand'] == '+':
if x == len(v['CDS'][i])-1: # this is last one
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
v['contig'], v['source'], 'CDS', cds[0], cds[1]-3, 0, v['strand'], v['phase'][i][x], attributes))
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
v['contig'], v['source'], 'stop_codon', cds[1]-2, cds[1], 0, v['strand'], 0, attributes))
else:
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
v['contig'], v['source'], 'CDS', cds[0], cds[1], 0, v['strand'], v['phase'][i][x], attributes))
else:
if x == len(v['CDS'][i])-1: # this is last one
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
v['contig'], v['source'], 'CDS', cds[0]+3, cds[1], 0, v['strand'], v['phase'][i][x], attributes))
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
v['contig'], v['source'], 'stop_codon', cds[0], cds[0]+2, 0, v['strand'], 0, attributes))
else:
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
v['contig'], v['source'], 'CDS', cds[0], cds[1], 0, v['strand'], v['phase'][i][x], attributes))
if len(v['3UTR'][i]) > 0:
for utr in v['3UTR'][i]:
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(
v['contig'], v['source'], '3UTR', utr[0], utr[1], 0, v['strand'], 0, attributes))
gtfout.write('\n')
def gff3_to_gtf(input, genome, output):
Genes = {}
Genes = gff2dict(input, genome, Genes)
dict2gtf(Genes, output)
def gb2allout(input, GFF, Proteins, Transcripts, DNA):
'''
function to split GBK file into parts, need to be able to deal with multiple transcripts and get naming correct
assumption is that the mRNA and CDS features from multiple transcripts are in order, i.e. the first mRNA feature
you see corresponds to first CDS feature, etc. **hopefully this is an okay assumption**
'''
# idea is to populate the dictionary first, then write GFF, proteins, transcripts, can write DNA on first pass
genes = {}
with open(DNA, 'w') as scaffolds:
with open(input, 'r') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
scaffolds.write(">{:}\n{:}\n".format(
record.id, softwrap(str(record.seq))))
for f in record.features:
gb_feature_add2dict(f, record, genes)
# write GFF
dict2gff3_old(genes, GFF)
# write to protein and transcripts
dict2nucleotides(genes, Proteins, Transcripts)
def minimap2Align(transcripts, genome, cpus, intron, output):
'''
function to align transcripts to genome using minimap2
huge speed increase over gmap + blat
'''
FNULL = open(os.devnull, 'w')
bamthreads = int(round(int(cpus) / 2))
if bamthreads > 4:
bamthreads = 4
minimap2_cmd = ['minimap2', '-ax', 'splice', '-t',
str(cpus), '--cs', '-u', 'b', '-G', str(intron), genome,
transcripts]
samtools_cmd = ['samtools', 'sort', '--reference', genome,
'-@', str(bamthreads), '-o', output, '-']
    log.debug('{} | {}'.format(' '.join(minimap2_cmd), ' '.join(samtools_cmd)))
p1 = subprocess.Popen(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)
p2 = subprocess.Popen(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL, stdin=p1.stdout)
p1.stdout.close()
p2.communicate()
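# Usage sketch for minimap2Align (hypothetical paths; requires minimap2 and samtools
# on the PATH). The sorted BAM can then be converted with bam2gff3() and folded into
# the alignments dictionary via alignments2dict():
#   minimap2Align('transcripts.fasta', 'genome.fasta', 8, 3000, 'transcripts.bam')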
def iso_seq_minimap2(transcripts, genome, cpus, intron, output):
'''
    function to align PacBio Iso-Seq reads to the genome with minimap2
'''
FNULL = open(os.devnull, 'w')
bamthreads = int(round(int(cpus) / 2))
if bamthreads > 4:
bamthreads = 4
minimap2_cmd = ['minimap2', '-ax', 'splice', '-t',
str(cpus), '--cs', '-uf', '-C5', '-G', str(intron), genome,
transcripts]
samtools_cmd = ['samtools', 'sort', '--reference', genome,
'-@', str(bamthreads), '-o', output, '-']
    log.debug('{} | {}'.format(' '.join(minimap2_cmd), ' '.join(samtools_cmd)))
p1 = subprocess.Popen(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)
p2 = subprocess.Popen(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL, stdin=p1.stdout)
p1.stdout.close()
p2.communicate()
def nanopore_cDNA_minimap2(transcripts, genome, cpus, intron, output):
'''
    function to align Nanopore 2D cDNA reads to the genome with minimap2
'''
FNULL = open(os.devnull, 'w')
bamthreads = int(round(int(cpus) / 2))
if bamthreads > 4:
bamthreads = 4
minimap2_cmd = ['minimap2', '-ax', 'splice', '-t',
str(cpus), '--cs', '-G', str(intron), genome, transcripts]
samtools_cmd = ['samtools', 'sort', '--reference', genome,
'-@', str(bamthreads), '-o', output, '-']
    log.debug('{} | {}'.format(' '.join(minimap2_cmd), ' '.join(samtools_cmd)))
p1 = subprocess.Popen(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)
p2 = subprocess.Popen(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL, stdin=p1.stdout)
p1.stdout.close()
p2.communicate()
def nanopore_mRNA_minimap2(transcripts, genome, cpus, intron, output):
'''
    function to align Nanopore direct mRNA reads to the genome with minimap2
'''
FNULL = open(os.devnull, 'w')
bamthreads = int(round(int(cpus) / 2))
if bamthreads > 4:
bamthreads = 4
minimap2_cmd = ['minimap2', '-ax', 'splice', '-t',
str(cpus), '--cs', '-uf', '-k14', '-G', str(intron),
genome, transcripts]
samtools_cmd = ['samtools', 'sort', '--reference', genome,
'-@', str(bamthreads), '-o', output, '-']
    log.debug('{} | {}'.format(' '.join(minimap2_cmd), ' '.join(samtools_cmd)))
p1 = subprocess.Popen(minimap2_cmd, stdout=subprocess.PIPE, stderr=FNULL)
p2 = subprocess.Popen(samtools_cmd, stdout=subprocess.PIPE, stderr=FNULL, stdin=p1.stdout)
p1.stdout.close()
p2.communicate()
def mergeBAMs(*args, **kwargs):
cmd = ['samtools', 'merge', '-@', str(kwargs['cpus']), kwargs['output']]
cmd = cmd + list(args)
runSubprocess(cmd, '.', log)
def catFiles(*args, **kwargs):
cmd = ['cat']
cmd = cmd + list(args)
runSubprocess2(cmd, '.', log, kwargs['output'])
def runGMAP(transcripts, genome, cpus, intron, tmpdir, output):
# first build genome database
build_log = os.path.join(tmpdir, 'gmap-build.log')
with open(build_log, 'w') as logfile:
subprocess.call(['gmap_build', '-D', tmpdir, '-d', 'genome',
'-k', '13', genome], stdout=logfile, stderr=logfile)
# now map transcripts
map_log = os.path.join(tmpdir, 'gmap-map.log')
with open(map_log, 'w') as logfile:
with open(output, 'w') as out:
subprocess.call(['gmap', '--cross-species', '-f', '3', '-K', str(intron), '-n', '1', '-t', str(
cpus), '-B', '5', '-D', tmpdir, '-d', 'genome', transcripts], stdout=out, stderr=logfile)
def runBUSCO(input, Database, cpus, tmpdir, output):
# run busco in protein mapping mode
if (sys.version_info > (3, 0)):
BUSCO = os.path.join(parentdir,
'aux_scripts', 'funannotate-BUSCO2.py')
else:
BUSCO = os.path.join(parentdir,
'aux_scripts', 'funannotate-BUSCO2-py2.py')
cmd = [BUSCO, '-i', input, '-m', 'proteins', '-l',
Database, '-o', 'busco', '-c', str(cpus), '-f']
runSubprocess(cmd, tmpdir, log)
# now parse output and write to annotation file
with open(output, 'w') as out:
with open(os.path.join(tmpdir, 'run_busco', 'full_table_busco.tsv'), 'r') as busco:
for line in busco:
if line.startswith('#'):
continue
col = line.split('\t')
# if diploid these should show up, but problematic for drawing trees....
if col[1] == 'Complete' or col[1] == 'Duplicated':
out.write("%s\tnote\tBUSCO:%s\n" % (col[2], col[0]))
def dupBUSCO2gff(ID, base_folder, locationID):
hmmerfolder = os.path.join(base_folder, 'hmmer_output')
geneID = ''
AugFile = ''
GFFfile = os.path.join(base_folder, 'augustus_output', 'gffs', ID+'.gff')
if geneID == '':
for file in os.listdir(hmmerfolder):
if file.startswith(ID):
with open(os.path.join(hmmerfolder, file), 'r') as hmmer:
for line in hmmer:
if not line.startswith('#'):
longID = line.split()[0]
longID = longID.replace(']', '')
partsID = longID.split('[')
if locationID == partsID[1]:
geneID = partsID[0]
AugFile = os.path.join(
base_folder, 'augustus_output', 'predicted_genes', file)
break
# so now should have gene name, get the GFF from augustus
with open(GFFfile, 'w') as gffout:
with open(AugFile, 'r') as augustus:
for pred in readBlocks(augustus, '# start gene'):
if pred[0].startswith('# This output'):
continue
if pred[0].startswith('##gff-version 3'):
continue
if pred[0].startswith('# Please cite'):
continue
if geneID in pred[0]:
for x in pred:
if not x.startswith('#'):
gffout.write(x)
def getCompleteBuscos(input, ploidy=1):
busco_complete = {}
with open(input, 'r') as infile:
for line in infile:
line = line.rstrip()
if line.startswith('#'):
continue
passing = ['Complete']
if ploidy > 1:
passing.append('Duplicated')
cols = line.split('\t')
if cols[1] in passing:
busco, status, gene, score, length = cols
if gene not in busco_complete:
busco_complete[gene] = busco
return busco_complete
def filterGFF3(keepDict, genome, gff3, output):
#load into Dictionary
Genes = {}
Genes = gff2dict(gff3, genome, Genes)
filtered = {}
for k,v in Genes.items():
if v['ids'][0] in keepDict:
filtered[k] = v
dict2gff3(filtered, output)
def parseBUSCO2genome(input, ploidy, ContigSizes, output):
# input is BUSCO output, ploidy is integer, ContigSizes is dictionary, output is a bedfile, function returns dictionary
busco_complete = {}
hits = {}
with open(output, 'w') as bedfile:
with open(input, 'r') as buscoinput:
for line in buscoinput:
line = line.replace('\n', '')
if line.startswith('#'):
continue
cols = line.split('\t')
if cols[1] == 'Complete' or cols[1] == 'Duplicated':
contig = cols[2]
start = cols[3]
end = cols[4]
score = cols[5]
length = cols[6]
ID = contig+':'+start+'-'+end
if cols[1] == 'Complete':
if not cols[0] in hits:
hits[cols[0]] = (
ID, score, contig, start, end, length)
if ploidy > 1:
if cols[1] == 'Duplicated':
if not cols[0] in hits:
hits[cols[0]] = (
ID, score, contig, start, end, length)
dupBUSCO2gff(
cols[0], os.path.dirname(input), ID)
else:
oldscore = float(hits.get(cols[0])[1])
if float(score) > oldscore:
hits[cols[0]] = (
ID, score, contig, start, end, length)
dupBUSCO2gff(
cols[0], os.path.dirname(input), ID)
for k, v in natsorted(list(hits.items())):
# validate locations for bedfile, move 100 bp in each direction for bedfile
start = int(v[3]) - 100
if start < 1: # negative no good
start = 1
end = int(v[4]) + 100
# check it doesn't go past contig length
            # use the contig recorded with this hit (v[2]), not the leftover loop variable
            if end > ContigSizes.get(v[2]):
                end = ContigSizes.get(v[2])
            bedfile.write('%s\t%i\t%i\t%s\n' % (v[2], start, end, k))
busco_complete[k] = v[0]
return busco_complete
def RepeatBlast(input, cpus, evalue, DataBase, tmpdir, output, diamond=True):
# run blastp against repeats
blast_tmp = os.path.join(tmpdir, 'repeats.xml')
if diamond:
blastdb = os.path.join(DataBase, 'repeats.dmnd')
cmd = ['diamond', 'blastp', '--sensitive', '--query', input, '--threads', str(cpus),
'--out', blast_tmp, '--db', blastdb, '--evalue', str(evalue), '--max-target-seqs', '1', '--outfmt', '5']
else:
blastdb = os.path.join(DataBase, 'REPEATS')
cmd = ['blastp', '-db', blastdb, '-outfmt', '5', '-out', blast_tmp, '-num_threads', str(cpus),
'-max_target_seqs', '1', '-evalue', str(evalue), '-query', input]
runSubprocess4(cmd, '.', log)
# parse results
with open(output, 'w') as out:
with open(blast_tmp, 'r') as results:
for qresult in SearchIO.parse(results, "blast-xml"):
hits = qresult.hits
ID = qresult.id
num_hits = len(hits)
if num_hits > 0:
length = 0
for i in range(0, len(hits[0].hsps)):
length += hits[0].hsps[i].aln_span
pident = hits[0].hsps[0].ident_num / float(length)
out.write("%s\t%s\t%f\t%s\n" %
(ID, hits[0].id, pident, hits[0].hsps[0].evalue))
def eggnog2dict(annotations):
# load in annotation dictionary
EggNog = {}
with open(annotations, 'r') as input:
reader = csv.reader(input, delimiter='\t')
for line in reader:
EggNog[line[1]] = line[5]
return EggNog
def number_present(s):
return any(i.isdigit() for i in s)
def capfirst(x):
return x[0].upper() + x[1:]
def item2index(inputList, item):
# return the index of an item in the input list
item_index = None
for x in inputList:
if item in x:
item_index = inputList.index(x)
return item_index
def getEggNogHeaders(input):
IDi, DBi, OGi, Genei, COGi, Desci = (None,)*6
with open(input, 'r') as infile:
for line in infile:
line = line.replace('\n', '')
if line.startswith('#query_name'): # this is HEADER
headerCols = line.split('\t')
IDi = item2index(headerCols, 'query_name')
Genei = item2index(headerCols, 'predicted_gene_name')
DBi = item2index(headerCols, 'Annotation_tax_scope')
OGi = item2index(headerCols, 'OGs')
COGi = item2index(headerCols, 'COG cat')
Desci = item2index(headerCols, 'eggNOG annot')
break
return IDi, DBi, OGi, Genei, COGi, Desci
def parseEggNoggMapper(input, output):
Definitions = {}
# indexes from header file
IDi, DBi, OGi, Genei, COGi, Desci = getEggNogHeaders(input)
# take annotations file from eggnog-mapper and create annotations
with open(output, 'w') as out:
with open(input, 'r') as infile:
for line in infile:
line = line.replace('\n', '')
if line.startswith('#'):
continue
cols = line.split('\t')
ID = cols[IDi]
DB = cols[DBi].split('[')[0]
OGs = cols[OGi].split(',')
NOG = ''
for x in OGs:
if DB in x:
NOG = 'ENOG41' + x.split('@')[0]
Gene = ''
if cols[Genei] != '':
if not '_' in cols[Genei] and not '.' in cols[Genei] and number_present(cols[Genei]):
Gene = cols[Genei]
Description = cols[Desci]
if NOG == '':
continue
if not NOG in Definitions:
Definitions[NOG] = Description
out.write("%s\tnote\tEggNog:%s\n" % (ID, NOG))
if cols[COGi] != '':
out.write("%s\tnote\tCOG:%s\n" %
(ID, cols[COGi].replace(' ', '')))
if Gene != '':
product = Gene.lower()+'p'
product = capfirst(product)
out.write("%s\tname\t%s\n" % (ID.split('-T')[0], Gene))
out.write("%s\tproduct\t%s\n" % (ID, product))
if Description != '':
out.write("%s\tnote\t%s\n" % (ID, Description))
return Definitions
def batch_iterator(iterator, batch_size):
entry = True # Make sure we loop once
while entry:
batch = []
while len(batch) < batch_size:
try:
entry = next(iterator)
except StopIteration:
entry = None
if entry is None:
# End of file
break
batch.append(entry)
if batch:
yield batch
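# Example use of batch_iterator: write an input FASTA into chunks of up to 200 records
# (hypothetical file names):
#   records = SeqIO.parse('proteins.fasta', 'fasta')
#   for i, batch in enumerate(batch_iterator(records, 200)):
#       SeqIO.write(batch, 'chunk_%i.fa' % (i + 1), 'fasta')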
def fasta2chunks(input, chunks, tmpdir, output):
# split the input fasta file into 20 chunks to process
with open(input, 'r') as seqs:
SeqCount = countfasta(input)
SeqRecords = SeqIO.parse(seqs, 'fasta')
chunks = SeqCount / int(chunks)
# divide into chunks, store in tmp file
folder = os.path.join(tmpdir, output)
if not os.path.exists(folder):
os.makedirs(folder)
else:
shutil.rmtree(folder)
os.makedirs(folder)
for i, batch in enumerate(batch_iterator(SeqRecords, chunks)):
filename = "chunk_%i.fa" % (i+1)
tmpout = os.path.join(folder, filename)
handle = open(tmpout, "w")
SeqIO.write(batch, handle, "fasta")
handle.close()
def signalP(input, tmpdir, output):
# split input file into chunks, 20 should mean < 200 proteins per chunk
from funannotate.check import check_version7
version = check_version7('signalp')
if '.' in version:
version = int(version.split('.')[0])
if version > 4:
cmd = ['signalp', '-stdout', '-org', 'euk', '-format', 'short', '-fasta']
else:
cmd = ['signalp', '-t', 'euk', '-f', 'short']
fasta2chunks(input, 40, tmpdir, 'signalp_tmp')
for file in os.listdir(os.path.join(tmpdir, 'signalp_tmp')):
if file.startswith('chunk'):
file = os.path.join(tmpdir, 'signalp_tmp', file)
tmp_out = re.sub(r'\.fa$','.signalp.out',file)
cmd1 = cmd + [file]
runSubprocess2(cmd1, '.', log, tmp_out)
# now concatenate all outputs
if os.path.isfile(output):
os.remove(output)
with open(output, 'a') as finalout:
for file in os.listdir(os.path.join(tmpdir, 'signalp_tmp')):
if file.endswith('.signalp.out'):
file = os.path.join(tmpdir, 'signalp_tmp', file)
with open(file) as infile:
finalout.write(infile.read())
# cleanup tmp directory
shutil.rmtree(os.path.join(tmpdir, 'signalp_tmp'))
def parseSignalP(sigP, secretome_annot):
sigpDict = {}
version = 4
with open(sigP, 'r') as results:
for line in results:
line = line.rstrip()
if line.startswith('#'):
if line.startswith('# SignalP-5'):
version = 5
elif line.startswith('# SignalP-6'):
version = 6
continue
if version < 5:
col = line.split(' ') # not tab delimited
col = [_f for _f in col if _f] # clean up empty spaces
if col[9] == 'Y': # then there is signal peptide
ID = col[0]
end = int(col[2]) - 1
sigpDict[ID] = [end, '', '']
elif version == 5: # version 5 has different format and tab delimited hooray!
if '\t' in line:
cols = line.split('\t')
if cols[1] != 'OTHER': # then signal peptide
ID, prediction, score1, score2, position = cols[:5]
components = position.split()
pos = components[2].split('-')[0]
prob = components[-1]
aa = components[3].replace('.', '')
sigpDict[ID] = [pos, aa, prob]
else:
if '\t' in line:
cols = line.split('\t')
if cols[1] == 'SP': # then signal peptide
try:
ID, prediction, score1, score2, position = cols[:5]
except ValueError:
sys.stderr.write('signalP parse error: {}\n'.format(line))
continue
components = position.split()
pos = components[2].split('-')[0]
prob = components[-1]
sigpDict[ID] = [pos, '', prob]
with open(secretome_annot, 'w') as secout:
for k, v in natsorted(list(sigpDict.items())):
if v[1] != '':
secout.write("{:}\tnote\tSECRETED:SignalP(1-{:},cutsite={:},prob={:})\n".format(k, v[0], v[1], v[2]))
else:
secout.write("{:}\tnote\tSECRETED:SignalP(1-{:})\n".format(k, v[0]))
def parsePhobiusSignalP(phobius, sigP, membrane_annot, secretome_annot):
# give directory of annotate_misc, first get phobius results
'''
This is what phobius results look like
ID TM SP Prediction
VE00_00001 0 0 o
VE00_00002 2 0 i198-219o283-301i
VE00_00003 0 0 o
VE00_00004 0 Y n8-18c23/24o
VE00_00005 12 0 i49-69o89-107i119-138o144-167i179-200o212-234i280-299o319-341i348-366o378-398i410-430o442-465i
'''
pSecDict = {}
pTMDict = {}
sigpDict = {}
# parsing short format phobius
with open(phobius, 'r') as input1:
for line in input1:
line = line.rstrip()
if line.startswith('ID') or line.startswith('SEQ'):
continue
if '\t' in line:
cols = line.split('\t')
else:
cols = line.split()
geneID = cols[0]
if int(cols[1]) > 0: # then found TM domain
annot = cols[3]
if not geneID in pTMDict:
pTMDict[geneID] = 'TransMembrane:'+cols[1]+' ('+annot+')'
if cols[2] == 'Y': # then sig pep discovered
location = cols[3].split('/')[0]
clevage = location.split('c')[-1]
if not geneID in pSecDict:
pSecDict[geneID] = [clevage, '', '']
if sigP: # will be passed FALSE if signalP data missing
# parse signalp output and turn into annotation file
version = 4
with open(sigP, 'r') as results:
for line in results:
line = line.rstrip()
if line.startswith('#'):
if line.startswith('# SignalP-5'):
version = 5
continue
if version < 5:
col = line.split(' ') # not tab delimited
col = [_f for _f in col if _f] # clean up empty spaces
if col[9] == 'Y': # then there is signal peptide
ID = col[0]
end = int(col[2]) - 1
sigpDict[ID] = [end, '', '']
else: # version 5 has different format and tab delimited hooray!
if '\t' in line:
cols = line.split('\t')
if cols[1] != 'OTHER': # then signal peptide
ID, prediction, score1, score2, position = cols[:5]
components = position.split()
pos = components[2].split('-')[0]
prob = components[-1]
aa = components[3].replace('.', '')
sigpDict[ID] = [pos, aa, prob]
else:
sigpDict = pSecDict
# write annotation files
with open(membrane_annot, 'w') as memout:
for k, v in natsorted(list(pTMDict.items())):
memout.write("%s\tnote\t%s\n" % (k, v))
with open(secretome_annot, 'w') as secout:
for k, v in natsorted(list(sigpDict.items())):
if v[1] != '':
secout.write("{:}\tnote\tSECRETED:SignalP(1-{:},cutsite={:},prob={:})\n".format(k, v[0], v[1], v[2]))
else:
secout.write("{:}\tnote\tSECRETED:SignalP(1-{:})\n".format(k, v[0]))
def n_lower_chars(string):
return sum(1 for c in string if c.islower())
def CheckAugustusSpecies(input):
# get the possible species from augustus
augustus_list = []
for i in os.listdir(os.path.join(os.environ["AUGUSTUS_CONFIG_PATH"], 'species')):
if not i.startswith('.'):
augustus_list.append(i)
augustus_list = set(augustus_list)
if input in augustus_list:
return True
else:
return False
def CheckFunannotateSpecies(input, db):
# get the possible species from funannotateDB dir -- on install mirrored Augustus
species_list = []
for i in os.listdir(os.path.join(db, 'trained_species')):
if not i.startswith('.'):
species_list.append(i)
species_list = set(species_list)
if input in species_list:
return True
else:
return False
def SortRenameHeaders(input, output):
# sort records and write temp file
with open(output, 'w') as out:
with open(input, 'r') as input:
records = list(SeqIO.parse(input, 'fasta'))
            # Python 3: list.sort() no longer accepts cmp=; sort longest-to-shortest by length
            records.sort(key=len, reverse=True)
counter = 1
for rec in records:
rec.name = ''
rec.description = ''
rec.id = 'scaffold_' + str(counter)
counter += 1
SeqIO.write(records, out, 'fasta')
def validate_tRNA(input, genes, gaps, output):
# run bedtools intersect to keep only input that dont intersect with either genes or gaps
sortedInput = os.path.abspath(input)+'.sorted.gff3'
#sortGFFproper(input, sortedInput)
cmd1 = ['bedtools', 'sort', '-i', input]
with open(sortedInput, 'w') as outfile:
subprocess.call(cmd1, stdout=outfile)
sortedGenes = os.path.abspath(genes)+'.sorted.gff3'
#sortGFFproper(genes, sortedGenes)
cmd2 = ['bedtools', 'sort', '-i', genes]
with open(sortedGenes, 'w') as outfile:
subprocess.call(cmd2, stdout=outfile)
if gaps:
sortedGaps = os.path.abspath(gaps)+'.sorted.gff3'
#sortGFFproper(gaps, sortedGaps)
cmd3 = ['bedtools', 'sort', '-i', gaps]
with open(sortedGaps, 'w') as outfile:
subprocess.call(cmd3, stdout=outfile)
cmd = ['bedtools', 'intersect', '-sorted', '-v', '-a', sortedInput, '-b', sortedGenes]
if gaps:
cmd.append(sortedGaps)
tmpOut = os.path.abspath(output)+'.tmp'
runSubprocess2(cmd, '.', log, tmpOut)
# now sort properly
sortGFFproper(tmpOut, output)
os.remove(tmpOut)
# via https://stackoverflow.com/questions/2154249/identify-groups-of-continuous-numbers-in-a-list
def list2groups(L):
if len(L) < 1:
return
first = last = L[0]
for n in L[1:]:
if n - 1 == last: # Part of the group, bump the end
last = n
else: # Not part of the group, yield current group and start a new
yield first, last
first = last = n
yield first, last # Yield the last group
def checkMask(genome, bedfile):
from Bio.SeqIO.FastaIO import SimpleFastaParser
# load contig names and sizes into dictionary, get masked repeat stats
GenomeLength = 0
maskedSize = 0
masked = {}
ContigSizes = {}
with open(genome, 'r') as input:
for header, Seq in SimpleFastaParser(input):
if ' ' in header:
ID = header.split(' ')[0]
else:
ID = header
if not ID in masked:
masked[ID] = []
if not ID in ContigSizes:
ContigSizes[ID] = len(Seq)
GenomeLength += len(Seq)
maskedSize += n_lower_chars(Seq)
for i, c in enumerate(Seq):
if c.islower():
masked[ID].append(i) # 0 based
    if maskedSize == 0:  # genome is not softmasked; write an empty BED file and report zero masking
with open(bedfile, 'w') as bedout:
bedout.write('')
return ContigSizes, GenomeLength, maskedSize, 0.0
else:
counter = 1
with open(bedfile, 'w') as bedout:
for k, v in natsorted(list(masked.items())):
repeats = list(list2groups(v))
for item in repeats:
if len(item) == 2:
bedout.write('{:}\t{:}\t{:}\tRepeat_{:}\n'.format(
k, item[0], item[1], counter))
counter += 1
percentMask = maskedSize / float(GenomeLength)
return ContigSizes, GenomeLength, maskedSize, percentMask
def maskingstats2bed(input, counter, alock):
from Bio.SeqIO.FastaIO import SimpleFastaParser
masked = []
gaps = []
maskedSize = 0
bedfilename = input.replace('.fasta', '.bed')
gapfilename = input.replace('.fasta', '.gaps')
with open(input, 'r') as infile:
for header, Seq in SimpleFastaParser(infile):
if ' ' in header:
ID = header.split(' ')[0]
else:
ID = header
for i, c in enumerate(Seq):
if c == 'N' or c == 'n':
masked.append(i)
maskedSize += 1
gaps.append(i)
elif c.islower():
masked.append(i) # 0 based
maskedSize += 1
    if maskedSize > 0:  # masked or gap positions found; write them out as BED intervals
with open(bedfilename, 'w') as bedout:
repeats = list(list2groups(masked))
for item in repeats:
if len(item) == 2:
bedout.write('{:}\t{:}\t{:}\tRepeat_\n'.format(
ID, item[0], item[1]))
if len(gaps) > 0:
with open(gapfilename, 'w') as gapout:
bedGaps = list(list2groups(gaps))
for item in bedGaps:
if len(item) == 2:
gapout.write(
'{:}\t{:}\t{:}\tassembly-gap_\n'.format(ID, item[0], item[1]))
with alock:
counter.value += maskedSize
def mask_safe_run(*args, **kwargs):
"""Call run(), catch exceptions."""
try:
maskingstats2bed(*args, **kwargs)
except Exception as e:
print(("error: %s run(*%r, **%r)" % (e, args, kwargs)))
def checkMasklowMem(genome, bedfile, gapsfile, cpus):
from Bio.SeqIO.FastaIO import SimpleFastaParser
# load contig names and sizes into dictionary, get masked repeat stats
ContigSizes = {}
tmpdir = os.path.join(os.path.dirname(genome), 'mask_'+str(uuid.uuid4()))
os.makedirs(tmpdir)
file_list = []
with open(genome, 'r') as input:
for header, Seq in SimpleFastaParser(input):
if ' ' in header:
ID = header.split(' ')[0]
else:
ID = header
if not ID in ContigSizes:
ContigSizes[ID] = len(Seq)
with open(os.path.join(tmpdir, ID+'.fasta'), 'w') as fastaout:
fastaout.write('>{:}\n{:}\n'.format(ID, Seq))
file_list.append(os.path.join(tmpdir, ID+'.fasta'))
# num = 1
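    # process each per-contig FASTA in parallel; every worker writes its own .bed/.gaps file
    # and adds its masked-base count to the shared Manager counter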
p = multiprocessing.Pool(processes=cpus)
TotalMask = multiprocessing.Manager().Value('i', 0)
lock = multiprocessing.Manager().Lock()
result = []
for i in file_list:
result.append(p.apply_async(mask_safe_run, [i, TotalMask, lock]))
p.close()
p.join()
repeatNum = 1
gapNum = 1
with open(bedfile, 'w') as bedout:
for file in natsorted(os.listdir(tmpdir)):
if file.endswith('.bed'):
with open(os.path.join(tmpdir, file), 'r') as infile:
for line in infile:
line = line.replace(
'Repeat_', 'Repeat_'+str(repeatNum))
bedout.write(line)
repeatNum += 1
with open(gapsfile, 'w') as gapout:
for file in natsorted(os.listdir(tmpdir)):
if file.endswith('.gaps'):
with open(os.path.join(tmpdir, file), 'r') as infile:
for line in infile:
line = line.replace(
'assembly-gap_', 'assembly-gap_'+str(gapNum))
gapout.write(line)
gapNum += 1
SafeRemove(tmpdir)
GenomeLength = sum(ContigSizes.values())
percentMask = TotalMask.value / float(GenomeLength)
return ContigSizes, GenomeLength, TotalMask.value, percentMask
def RunGeneMarkES(command, input, ini, maxintron, softmask, cpus, tmpdir, output, fungus):
# make directory to run script from
outdir = os.path.join(tmpdir, 'genemark')
if not os.path.isdir(outdir):
os.makedirs(outdir)
if cpus > 64:
cpus = 64
contigs = os.path.abspath(input)
log.info("Running GeneMark-ES on assembly")
cmd = [command, '--ES', '--max_intron', str(maxintron), '--soft_mask', str(
softmask), '--cores', str(cpus), '--sequence', contigs]
if fungus == 'fungus':
cmd = cmd + ['--fungus']
if ini:
cmd = cmd + ['--ini_mod', os.path.abspath(ini)]
runSubprocess3(cmd, outdir, log)
# rename results and grab mod file
try:
os.rename(os.path.join(outdir, 'output', 'gmhmm.mod'),
os.path.join(tmpdir, 'gmhmm.mod'))
except OSError:
log.error("GeneMark-ES failed: {:} file missing, please check logfiles.".format(
os.path.join(outdir, 'output', 'gmhmm.mod')))
# convert genemark gtf to gff3 so GAG can interpret it
gm_gtf = os.path.join(outdir, 'genemark.gtf')
if checkannotations(gm_gtf):
# log.info("Converting GeneMark GTF file to GFF3")
with open(output, 'w') as out:
subprocess.call([GeneMark2GFF, gm_gtf], stdout=out)
def RunGeneMarkET(command, input, ini, evidence, maxintron, softmask, cpus, tmpdir, output, fungus):
# make directory to run script from
outdir = os.path.join(tmpdir, 'genemark')
if not os.path.isdir(outdir):
os.makedirs(outdir)
if cpus > 64:
cpus = 64
contigs = os.path.abspath(input)
# get only intron information from evidence
hintsfile = os.path.join(tmpdir, 'genemark.intron-hints.gff')
with open(hintsfile, 'w') as hints:
with open(evidence, 'r') as evid:
for line in evid:
if '\tintron\t' in line and '\tb2h\t' in line:
tmprow = line.split("\t")
tmprow[5] = "500" # for intron hint score to be 500
hints.write("\t".join(tmprow))
log.info("Running GeneMark-ET on assembly")
cmd = [command, '--ET', os.path.abspath(hintsfile), '--max_intron', str(
maxintron), '--soft_mask', str(softmask), '--cores', str(cpus), '--sequence', contigs]
if fungus == 'fungus':
cmd = cmd + ['--fungus']
if ini:
cmd = cmd + ['--ini_mod', os.path.abspath(ini)]
runSubprocess3(cmd, outdir, log)
# rename results and grab mod file
try:
os.rename(os.path.join(outdir, 'output', 'gmhmm.mod'),
os.path.join(tmpdir, 'gmhmm.mod'))
except OSError:
log.error("GeneMark-ET failed: {:} file missing, please check logfiles.".format(
os.path.join(outdir, 'output', 'gmhmm.mod')))
# convert genemark gtf to gff3 so GAG can interpret it
gm_gtf = os.path.join(outdir, 'genemark.gtf')
if checkannotations(gm_gtf):
# log.info("Converting GeneMark GTF file to GFF3")
with open(output, 'w') as out:
subprocess.call([GeneMark2GFF, gm_gtf], stdout=out)
def dict2glimmer(input, output):
# take funannotate dictionary convert to glimmer training format
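    # output format: one 'contig start end' line per CDS exon (coordinates swapped on the minus strand),
    # with a blank line separating genes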
with open(output, 'w') as outfile:
for k, v in list(input.items()):
for i in range(0, len(v['ids'])):
for c in v['CDS'][i]:
if v['strand'] == '+':
outfile.write('{:} {:} {:}\n'.format(
v['contig'], c[0], c[1]))
else:
outfile.write('{:} {:} {:}\n'.format(
v['contig'], c[1], c[0]))
outfile.write('\n')
def glimmer2gff3(input, output):
'''
scaffold_39 GlimmerHMM mRNA 23692 25015 . + . ID=scaffold_39.path1.gene12;Name=scaffold_39.path1.gene12
scaffold_39 GlimmerHMM CDS 23692 23886 . + 0 ID=scaffold_39.cds12.1;Parent=scaffold_39.path1.gene12;Name=scaffold_39.path1.gene12;Note=initial-exon
scaffold_39 GlimmerHMM CDS 24282 24624 . + 0 ID=scaffold_39.cds12.2;Parent=scaffold_39.path1.gene12;Name=scaffold_39.path1.gene12;Note=internal-exon
scaffold_39 GlimmerHMM CDS 24711 25015 . + 2 ID=scaffold_39.cds12.3;Parent=scaffold_39.path1.gene12;Name=scaffold_39.path1.gene12;Note=final-exon
scaffold_39 GlimmerHMM mRNA 25874 27899 . - . ID=scaffold_39.path1.gene13;Name=scaffold_39.path1.gene13
scaffold_39 GlimmerHMM CDS 25874 26973 . - 2 ID=scaffold_39.cds13.1;Parent=scaffold_39.path1.gene13;Name=scaffold_39.path1.gene13;Note=final-exon
scaffold_39 GlimmerHMM CDS 27257 27899 . - 0 ID=scaffold_39.cds13.2;Parent=scaffold_39.path1.gene13;Name=scaffold_39.path1.gene13;Note=initial-exon
'''
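    # The raw GlimmerHMM names above are rewritten as glimmerG_<n>/glimmerT_<n>-T1,
    # mRNAs shorter than 150 bp are skipped, and an exon feature is written alongside each CDS.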
with open(output, 'w') as outfile:
outfile.write(("##gff-version 3\n"))
exonCounts = {}
GeneCount = 1
skipList = []
idsSeen = {}
with open(input, 'r') as infile:
for i, line in enumerate(infile):
if line.startswith('##sequence-region'):
idsSeen = {}
if line.startswith('#') or line.startswith('\n'):
continue
line = line.strip()
if line.count('\t') < 8:
print('ERROR parsing GlimmerHMM Raw output in line {}:\n {}'.format(i+1, line))
continue
contig, source, feature, start, end, score, strand, phase, attributes = line.split('\t')
ID, Parent, Name = (None,)*3
info = attributes.split(';')
for x in info:
if x.startswith('ID='):
ID = x.replace('ID=', '')
elif x.startswith('Parent='):
Parent = x.replace('Parent=', '')
if Parent and Parent in skipList:
continue
if feature == 'mRNA':
genelen = int(end) - int(start)
if genelen < 150:
if not ID in skipList:
skipList.append(ID)
continue
geneID = 'glimmerG_'+str(GeneCount)
transID = 'glimmerT_'+str(GeneCount)+'-T1'
idsSeen[ID] = (geneID, transID)
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Alias={:};\n'.format(
contig, source, 'gene', start, end, score, strand, phase, geneID, ID))
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Parent={:};Alias={:};\n'.format(
contig, source, 'mRNA', start, end, '.', strand, '.', transID, geneID, ID))
GeneCount += 1
elif feature == 'CDS':
if Parent in idsSeen:
geneID, transID = idsSeen.get(Parent)
if not transID in exonCounts:
exonCounts[transID] = 1
else:
exonCounts[transID] += 1
num = exonCounts.get(transID)
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:}.exon{:};Parent={:};\n'.format(
contig, source, 'exon', start, end, '.', strand, '.', transID, num, transID))
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:}.cds;Parent={:};\n'.format(
contig, source, feature, start, end, score, strand, phase, transID, transID))
else:
print('ERROR parsing GlimmerHMM Raw output in line {}:\n {}'.format(i+1, line))
def runGlimmerHMM(fasta, gff3, dir, output):
'''
wrapper to run GlimmerHMM training followed by prediction
input is GFF3 format high quality models, i.e. from PASA/transdecoder
output is standard GFF3 format
'''
    # generate training directory on top of the dir that is passed
tmpdir = os.path.join(dir, 'glimmerhmm')
if os.path.isdir(tmpdir):
SafeRemove(tmpdir)
# generate glimmer training input
# load gff3 into dictionary
Genes = {}
Genes = gff2dict(os.path.abspath(gff3), os.path.abspath(fasta), Genes)
glimmExons = os.path.join(dir, 'glimmer.exons')
dict2glimmer(Genes, glimmExons)
# now run trainGlimmerHMM
cmd = ['trainGlimmerHMM', os.path.abspath(
fasta), os.path.abspath(glimmExons), '-d', tmpdir]
    runSubprocess4(cmd, '.', log)  # runSubprocess4 --> stdout/stderr to devnull
# now run GlimmerHMM prediction
# glimmhmm.pl <glimmerhmm_program> <fasta_file> <train_dir> <options>
glimmerRaw = os.path.abspath(os.path.join(dir, 'glimmerHMM.output.raw'))
cmd = ['perl', which_path('glimmhmm.pl'), which_path(
'glimmerhmm'), os.path.abspath(fasta), os.path.abspath(tmpdir), '-g']
runSubprocess2(cmd, dir, log, glimmerRaw)
# now convert to proper GFF3 format
glimmer2gff3(glimmerRaw, output)
return os.path.abspath(tmpdir)
def runGlimmerHMMTrained(fasta, training, dir, output):
glimmerRaw = os.path.abspath(os.path.join(dir, 'glimmerHMM.output.raw'))
cmd = ['perl', which_path('glimmhmm.pl'), which_path(
'glimmerhmm'), os.path.abspath(fasta), os.path.abspath(training), '-g']
runSubprocess2(cmd, dir, log, glimmerRaw)
# now convert to proper GFF3 format
glimmer2gff3(glimmerRaw, output)
def glimmer_run_check(Result, training, weights):
if checkannotations(Result):
log.info('Using existing GlimmerHMM results: {:}'.format(Result))
return False
if not checkannotations(training):
log.info(
'GlimmerHMM training failed, empty training set: {:}'.format(training))
return False
if weights < 1:
log.info(
'Skipping GlimmerHMM prediction as weight set to {:}'.format(weights))
return False
programs = ['trainGlimmerHMM', 'glimmerhmm', 'glimmhmm.pl']
for x in programs:
if not which_path(x):
log.info(
'GlimmerHMM failed, dependency not in $PATH: {:}'.format(x))
return False
return True
def dict2zff(scaffoldDict, GeneDict, output):
# take funannotate dictionary convert to zff training format
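    # ZFF labels written below: Einit = first CDS segment, Eterm = last, Exon = internal;
    # start/end are swapped for minus-strand genes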
with open(output, 'w') as outfile:
for k, v in natsorted(list(scaffoldDict.items())):
outfile.write('>{:}\n'.format(k))
for genes in v:
gd = GeneDict.get(genes)
for i in range(0, len(gd['ids'])):
for num, c in enumerate(gd['CDS'][i]):
if gd['strand'] == '+':
start = c[0]
end = c[1]
else:
start = c[1]
end = c[0]
if num == 0:
outfile.write('Einit\t{:}\t{:}\t{:}\n'.format(
start, end, gd['ids'][i]))
elif num == len(gd['CDS'][i])-1:
outfile.write('Eterm\t{:}\t{:}\t{:}\n'.format(
start, end, gd['ids'][i]))
else:
outfile.write('Exon\t{:}\t{:}\t{:}\n'.format(
start, end, gd['ids'][i]))
def zff2gff3(input, fasta, output):
'''
>scaffold_40
Einit 7104 7391 - 14.809 0 0 1 scaffold_40-snap.1
Eterm 6728 7039 - 1.974 0 0 2 scaffold_40-snap.1
Einit 8935 9070 + 9.578 0 1 0 scaffold_40-snap.2
Exon 9119 9206 + 10.413 2 2 0 scaffold_40-snap.2
Exon 9254 9389 + 21.529 1 0 2 scaffold_40-snap.2
Eterm 9439 10128 + 42.769 0 0 0 scaffold_40-snap.2
Einit 11784 12139 - 38.847 0 2 2 scaffold_40-snap.3
Eterm 11185 11761 - 72.324 1 0 0 scaffold_40-snap.3
Einit 13191 13250 - 7.662 0 0 1 scaffold_40-snap.4
Eterm 12498 13019 - 63.296 0 0 1 scaffold_40-snap.4
Einit 16359 16608 + 41.592 0 1 2 scaffold_40-snap.5
Exon 16628 16712 + 13.780 2 2 0 scaffold_40-snap.5
Exon 16795 17012 + 26.393 1 1 1 scaffold_40-snap.5
Eterm 17224 17381 + 8.331 2 0 2 scaffold_40-snap.5
>scaffold_41
Exon 65 951 - 169.146 1 1 0 scaffold_41-snap.1
'''
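    # each ZFF data row has nine tab-separated fields, parsed below as:
    # feature, start, end, strand, score, fiveo, threeo, phase, ID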
# need to load/generate a funannotate dictionary, then output to gff3 format
Genes = {}
contig = ''
with open(input, 'r') as infile:
for line in infile:
line = line.strip()
if line.startswith('#') or line.startswith('\n'):
continue
elif line.startswith('>'):
contig = line[1:]
else:
feature, start, end, strand, score, fiveo, threeo, phase, ID = line.split(
'\t')
start = int(start)
end = int(end)
# phase = int(phase)
phase = '?' # phase in GFF3 doesn't seem to be accurate, so guess it by translation of all 3 frames
if not ID in Genes:
Genes[ID] = {'name': None, 'type': 'mRNA', 'transcript': [], 'cds_transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [[]], 'ids': [ID+'-T1'], 'CDS': [[(start, end)]], 'mRNA': [[(start, end)]], 'strand': strand,
'location': (start, end), 'contig': contig, 'product': [[]], 'source': 'snap', 'phase': [[phase]],
'db_xref': [[]], 'EC_number': [[]], 'gene_synonym': [], 'go_terms': [[]], 'note': [[]], 'partialStart': [[]], 'partialStop': [[]], 'pseudo': False}
else:
Genes[ID]['CDS'][0].append((start, end))
Genes[ID]['mRNA'][0].append((start, end))
Genes[ID]['phase'][0].append(phase)
if start < Genes[ID]['location'][0]:
Genes[ID]['location'] = (
start, Genes[ID]['location'][1])
if end > Genes[ID]['location'][1]:
Genes[ID]['location'] = (Genes[ID]['location'][0], end)
# translate, check partial, etc
SeqRecords = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
for k, v in list(Genes.items()):
i = 0
if v['strand'] == '+':
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
else:
sortedExons = sorted(
v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
sortedCDS = sorted(
v['CDS'][i], key=lambda tup: tup[0], reverse=True)
Genes[k]['mRNA'][i] = sortedExons
mrnaSeq = getSeqRegions(SeqRecords, v['contig'], sortedExons)
Genes[k]['transcript'].append(mrnaSeq)
# get the codon_start by getting first CDS phase + 1
indexStart = [x for x, y in enumerate(
v['CDS'][i]) if y[0] == sortedCDS[0][0]]
cdsSeq = getSeqRegions(SeqRecords, v['contig'], sortedCDS)
# this seems to be missing removal of codon_start overhang?
Genes[k]['CDS'][i] = sortedCDS
protSeq,codon_start = (None,)*2
if '?' in v['phase'][i]: # dont know the phase -- malformed GFF3, try to find best CDS
translateResults = []
for y in [1, 2, 3]:
protSeq = translate(cdsSeq, v['strand'], y-1)
numStops = protSeq.count('*')
if protSeq[-1] == '*':
numStops -= 1
translateResults.append((y, numStops, protSeq))
sortedResults = sorted(translateResults, key=lambda tup: tup[1])
codon_start = sortedResults[0][0]
protSeq = sortedResults[0][2]
else:
codon_start = int(v['phase'][i][indexStart[0]]) + 1
# translate and get protein sequence
cdsSeq = cdsSeq[codon_start-1:]
protSeq = translate(cdsSeq, v['strand'], codon_start-1)
Genes[k]['codon_start'][i] = codon_start
if codon_start > 1:
if v['strand'] == '+':
cdsSeq = cdsSeq[codon_start - 1:]
elif v['strand'] == '-':
endTrunc = len(cdsSeq) - codon_start -1
cdsSeq = cdsSeq[0:endTrunc]
else:
print("ERROR nonsensical strand (%s) for gene %s"%([v['strand'],k]))
Genes[k]['cds_transcript'].append(cdsSeq)
if protSeq:
Genes[k]['protein'].append(protSeq)
if protSeq.endswith('*'):
Genes[k]['partialStop'][i] = False
else:
Genes[k]['partialStop'][i] = True
if codon_start == 1 and protSeq.startswith('M'):
Genes[k]['partialStart'][i] = False
else:
Genes[k]['partialStart'][i] = True
# now write to GFF3
dict2gff3(Genes, output)
def cq_run_check(cqResult, bam, stringtie, weight):
if checkannotations(cqResult):
log.info('Using existing CodingQuarry results: {:}'.format(cqResult))
return False
if weight < 1:
log.info(
'Skipping CodingQuarry prediction as weight set to {:}'.format(weight))
return False
if not bam and not stringtie:
log.info('Skipping CodingQuarry as there are no RNA-seq data')
return False
# check if dependencies installed
programs = []
if stringtie and checkannotations(stringtie):
programs = ['CodingQuarry']
elif bam and checkannotations(bam):
programs = ['stringtie', 'CodingQuarry']
for x in programs:
if not which_path(x):
log.info(
'CodingQuarry failed, dependency not in $PATH: {:}'.format(x))
return False
# if you get here should be good
return True
def snap_run_check(snapResult, training, weight):
if checkannotations(snapResult):
log.info('Using existing snap results: {:}'.format(snapResult))
return False
if not checkannotations(training):
log.info(
'Snap training failed, empty training set: {:}'.format(training))
return False
if weight < 1:
log.info(
'Skipping snap prediction as weight set to {:}'.format(weight))
return False
programs = ['fathom', 'snap', 'forge', 'hmm-assembler.pl']
for x in programs:
if not which_path(x):
log.info('Snap failed, dependency not in $PATH: {:}'.format(x))
return False
return True
def runSnap(fasta, gff3, minintron, maxintron, dir, output):
from Bio.SeqIO.FastaIO import SimpleFastaParser
'''
wrapper to run Snap training followed by prediction
input is GFF3 format high quality models, i.e. from PASA/transdecoder
output is standard GFF3 format
'''
from collections import OrderedDict
snapHMM = os.path.join(dir, 'snap-trained.hmm')
snapRaw = os.path.join(dir, 'snap-prediction.zff')
if not checkannotations(snapRaw):
        # generate training directory on top of the dir that is passed
tmpdir = os.path.join(dir, 'snaptrain')
if os.path.isdir(tmpdir):
SafeRemove(tmpdir)
os.makedirs(tmpdir)
# load gff3 into dictionary
Genes = {}
Genes = gff2dict(os.path.abspath(gff3), os.path.abspath(fasta), Genes)
scaff2genes = {}
# sort the dictionary
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
sGenes = sorted(iter(Genes.items()), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
scaff2genes = {}
for k, v in list(sortedGenes.items()):
if not v['contig'] in scaff2genes:
scaff2genes[v['contig']] = [k]
else:
scaff2genes[v['contig']].append(k)
# get only scaffolds that have gene models for training
log.debug('{:} gene models to train snap on {:} scaffolds'.format(
len(sGenes), len(scaff2genes)))
trainingFasta = os.path.join(dir, 'snap-training.scaffolds.fasta')
with open(trainingFasta, 'w') as outfile:
with open(os.path.abspath(fasta), 'r') as infile:
for title, seq in SimpleFastaParser(infile):
if title in list(scaff2genes.keys()):
outfile.write('>{:}\n{:}\n'.format(
title, softwrap(seq)))
# convert to ZFF format
origzff = os.path.join(dir, 'snap.training.zff')
dict2zff(scaff2genes, Genes, origzff)
# now run SNAP training
cmd = ['fathom', os.path.abspath(origzff), os.path.abspath(
trainingFasta), '-categorize', '1000', '-min-intron', str(minintron), '-max-intron', str(maxintron)]
runSubprocess(cmd, tmpdir, log)
cmd = ['fathom', 'uni.ann', 'uni.dna', '-export', '1000', '-plus']
runSubprocess(cmd, tmpdir, log)
cmd = ['forge', 'export.ann', 'export.dna']
runSubprocess(cmd, tmpdir, log)
cmd = ['perl', which_path('hmm-assembler.pl'), 'snap-trained', tmpdir]
runSubprocess2(cmd, '.', log, snapHMM)
# now run SNAP prediction
cmd = ['snap', os.path.abspath(snapHMM), os.path.abspath(fasta)]
runSubprocess2(cmd, '.', log, snapRaw)
# convert zff to proper gff3
zff2gff3(snapRaw, fasta, output)
return os.path.abspath(snapHMM)
def runSnapTrained(fasta, hmm, dir, output):
snapRaw = os.path.join(dir, 'snap-prediction.zff')
# now run SNAP prediction
cmd = ['snap', os.path.abspath(hmm), os.path.abspath(fasta)]
runSubprocess2(cmd, '.', log, snapRaw)
# convert zff to proper gff3
zff2gff3(snapRaw, fasta, output)
def MemoryCheck():
import psutil
mem = psutil.virtual_memory()
RAM = int(mem.total)
return round(RAM / 1024000000)
def systemOS():
if sys.platform == 'darwin':
system_os = 'MacOSX ' + platform.mac_ver()[0]
elif sys.platform == 'linux':
linux_version = distro.linux_distribution()
system_os = linux_version[0] + ' '+linux_version[1]
else:
system_os = sys.platform
return system_os
def SystemInfo():
system_os = systemOS()
python_vers = str(
sys.version_info[0])+'.'+str(sys.version_info[1])+'.'+str(sys.version_info[2])
log.info("OS: %s, %i cores, ~ %i GB RAM. Python: %s" %
(system_os, multiprocessing.cpu_count(), MemoryCheck(), python_vers))
def runtRNAscan(input, tmpdir, output, cpus=1, precalc=False):
tRNAout = os.path.join(tmpdir, 'tRNAscan.out')
tRNAlenOut = os.path.join(tmpdir, 'tRNAscan.len-filtered.out')
if not precalc:
if os.path.isfile(tRNAout): # tRNAscan can't overwrite file, so check
os.remove(tRNAout)
cmd = ['tRNAscan-SE', '-o', tRNAout, '--thread', str(cpus), input]
log.debug(' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
log.error('CMD ERROR: {}'.format(' '.join(cmd)))
if stdout:
log.debug(stdout.decode("utf-8"))
if stderr:
log.debug(stderr.decode("utf-8"))
else:
shutil.copyfile(precalc, tRNAout)
if not checkannotations(tRNAout):
log.info('tRNAscan-SE seems to have failed, check logfile for error. You can pass precalculated results to --trnascan')
return False
# enforce NCBI length rules
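    # tRNA predictions shorter than 50 bp or longer than 150 bp are dropped before GFF3 conversion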
with open(tRNAlenOut, 'w') as lenOut:
with open(tRNAout, 'r') as infile:
for line in infile:
if line.startswith('Sequence') or line.startswith('Name') or line.startswith('--------'):
lenOut.write('%s' % line)
else:
cols = line.split('\t')
start = cols[2]
end = cols[3]
if int(start) < int(end):
length = abs(int(end) - int(start))
else:
length = abs(int(start) - int(end))
if length < 50 or length > 150:
continue
else:
lenOut.write('%s' % line)
# now convert to GFF3
trna2gff = os.path.join(parentdir, 'aux_scripts', 'trnascan2gff3.pl')
with open(output, 'w') as out:
subprocess.call(['perl', trna2gff, '--input', tRNAlenOut], stdout=out)
return True
def runtbl2asn(folder, template, discrepency, organism, isolate, strain, parameters, version):
'''
function to run NCBI tbl2asn
'''
# get funannotate version
fun_version = get_version()
# input should be a folder
if not os.path.isdir(folder):
log.error("tbl2asn error: %s is not a directory, exiting" % folder)
sys.exit(1)
# based on organism, isolate, strain, construct meta info for -j flag
if not organism:
log.error("tbl2asn error: organism not specified")
sys.exit(1)
meta = "[organism=" + organism + "]"
if isolate:
isolate_meta = "[isolate=" + isolate + "]"
meta = meta + " " + isolate_meta
if strain:
strain_meta = "[strain=" + strain + "]"
meta = meta + " " + strain_meta
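    # e.g. with hypothetical values: meta -> '[organism=Genus species] [isolate=ABC123]'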
cmd = ['tbl2asn', '-y', '"Annotated using '+fun_version+'"', '-N',
str(version), '-p', folder, '-t', template, '-M', 'n', '-Z', discrepency, '-j', '"'+meta+'"', '-V', 'b', '-c', 'fx', '-T', '-a', 'r10u']
# check for custom parameters
if parameters:
params = parameters.split(' ')
cmd = cmd + params
runSubprocess(cmd, '.', log)
return ' '.join(cmd)
def gb2smurf(input, prot_out, smurf_out):
with open(smurf_out, 'w') as smurf:
with open(prot_out, 'w') as proteins:
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
name = re.sub('[^0-9]', '', record.name)
if f.type == "CDS":
proteins.write(">%s\n%s\n" % (f.qualifiers['locus_tag'][0], softwrap(
f.qualifiers['translation'][0].rstrip('*'))))
locus_tag = f.qualifiers.get(
"locus_tag", ["No ID"])[0]
product_name = f.qualifiers.get(
"product", ["No Description"])[0]
mystart = f.location.start
myend = f.location.end
strand = f.location.strand
if strand == 1:
smurf.write("%s\t%s\t%s\t%s\t%s\n" % (locus_tag, name.lstrip(
"0"), int(mystart), int(myend), product_name))
else:
smurf.write("%s\t%s\t%s\t%s\t%s\n" % (locus_tag, name.lstrip(
"0"), int(myend), int(mystart), product_name))
def GAGprotClean(input, output):
'''
gag.py v1 had headers like:
>>evm.model.Contig100.1 protein
gag.py v2 has headers like:
>protein|evm.model.scaffold_1.169 ID=evm.model.scaffold_1.169|Parent=evm.TU.scaffold_1.169|Name=EVM%20prediction%20scaffold_1.169
'''
with open(output, 'w') as outfile:
        with open(input, 'r') as infile:
for rec in SeqIO.parse(infile, 'fasta'):
if rec.id.startswith('protein|'):
ID = rec.id.replace('protein|', '').split(' ')[0]
else:
ID = rec.id.split(' ')[0]
rec.id = ID
rec.name = ''
rec.description = ''
SeqIO.write(rec, outfile, 'fasta')
def OldRemoveBadModels(proteins, gff, length, repeats, BlastResults, tmpdir, output):
# first run bedtools to intersect models where 90% of gene overlaps with repeatmasker region
repeat_temp = os.path.join(tmpdir, 'genome.repeats.to.remove.gff')
cmd = ['bedtools', 'intersect', '-f', '0.9', '-a', gff, '-b', repeats]
runSubprocess2(cmd, '.', log, repeat_temp)
    # now remove proteins that do not have valid starts, are shorter than the minimum length, or have internal stops
remove = []
reason = {}
# parse the results from bedtools and add to remove list
with open(repeat_temp, 'r') as input:
for line in input:
if "\tgene\t" in line:
ninth = line.split('ID=')[-1]
ID = ninth.split(";")[0]
remove.append(ID)
if not ID in reason:
reason[ID] = 'remove_reason=repeat_overlap;'
# parse the results from BlastP search of transposons
with open(BlastResults, 'r') as input:
for line in input:
col = line.split('\t')
remove.append(col[0])
if not col[0] in reason:
ID = col[0].replace('evm.model.', 'evm.TU.')
reason[ID] = 'remove_reason=repeat_match;'
else:
ID = col[0].replace('evm.model.', 'evm.TU.')
                reason[ID] = 'remove_reason=repeat_overlap|repeat_match;'
# I'm only seeing these models with GAG protein translations, so maybe that is a problem? skip enforcing start with M
with open(proteins, 'r') as input:
SeqRecords = SeqIO.parse(input, 'fasta')
for rec in SeqRecords:
Seq = str(rec.seq)[:-1]
ID = rec.id.replace('evm.model.', 'evm.TU.')
if len(Seq) < int(length):
remove.append(ID)
if not ID in reason:
reason[ID] = 'remove_reason=seq_too_short;'
if 'XX' in Seq:
remove.append(ID)
if not rec.id in reason:
reason[ID] = 'remove_reason=model_span_gap;'
remove = [w.replace('evm.TU.', '') for w in remove]
remove = [w.replace('evm.model.', '') for w in remove]
remove = set(remove)
if len(remove) > 0:
        remove_match = re.compile(r'\bevm\.(.*?:%s)[\.;]\b' % '|'.join(remove))
with open(output, 'w') as out:
with open(os.path.join(tmpdir, 'bad_models.gff'), 'w') as out2:
with open(gff, 'r') as GFF:
for line in GFF:
if '\tstart_codon\t' in line:
continue
if '\tstop_codon\t' in line:
continue
matchLine = remove_match.search(line)
if not matchLine:
# remove the Name attribute as it sticks around in GBK file
line = re.sub(';Name=.*$', ';', line)
out.write(line)
else:
# print matchLine.group()
# print line
if "\tgene\t" in line:
bad_ninth = line.split('ID=')[-1]
bad_ID = bad_ninth.split(";")[0]
bad_reason = reason.get(bad_ID)
if bad_reason:
line = line.replace(
'\n', ';'+bad_reason+'\n')
# print bad_reason
else:
log.debug(
"%s was removed in removeBadModels function for unknown reason, please check manually" % bad_ID)
line = line.replace(
'\n', ';remove_reason=unknown;\n')
# print 'uknown'
out2.write(line)
else: # if nothing to remove, just print out GFF
with open(output, 'w') as out:
with open(gff, 'r') as GFF:
for line in GFF:
if '\tstart_codon\t' in line:
continue
if '\tstop_codon\t' in line:
continue
# remove the Name attribute as it sticks around in GBK file
line = re.sub(';Name=.*$', ';', line)
out.write(line)
def RemoveBadModels(proteins, gff, length, repeats, BlastResults, tmpdir, methods, output):
reason = {}
tooShort = 0
repeat = 0
gapspan = 0
if 'overlap' in methods:
# first run bedtools to intersect models where 90% of gene overlaps with repeatmasker region
repeat_temp = os.path.join(tmpdir, 'genome.repeats.to.remove.gff')
gffSorted = os.path.abspath(gff)+'.sorted.gff'
bedSorted = os.path.abspath(repeats)+'.sorted.bed'
#sortBedproper(repeats, bedSorted)
cmd1 = ['bedtools', 'sort', '-i', repeats]
with open(bedSorted, 'w') as bedout:
subprocess.call(cmd1, stdout=bedout)
#sortGFFproper(gff, gffSorted)
cmd2 = ['bedtools', 'sort', '-i', gff]
with open(gffSorted, 'w') as gffout:
subprocess.call(cmd2, stdout=gffout)
cmd = ['bedtools', 'intersect', '-sorted', '-f', '0.9', '-a', gffSorted, '-b', bedSorted]
runSubprocess2(cmd, '.', log, repeat_temp)
# parse the results from bedtools and add to remove list
with open(repeat_temp, 'r') as input:
for line in input:
if "\tgene\t" in line:
ninth = line.split('ID=')[-1]
ID = ninth.split(";")[0]
if not ID in reason:
reason[ID] = 'remove_reason=repeat_overlap;'
if 'blast' in methods:
# parse the results from BlastP search of transposons
with open(BlastResults, 'r') as input:
for line in input:
col = line.split('\t')
if not col[0] in reason:
ID = col[0].replace('evm.model.', 'evm.TU.')
reason[ID] = 'remove_reason=repeat_match;'
else:
ID = col[0].replace('evm.model.', 'evm.TU.')
reason[ID] = 'remove_reason=repeat_overlap|repeat_match;'
# always do these checks
# Look for models that are too short
with open(proteins, 'r') as input:
SeqRecords = SeqIO.parse(input, 'fasta')
for rec in SeqRecords:
Seq = str(rec.seq)[:-1]
ID = rec.id.replace('evm.model.', 'evm.TU.')
if len(Seq) < int(length):
if not ID in reason:
reason[ID] = 'remove_reason=seq_too_short;'
if 'XX' in Seq:
if not rec.id in reason:
reason[ID] = 'remove_reason=model_span_gap;'
# now read the EVM gene models in Blocks so you can parse gene ID
numTotal = len(reason)
for k, v in reason.items():
if 'model_span_gap' in v:
gapspan += 1
elif 'seq_too_short' in v:
tooShort += 1
else:
repeat += 1
if numTotal > 0:
log.info("Found {:,} gene models to remove: {:,} too short; {:,} span gaps; {:,} transposable elements".format(
numTotal, tooShort, gapspan, repeat))
with open(output, 'w') as out:
with open(os.path.join(tmpdir, 'bad_models.gff'), 'w') as out2:
with open(gff, 'r') as GFF:
for gene_model in readBlocks(GFF, '\n'):
if len(gene_model) > 1:
if gene_model[0].startswith('\n'):
ID = gene_model[1].split(
'ID=')[-1].split(';')[0]
else:
ID = gene_model[0].split(
'ID=')[-1].split(';')[0]
if ID in reason:
out2.write('#%s removed; %s\n' %
(ID, reason.get(ID)))
for line in gene_model:
if not line.startswith('\n'):
out2.write('%s' % (line))
else:
for line in gene_model:
# remove the Name attribute as it sticks around in GBK file
line = re.sub(';Name=.*$', ';', line)
out.write('%s' % (line))
else: # if nothing to remove, just print out GFF
with open(output, 'w') as out:
with open(gff, 'r') as GFF:
for line in GFF:
if '\tstart_codon\t' in line:
continue
if '\tstop_codon\t' in line:
continue
# remove the Name attribute as it sticks around in GBK file
line = re.sub(';Name=.*$', ';', line)
out.write(line)
def CleantRNAtbl(GFF, TBL, output):
# clean up genbank tbl file from gag output
# try to read through GFF file, make dictionary of tRNA genes and products
TRNA = {}
matches = []
with open(GFF, 'r') as gff:
for line in gff:
if line.startswith('#'):
continue
line = line.replace('\n', '')
scaffold, source, feature, start, end, score, orientation, phase, info = line.split(
'\t')
if feature == 'tRNA':
ID = info.split(';')[0].replace('ID=', '')
ID = ID.replace('-T1', '')
product = info.split('product=')[-1]
TRNA[ID] = product
matches.append(product)
matches = set(matches)
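    # build a regex that matches tbl 'product' lines for any tRNA product observed in the GFF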
tRNAmatch = re.compile(r'\t\t\tproduct\t%s\n' % '|'.join(matches))
with open(output, 'w') as out:
with open(TBL, 'r') as input:
for line in input:
if line.startswith('\t\t\tlocus_tag\t'):
out.write(line)
geneID = line.split('locus_tag\t')[-1].replace('\n', '')
if geneID in TRNA:
CurrentProduct = TRNA.get(geneID)
if 'tRNA-Xxx' == CurrentProduct:
out.write("\t\t\tpseudo\n")
elif line.startswith("\t\t\tproduct\ttRNA-Xxx"):
out.write(line)
out.write("\t\t\tpseudo\n")
next(input)
next(input)
elif tRNAmatch.search(line):
out.write(line)
next(input)
next(input)
else: # otherwise just write line
out.write(line)
def getFailedProductNames(input, GeneDict):
    # input is the NCBI tbl2asn discrepancy report; parse it to get suspect product names
failed = {}
with open(input, 'r') as discrep:
for block in readBlocks(discrep, 'DiscRep_'):
if 'DiscRep_SUB:SUSPECT_PRODUCT_NAMES::' in block[0]:
reason = []
for item in block:
if item.startswith('DiscRep_SUB:'):
bad = item.split('::')[-1].rstrip()
if 'features' in bad.lower():
bad = bad.split('features ')[-1]
reason.append(bad)
elif item.startswith('genome:'):
gene = item.split('\t')[-1].strip()
if gene.startswith('DiscRep'):
continue
if gene in GeneDict:
hit = GeneDict.get(gene)
if not hit[0] in failed:
failed[hit[0]] = (hit[1], gene, reason)
return failed
def ParseErrorReport(input, Errsummary, val, Discrep, output, keep_stops):
errors = []
gapErrors = []
remove = []
with open(Errsummary) as summary:
for line in summary:
if 'ERROR' in line:
# there are probably other errors you are unaware of....
if 'SEQ_DESCR.OrganismIsUndefinedSpecies' in line or 'SEQ_DESCR.BadOrgMod' in line or 'SEQ_FEAT.MissingTrnaAA' in line or 'SEQ_INST.TerminalNs' in line:
pass
elif 'SEQ_FEAT.NoStop' in line:
if keep_stops:
pass
else:
err = line.split(" ")[-1].rstrip()
errors.append(err)
elif 'SEQ_FEAT.FeatureBeginsOrEndsInGap' in line:
err = line.split(" ")[-1].rstrip()
gapErrors.append(err)
else:
err = line.split(" ")[-1].rstrip()
errors.append(err)
    # parse the discrepancy report and look for overlapping genes; so far all have been tRNAs in introns, so only those are handled for now
with open(Discrep, 'r') as discrep:
        # process the discrepancy report into blocks, then look for block headers reporting overlapping genes; remove only tRNA models right now
for block in readBlocks(discrep, 'DiscRep_'):
if 'DiscRep_ALL:OVERLAPPING_GENES::' in block[0] or 'DiscRep_SUB:RNA_CDS_OVERLAP::' in block[0]:
for item in block:
if item.startswith('genome:tRNA'):
gene = item.split('\t')[-1].replace('\n', '')
if gene.startswith('DiscRep'):
continue
tRNA = gene + '_tRNA'
exon = gene + '_exon'
remove.append(gene)
remove.append(tRNA)
remove.append(exon)
if 'DiscRep_ALL:FIND_OVERLAPPED_GENES::' in block[0]:
for item in block:
gene = item.split('\t')[-1].replace('\n', '')
if gene.startswith('DiscRep'):
continue
tRNA = gene + '_tRNA'
exon = gene + '_exon'
remove.append(gene)
remove.append(tRNA)
remove.append(exon)
    # if there are no errors, just remove start/stop codons and move on
if len(errors) < 1 and len(remove) < 1:
with open(output, 'w') as out:
with open(input, 'r') as GFF:
for line in GFF:
if '\tstart_codon\t' in line:
continue
if '\tstop_codon\t' in line:
continue
out.write(line)
else:
with open(val) as validate:
for line in validate:
if any(x in line for x in errors):
mRNA = line.split("ncbi|")[-1].replace(']', '').rstrip()
gene = mRNA.replace('evm.model', 'evm.TU')
exon = mRNA + '.exon'
mRNA = mRNA + ';'
remove.append(mRNA)
remove.append(gene)
remove.append(exon)
                # this is only picking up tRNAs right now, which is probably all it needs to catch, but you never know
if any(x in line for x in gapErrors):
cols = line.split(' ')
if 'Gene:' in cols:
gene = line.split('Gene: ')[-1]
gene = gene.split(' ')[0]
tRNA = gene + '_tRNA'
exon = gene + '_exon'
remove.append(gene)
remove.append(tRNA)
remove.append(exon)
# make sure no empty strings
remove = list([_f for _f in remove if _f])
remove = set(remove)
remove_match = re.compile(r'\b(?:%s)+\b' % '|'.join(remove))
with open(output, 'w') as out:
with open(input, 'r') as GFF:
for line in GFF:
if '\tstart_codon\t' in line:
continue
if '\tstop_codon\t' in line:
continue
if not remove_match.search(line):
if '\tgene\t' in line:
line = line.replace('Name=;', '')
out.write(line)
def antismash_version(input):
# choose v4, v5 or v6 parser
version = 4
with open(input, 'r') as infile:
for rec in SeqIO.parse(infile, 'genbank'):
if 'structured_comment' in rec.annotations:
if 'antiSMASH-Data' in rec.annotations['structured_comment']:
version = int(
rec.annotations['structured_comment']['antiSMASH-Data']['Version'].split('.')[0])
break
return version
def ParseAntiSmash(input, tmpdir, output, annotations):
smash_version = antismash_version(input)
log.info("Now parsing antiSMASH v{:} results, finding SM clusters".format(smash_version))
BackBone = {}
SMCOGs = {}
bbSubType = {}
bbDomains = {}
smProducts = {}
backboneCount = 0
clusterCount = 0
cogCount = 0
# parse antismash genbank to get clusters in bed format and slice the record for each cluster prediction
with open(output, 'w') as antibed:
with open(input, 'r') as input:
SeqRecords = SeqIO.parse(input, 'genbank')
for rec_num,record in enumerate(SeqRecords):
for f in record.features:
locusTag, ID, Parent = (None,)*3
if smash_version < 6:
baseName = 'Cluster'
if '_' in record.id:
try:
numericalContig = '{}_{}'.format(baseName, int(record.id.rsplit('_', 1)[-1]))
except ValueError:
if '.' in record.id:
numericalContig = '{}_{}'.format(baseName, int(record.id.rsplit('.', 1)[0].rsplit('_', 1)[-1]))
else: # just get the numbers
numericalContig = '{}_{}'.format(baseName, int(''.join(filter(str.isdigit, record.id))))
else:
numericalContig = 'Cluster'
# parse v4 differently than version 5
if smash_version == 4:
if f.type == "cluster":
clusterCount += 1
chr = record.id
start = f.location.nofuzzy_start
end = f.location.nofuzzy_end
clusternum = f.qualifiers.get(
"note")[0].replace("Cluster number: ", "")
antibed.write("%s\t%s\t%s\tCluster_%s\t0\t+\n" %
(chr, start, end, clusternum))
Domains = []
if f.type == "CDS":
locusTag, ID, Parent = getID(f, f.type)
if not ID:
continue
ID = ID.replace('ncbi_', '')
if f.qualifiers.get('sec_met'):
for k, v in list(f.qualifiers.items()):
if k == 'sec_met':
for i in v:
if i.startswith('Type:'):
type = i.replace('Type: ', '')
backboneCount += 1
BackBone[ID] = type
if i.startswith('NRPS/PKS subtype:'):
subtype = i.replace(
'NRPS/PKS subtype: ', '')
bbSubType[ID] = subtype
if i.startswith('NRPS/PKS Domain:'):
doms = i.replace(
'NRPS/PKS Domain: ', '')
doms = doms.split('. ')[0]
Domains.append(doms)
bbDomains[ID] = Domains
for k, v in list(f.qualifiers.items()):
if k == 'note':
for i in v:
if i.startswith('smCOG:'):
COG = i.replace('smCOG: ', '')
COG = COG.split(' (')[0]
SMCOGs[ID] = COG
cogCount += 1
elif not i.startswith('smCOG tree'):
notes = i
smProducts[ID] = notes
elif smash_version >= 5:
if f.type == "protocluster":
clusterCount += 1
chr = record.id
start = f.location.nofuzzy_start
# if '<' in start:
# start = start.replace('<', '')
end = f.location.nofuzzy_end
# if '>' in end:
# end = end.replace('>', '')
clusternum = int(f.qualifiers.get(
"protocluster_number")[0])
if smash_version >= 6:
antibed.write("{:}\t{:}\t{:}\t{:}_{:}\t0\t+\n".format(
chr, start, end, numericalContig, clusternum))
else:
antibed.write("{:}\t{:}\t{:}\t{:}.{:}\t0\t+\n".format(
chr, start, end, numericalContig, clusternum))
Domains = []
if f.type == "CDS":
locusTag, ID, Parent = getID(f, f.type)
if not ID:
continue
ID = ID.replace('ncbi_', '')
if f.qualifiers.get('NRPS_PKS'):
for k, v in list(f.qualifiers.items()):
if k == 'NRPS_PKS':
for i in v:
if i.startswith('type:'):
type = i.replace('type: ', '')
backboneCount += 1
BackBone[ID] = type
if i.startswith('NRPS_PKS subtype:'):
subtype = i.replace(
'NRPS_PKS subtype: ', '')
bbSubType[ID] = subtype
if i.startswith('Domain:'):
doms = i.replace(
'Domain: ', '')
doms = doms.split('. ')[0]
Domains.append(doms)
bbDomains[ID] = Domains
for k, v in list(f.qualifiers.items()):
if k == 'gene_functions':
for i in v:
if '(smcogs)' in i:
COG = i.split(
'(smcogs)')[-1].strip()
COG = COG.split(' (')[0]
SMCOGs[ID] = COG
cogCount += 1
elif k == 'gene_kind':
if 'biosynthetic' in v:
backboneCount += 1
# if smash_version == 4:
log.info("Found %i clusters, %i biosynthetic enyzmes, and %i smCOGs predicted by antiSMASH" % (
clusterCount, backboneCount, cogCount))
# now generate the annotations to add to genome
with open(annotations, 'w') as out:
# add product annotations - use bbSubType --> BackBone
for k, v in natsorted(list(BackBone.items())):
ID = k
if k in bbSubType:
hit = bbSubType.get(k)
if hit == 'NRPS':
hit = 'Nonribosomal Peptide Synthase (NRPS)'
if hit == 'Type I Iterative PKS':
hit = 'Type I Iterative Polyketide synthase (PKS)'
else:
hit = v
if hit == 'terpene':
hit = 'terpene cyclase'
elif hit == 'other':
hit = 'putative secondary metabolism biosynthetic enzyme'
elif hit == 'indole':
hit = 'aromatic prenyltransferase (DMATS family)'
elif hit == 'alkaloid' or hit == 'lignan' or hit == 'saccharide' or hit == 'polyketide':
hit = 'putative ' + hit + ' biosynthetic cluster'
elif hit == 'putative':
hit = 'putative uncategorized biosynthetic cluster'
elif '-' in hit:
hit = 'putative ' + hit + ' biosynthetic cluster'
if hit != 'none':
out.write("%s\tproduct\t%s\n" % (ID, hit))
# add annots from smProducts
for k, v in list(smProducts.items()):
ID = k
if v != 'none' and not 'BLAST' in v:
sys.stdout.write("%s\tproduct\t%s\n" % (ID, v))
# add smCOGs into note section
for k, v in list(SMCOGs.items()):
ID = k
if v != 'none':
out.write("%s\tnote\t%s\n" % (ID, v))
return bbDomains, bbSubType, BackBone
def GetClusterGenes(input, GFF, genome, annotations):
# load clusters into InterLap
interClust = bed2interlapNames(input)
# load GFF3 into Dictionary
Genes = {}
Genes = gff2dict(GFF, genome, Genes)
# loop through genes and check if in Clusters
dictClusters = {}
for k, v in natsorted(Genes.items()):
if v['type'] == 'mRNA':
if v['location'] in interClust[v['contig']]:
best_hit = list(interClust[v['contig']].find(v['location']))[0]
clusterName = best_hit[2]
if not clusterName in dictClusters:
dictClusters[clusterName] = v['ids']
else:
dictClusters[clusterName] += v['ids']
# write the output file
with open(annotations, 'w') as annotout:
for k, v in list(dictClusters.items()):
for i in v:
annotout.write("%s\tnote\tantiSMASH:%s\n" % (i, k))
return dictClusters
def splitFASTA(input, outputdir):
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
with open(input, 'r') as InputFasta:
SeqRecords = SeqIO.parse(InputFasta, 'fasta')
for record in SeqRecords:
name = str(record.id)
outputfile = os.path.join(outputdir, name+'.fa')
with open(outputfile, 'w') as output:
SeqIO.write(record, output, 'fasta')
def genomeStats(input):
from Bio.SeqUtils import GC
lengths = []
GeeCee = []
Genes = 0
tRNA = 0
Prots = 0
locus_tag = ''
organism = None
isolate = None
strain = None
uniqueIso = None
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
lengths.append(len(record.seq))
GeeCee.append(str(record.seq))
organism = record.annotations['organism'].replace(
' Unclassified.', '')
for f in record.features:
if f.type == "source":
isolate = f.qualifiers.get("isolate", [None])[0]
strain = f.qualifiers.get("strain", [None])[0]
if f.type == "CDS":
Prots += 1
if f.type == "gene":
Genes += 1
if Genes == 1:
locus_tag = f.qualifiers.get("locus_tag")[
0].split('_')[0]
if f.type == "tRNA":
tRNA += 1
if strain:
log.info("working on %s %s" % (organism, strain))
uniqueIso = strain.replace(' ', '')
elif isolate:
log.info("working on %s %s" % (organism, isolate))
uniqueIso = isolate.replace(' ', '')
else:
log.info("working on %s" % organism)
GenomeSize = sum(lengths)
LargestContig = max(lengths)
ContigNum = len(lengths)
AvgContig = int(round(GenomeSize / ContigNum))
pctGC = round(GC("".join(GeeCee)), 2)
# now get N50
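    # N50 is computed as the median of a list in which each contig length is repeated once per base it contains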
lengths.sort()
nlist = []
for x in lengths:
nlist += [x]*x
if len(nlist) % 2 == 0:
medianpos = int(len(nlist) / 2)
N50 = int((nlist[medianpos] + nlist[medianpos-1]) / 2)
else:
medianpos = int(len(nlist) / 2)
N50 = int(nlist[medianpos])
# return values in a list
return [organism, uniqueIso, locus_tag, "{0:,}".format(GenomeSize)+' bp', "{0:,}".format(LargestContig)+' bp', "{0:,}".format(AvgContig)+' bp', "{0:,}".format(ContigNum), "{0:,}".format(N50)+' bp', "{:.2f}".format(pctGC)+'%', "{0:,}".format(Genes), "{0:,}".format(Prots), "{0:,}".format(tRNA)]
def MEROPS2dict(input):
dict = {}
with open(input, 'r') as fasta:
for line in fasta:
if line.startswith('>'):
cols = line.split(' ')
ID = cols[0].replace('>', '')
family = cols[1].replace('\n', '')
dict[ID] = family
return dict
def getEggNogfromNote(input):
dict = {}
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
for k, v in list(f.qualifiers.items()):
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('EggNog:'):
hit = i.replace('EggNog:', '')
if not ID in dict:
dict[ID] = hit
return dict
def getStatsfromNote(input, word, Database):
dict = {}
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
for k, v in list(f.qualifiers.items()):
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith(word+':'):
hit = i.replace(word+':', '')
if hit.startswith('MER'): # change to family name
hit = meropsDict.get(hit)
if not hit in dict:
dict[hit] = [ID]
else:
dict[hit].append(ID)
return dict
def getSMBackbones(input):
dict = {'NRPS': 0, 'PKS': 0, 'Hybrid': 0}
with open(input, 'r') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
for f in record.features:
if f.type == 'CDS':
product = f.qualifiers['product'][0]
if not product == 'hypothetical protein':
if product == "Hybrid PKS-NRPS":
dict['Hybrid'] += 1
if product == "Nonribosomal Peptide Synthase (NRPS)":
dict['NRPS'] += 1
if 'Polyketide synthase (PKS)' in product:
dict['PKS'] += 1
return dict
def parseGOterms(input, folder, genome):
with open(os.path.join(folder, 'associations.txt'), 'a') as assoc:
with open(os.path.join(folder, genome+'.txt'), 'w') as terms:
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
GOS = []
for k, v in list(f.qualifiers.items()):
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('GO'):
go_term = i.split(' ')[1]
GOS.append(go_term)
if GOS:
assoc.write("%s\t%s\n" % (ID, ";".join(GOS)))
terms.write("%s\n" % ID)
def getStatsfromDbxref(input, word):
dict = {}
with open(input, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
for k, v in list(f.qualifiers.items()):
if k == 'db_xref':
for i in v:
if i.startswith(word+':'):
hit = i.replace(word+':', '')
if not hit in dict:
dict[hit] = [ID]
else:
dict[hit].append(ID)
return dict
def getGBKannotation(input, Database):
'''
Function will loop through GBK file pulling out funannotate functional annotation
and returning a list of dictionaries for each annotation class
'''
# convert merops on the fly, need database
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
SMs = {'NRPS': 0, 'PKS': 0, 'Hybrid': 0}
pfams = {}
iprs = {}
nogs = {}
cogs = {}
merops = {}
cazys = {}
secreted = {}
membrane = {}
buscos = {}
secmet = {}
with open(input, 'r') as infile:
for record in SeqIO.parse(infile, 'genbank'):
for f in record.features:
locusTag, ID, Parent = (None,)*3
if f.type == 'CDS':
locusTag, ID, Parent = getID(f, f.type)
if not ID:
continue
product = f.qualifiers['product'][0]
if product == "Hybrid PKS-NRPS":
SMs['Hybrid'] += 1
if product == "Nonribosomal Peptide Synthase (NRPS)":
SMs['NRPS'] += 1
if 'Polyketide synthase (PKS)' in product:
SMs['PKS'] += 1
for k, v in list(f.qualifiers.items()):
if k == 'db_xref':
for i in v:
if i.startswith('PFAM:'):
hit = i.replace('PFAM:', '')
if not hit in pfams:
pfams[hit] = [ID]
else:
pfams[hit].append(ID)
elif i.startswith('InterPro:'):
hit = i.replace('InterPro:', '')
if not hit in iprs:
iprs[hit] = [ID]
else:
iprs[hit].append(ID)
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('EggNog:'):
hit = i.replace('EggNog:', '')
if not ID in nogs:
nogs[ID] = hit
elif i.startswith('BUSCO:'):
hit = i.replace('BUSCO:', '')
if not hit in buscos:
buscos[hit] = [ID]
else:
buscos[hit].append(ID)
elif i.startswith('MEROPS:'): # change to family name
hit = i.replace('MEROPS:', '')
hit = meropsDict.get(hit)
if not hit in merops:
merops[hit] = [ID]
else:
merops[hit].append(ID)
elif i.startswith('CAZy:'):
hit = i.replace('CAZy:', '')
if not hit in cazys:
cazys[hit] = [ID]
else:
cazys[hit].append(ID)
elif i.startswith('COG:'):
hit = i.replace('COG:', '')
hits = hit.split(',')
for x in hits:
if not x in cogs:
cogs[x] = [ID]
else:
cogs[x].append(ID)
elif i.startswith('SECRETED:'):
hit = i.replace('SECRETED:', '')
if not hit in secreted:
secreted[hit] = [ID]
else:
secreted[hit].append(ID)
elif i.startswith('TransMembrane:'):
hit = i.replace('TransMembrane:', '')
if not hit in membrane:
membrane[hit] = [ID]
else:
membrane[hit].append(ID)
elif i.startswith('antiSMASH:'):
hit = i.replace('antiSMASH:', '')
if not hit in secmet:
secmet[hit] = [ID]
else:
secmet[hit].append(ID)
return [pfams, iprs, nogs, buscos, merops, cazys, cogs, secreted, membrane, secmet, SMs]
def annotationtable(input, Database, HeaderNames, InterProDict, output):
from collections import OrderedDict
'''
Function will create a tsv annotation table from GenBank file
trying to capture all annotation in a parsable tsv file or
something that could be imported into excel
'''
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
# convert merops on the fly, need database
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
# get note new/unique note names
uniqueNotes = OrderedDict()
for x in HeaderNames:
if not x in ['BUSCO', 'CAZy', 'COG', 'EggNog', 'SECRETED', 'GO', 'MEROPS', 'TransMembrane']:
uniqueNotes[x] = []
# load genbank into funannotate dictionary (required as we need transcript/cds/etc)
Genes = {}
with open(input, 'r') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
for f in record.features:
gb_feature_add2dict(f, record, Genes)
SeqRecords = SeqIO.to_dict(SeqIO.parse(input, 'genbank'))
sGenes = natsorted(Genes.items(), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
# input should be fully annotation GBK file from funannotate
with open(output, 'w') as outfile:
header = ['GeneID', 'TranscriptID', 'Feature', 'Contig', 'Start',
'Stop', 'Strand', 'Name', 'Product', 'Alias/Synonyms', 'EC_number',
'BUSCO', 'PFAM', 'InterPro', 'EggNog', 'COG', 'GO Terms',
'Secreted', 'Membrane', 'Protease', 'CAZyme']
header += uniqueNotes.keys()
header += ['Notes', 'gDNA', 'mRNA', 'CDS-transcript', 'Translation']
outfile.write('%s\n' % '\t'.join(header))
for k,v in sortedGenes.items():
for i in range(0,len(v['ids'])):
# for each new feature, start with empty lists
pfams = []
iprs = []
GOS = v['go_terms'][i]
nogs = []
cogs = []
merops = []
cazys = []
secreted = []
membrane = []
therest = []
buscos = []
ecnum = []
alias = []
for key,value in uniqueNotes.items():
uniqueNotes[key] = []
# now grab the data
for y in v['db_xref'][i]:
if y.startswith('PFAM:'):
hit = y.replace('PFAM:', '')
pfams.append(hit)
elif y.startswith('InterPro:'):
hit = y.replace('InterPro:', '')
# look up description in dictionary
desc = InterProDict.get(hit)
iprs.append('{:} {:}'.format(hit, desc))
for y in v['gene_synonym']:
alias.append(y)
for y in v['EC_number'][i]:
ecnum.append(y)
for y in v['note'][i]:
if y.startswith('EggNog:'):
hit = y.replace('EggNog:', '')
nogs.append(hit)
elif y.startswith('BUSCO:'):
hit = y.replace('BUSCO:', '')
buscos.append(hit)
elif y.startswith('MEROPS:'): # change to family name
hit = y.replace('MEROPS:', '')
if hit in meropsDict:
hit = meropsDict.get(hit)
merops.append(hit)
else:
log.error("MEROPS database inconsistency: %s not found" % hit)
elif y.startswith('CAZy:'):
hit = y.replace('CAZy:', '')
cazys.append(hit)
elif y.startswith('COG:'):
hit = y.replace('COG:', '')
hits = hit.split(',')
for x in hits:
desc = x + ':'+ resources.COGS.get(x)
cogs.append(desc)
elif y.startswith('SECRETED:'):
hit = y.replace('SECRETED:', '')
secreted.append(hit)
elif y.startswith('TransMembrane:'):
hit = y.replace('TransMembrane:', '')
membrane.append(hit)
elif y.startswith(tuple(uniqueNotes.keys())):
try:
n = y.split(':')[0]
hit = y.split(':', 1)[1]
uniqueNotes[n].append(hit)
except IndexError:
hit = y
therest.append(hit)
else: # capture everything else
hit = y
therest.append(hit)
# bring together output
result = [k, v['ids'][i], v['type'], v['contig'],
str(v['location'][0]), str(v['location'][1]),
v['strand'], v['name'],
v['product'][i],';'.join(alias),
';'.join(ecnum),';'.join(buscos),
';'.join(pfams),';'.join(iprs),
';'.join(nogs),';'.join(cogs),
';'.join(GOS),
';'.join(secreted),
';'.join(membrane),
';'.join(merops),
';'.join(cazys)
]
for key,value in uniqueNotes.items():
result.append(';'.join(value))
gDNA = getSeqRegions(SeqRecords, v['contig'], [v['location']])
try:
Transcript = str(v['transcript'][i])
except IndexError:
if v['cds_transcript'][i]:
Transcript = str(v['cds_transcript'][i])
else:
                        print('{:} has no mRNA or CDS transcript'.format(k))
                        Transcript = ''
if v['type'] == 'mRNA':
CDSTranscript = str(v['cds_transcript'][i])
try:
Protein = v['protein'][i]
except IndexError:
Protein = ''
print('ERROR: No amino acid sequence exists for {}'.format(v['ids'][i]))
else:
CDSTranscript = ''
Protein = ''
if v['strand'] == '-':
gDNA = RevComp(gDNA)
Transcript = RevComp(Transcript)
CDSTranscript = RevComp(CDSTranscript)
result += [';'.join(therest), gDNA, Transcript,
CDSTranscript, Protein]
# convert any None's to empty string
result = ['' if x is None else x for x in result]
# write to file
outfile.write('%s\n' % '\t'.join(result))
def annotationtableOld(input, Database, output):
'''
Function will create a tsv annotation table from GenBank file
trying to capture all annotation in a parsable tsv file or
something that could be imported into excel
'''
# convert merops on the fly, need database
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
# input should be fully annotation GBK file from funannotate
with open(output, 'w') as outfile:
header = ['GeneID', 'Feature', 'Contig', 'Start', 'Stop', 'Strand', 'Name', 'Product', 'BUSCO', 'PFAM',
'InterPro', 'EggNog', 'COG', 'GO Terms', 'Secreted', 'Membrane', 'Protease', 'CAZyme', 'Notes', 'Translation']
outfile.write('%s\n' % '\t'.join(header))
for record in SeqIO.parse(input, 'genbank'):
Contig = record.id
for f in record.features:
if f.type in ['tRNA', 'ncRNA', 'rRNA']:
ID = f.qualifiers['locus_tag'][0]
Start = f.location.nofuzzy_start
End = f.location.nofuzzy_end
strand = f.location.strand
if strand == 1:
Strand = '+'
elif strand == -1:
Strand = '-'
try:
Product = f.qualifiers['product'][0]
except KeyError:
Product = "None"
result = [ID, f.type, Contig, str(Start), str(
End), Strand, '', Product, '', '', '', '', '', '', '', '', '', '', '', '']
outfile.write('%s\n' % '\t'.join(result))
if f.type == 'CDS':
ID = f.qualifiers['locus_tag'][0]
Start = f.location.nofuzzy_start
End = f.location.nofuzzy_end
strand = f.location.strand
if strand == 1:
Strand = '+'
elif strand == -1:
Strand = '-'
try:
Product = f.qualifiers['product'][0]
except KeyError:
Product = 'hypothetical protein'
try:
Name = f.qualifiers['gene'][0]
except KeyError:
Name = ''
try:
Translation = f.qualifiers['translation'][0]
except KeyError:
Translation = ''
pfams = []
iprs = []
GOS = []
nogs = []
cogs = []
merops = []
cazys = []
secreted = []
membrane = []
therest = []
buscos = []
for k, v in list(f.qualifiers.items()):
if k == 'db_xref':
for i in v:
if i.startswith('PFAM:'):
hit = i.replace('PFAM:', '')
pfams.append(hit)
elif i.startswith('InterPro:'):
hit = i.replace('InterPro:', '')
iprs.append(hit)
elif k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('GO'):
go_term = i.split(' ')[1]
GOS.append(go_term)
elif i.startswith('EggNog:'):
hit = i.replace('EggNog:', '')
nogs.append(hit)
elif i.startswith('BUSCO:'):
hit = i.replace('BUSCO:', '')
buscos.append(hit)
elif i.startswith('MEROPS:'): # change to family name
hit = i.replace('MEROPS:', '')
if hit in meropsDict:
hit = meropsDict.get(hit)
merops.append(hit)
else:
log.error(
"MEROPS database inconsistency: %s not found" % hit)
elif i.startswith('CAZy:'):
hit = i.replace('CAZy:', '')
cazys.append(hit)
elif i.startswith('COG:'):
hit = i.replace('COG:', '')
hits = hit.split(',')
for x in hits:
desc = x + ':' + resources.COGS.get(x)
cogs.append(desc)
elif i.startswith('SECRETED:'):
hit = i.replace('SECRETED:', '')
secreted.append(hit)
elif i.startswith('TransMembrane:'):
hit = i.replace('TransMembrane:', '')
membrane.append(hit)
else: # capture everything else
hit = i
therest.append(hit)
result = [ID, 'CDS', Contig, str(Start), str(End), Strand, Name, Product, ';'.join(buscos), ';'.join(pfams), ';'.join(iprs), ';'.join(
nogs), ';'.join(cogs), ';'.join(GOS), ';'.join(secreted), ';'.join(membrane), ';'.join(merops), ';'.join(cazys), ';'.join(therest), Translation]
outfile.write('%s\n' % '\t'.join(result))
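# Illustrative usage sketch (file and directory names below are hypothetical, not part of funannotate):
#   annotationtableOld('genome.annotated.gbk', '/path/to/funannotate_db', 'annotations.tsv')
# writes one tab-delimited row per tRNA/ncRNA/rRNA/CDS feature using the header defined above.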
def ncbiCheckErrors(error, validation, genename, fixOut):
ncbi_error = 0
actual_error = 0
with open(error, 'r') as errors:
for line in errors:
line = line.strip()
if 'ERROR' in line:
num = line.split(' ')[0]
ncbi_error += int(num)
# if errors in summary, then parse validation report, only get errors with gene names
if ncbi_error > 0:
# see if we can get the gene models that need to be fixed
needFixing = {}
with open(validation, 'r') as validationFile:
for line in validationFile:
line = line.strip()
if line.startswith('ERROR') and genename in line:
actual_error += 1
parts = line.split(' ')
for x in parts:
if genename in x:
ID = x.split('|')[-1]
if '-' in ID:
ID = ID.split('-')[0]
reason = line.split(' FEATURE:')[0]
reason = reason.split('] ')[-1]
if not ID in needFixing:
needFixing[ID] = reason
if actual_error > 0:
log.info("There are %i gene models that need to be fixed." %
actual_error)
print('-------------------------------------------------------')
with open(fixOut, 'w') as fix:
fix.write('#GeneID\tError Message\n')
for k, v in natsorted(list(needFixing.items())):
fix.write('%s\t%s\n' % (k, v))
print(('%s\t%s' % (k, v)))
return actual_error
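# Hedged usage sketch (file names are hypothetical): parse an NCBI tbl2asn-style error summary and
# validation report, collecting the offending gene IDs into a fix list keyed by locus-tag prefix.
#   n = ncbiCheckErrors('errorsummary.val', 'genome.val', 'FUN_', 'models-need-fixing.txt')
#   # n is the number of gene models flagged in the validation report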
def convert2counts(input):
import pandas as pd
Counts = []
    for i in range(0, len(input)):
        counts = {}  # avoid shadowing the builtin dict
        for k, v in list(input[i].items()):
            counts[k] = len(v)
        Counts.append(counts)
df = pd.DataFrame(Counts)
df.fillna(0, inplace=True) # fill in zeros for missing data
return df
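# Minimal sketch of the expected input/output (data are invented): each dict maps an annotation
# term to the list of genes carrying it; the result has one row per input dict with counts,
# and missing terms filled with 0.
#   df = convert2counts([{'PF00001': ['g1', 'g2']},
#                        {'PF00001': ['g3'], 'PF00002': ['g4']}])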
def gb2proteinortho(input, folder, name):
gffOut = os.path.join(folder, name+'.gff')
FastaOut = os.path.join(folder, name+'.faa')
Transcripts = os.path.join(folder, name+'.transcripts.fa')
genes = {}
with open(input, 'r') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
for f in record.features:
gb_feature_add2dict(f, record, genes)
# now output the files you need
with open(gffOut, 'w') as gff:
with open(FastaOut, 'w') as fasta:
with open(Transcripts, 'w') as transcripts:
for k, v in natsorted(list(genes.items())):
if v['type'] == 'mRNA':
for i, item in enumerate(v['ids']):
transcripts.write(">{:} {:} codon_start={:} strand={:}\n{:}\n".format(
item, k, v['codon_start'][i], v['strand'], v['cds_transcript'][i]))
fasta.write(">%s %s\n%s\n" %
(item, k, v['protein'][i]))
gff.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};\n".format(
v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], item, k, v['product'][i]))
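# Usage sketch (names are hypothetical): split an annotated GenBank file into the per-genome
# GFF, protein FASTA, and CDS transcript files that proteinortho expects.
#   gb2proteinortho('speciesA.gbk', 'protortho_dir', 'speciesA')
#   # -> protortho_dir/speciesA.gff, speciesA.faa, speciesA.transcripts.fa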
def drawStackedBar(panda, type, labels, ymax, output, colors=False):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
import numpy as np
from funannotate.stackedBarGraph import StackedBarGrapher as StackedBarGrapher
# stackedbargraph from summary data
SBG = StackedBarGrapher()
# labels
d_labels = panda.index.values
# y-ticks
ticks = np.linspace(0, ymax, 6)
ticks = list(ticks)
nums = [int(x) for x in ticks]
vals = [str(x) for x in nums]
yticks = [nums, vals]
# colors
if not colors:
color_palette = sns.hls_palette(
len(panda.columns), l=.4, s=.8).as_hex()
color_palette = [str(x).upper() for x in color_palette]
else:
color_palette = colors
# set up plot
sns.set_style('darkgrid')
sns.set_context('paper')
fig = plt.figure()
ax = fig.add_subplot(111)
YLabel = "Number of "+type
SBG.stackedBarPlot(ax, panda, color_palette, xLabels=panda.index.values,
endGaps=True, gap=0.25, xlabel="Genomes", ylabel=YLabel, yTicks=yticks)
plt.title(type+" summary")
# get the legend
legends = []
i = 0
for column in panda.columns:
legends.append(mpatches.Patch(
color=color_palette[i], label=panda.columns.values[i] + ": " + labels.get(panda.columns.values[i])))
i += 1
lgd = ax.legend(handles=legends, fontsize=6, loc='upper left',
bbox_to_anchor=(1.02, 1), borderaxespad=0)
plt.ylim([0, ymax])
        # set the x tick label font size; ideally this would scale with the figure, but a fixed size is reasonable here
for item in ax.get_xticklabels():
item.set_fontsize(8)
# setup the plot
fig.subplots_adjust(bottom=0.4)
fig.savefig(output, format='pdf', bbox_extra_artists=(
lgd,), bbox_inches='tight')
plt.close(fig)
def drawHeatmap(df, color, output, labelsize, annotate):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
# get size of table
width = len(df.columns) / 2
height = len(df.index) / 4
fig, ax = plt.subplots(figsize=(width, height))
        # seaborn draws its own colorbar, so no dedicated colorbar axes is created here
if annotate:
sns.heatmap(df, linewidths=0.5, cmap=color, ax=ax,
fmt="d", annot_kws={"size": 4}, annot=True)
else:
sns.heatmap(df, linewidths=0.5, cmap=color, ax=ax, annot=False)
plt.yticks(rotation=0)
plt.xticks(rotation=90)
for item in ax.get_xticklabels():
item.set_fontsize(8)
for item in ax.get_yticklabels():
item.set_fontsize(int(labelsize))
fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
plt.close(fig)
def donutplot(df, LongName, output, colors=False):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
# create data
longnames = []
for x in df.columns.tolist():
if x in LongName:
longnames.append(LongName.get(x))
else:
longnames.append(x)
names = df.columns.tolist()
data = df.values.tolist()
species = df.index.values
# get size of table
categories = len(df.columns)
total = len(df.index)
Rows = total // 2
Rows += total % 2
Position = list(range(1, total+1))
# get colors figured out
if not colors:
color_palette = resources.pref_colors
else:
color_palette = colors
# draw figure
if len(species) < 3:
fig = plt.figure(1, figsize=(8, 4))
else:
fig = plt.figure(1, figsize=(8, 8))
for k in range(total):
ax = fig.add_subplot(Rows, 2, Position[k])
# Create a circle for the center of the plot
my_circle = plt.Circle((0, 0), 0.7, color='white')
            plt.pie(data[k], labels=names, colors=color_palette)
p = plt.gcf()
p.gca().add_artist(my_circle)
plt.title(species[k])
patches = [mpatches.Patch(color=color_palette[i], label="{:s}".format(
longnames[i])) for i in range(len(longnames))]
plt.legend(handles=patches, bbox_to_anchor=(1, 0.5),
bbox_transform=fig.transFigure, loc="center left", ncol=1)
fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
plt.close(fig)
def drawbarplot(df, output):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import matplotlib.pyplot as plt
import seaborn as sns
# num = len(df.columns) + 1
sns.set(style="darkgrid")
fig = plt.figure()
# colors
if len(df) > len(resources.pref_colors):
colorplot = sns.husl_palette(len(df), l=.5).as_hex()
colorplot = [str(x).upper() for x in colorplot]
else:
colorplot = resources.pref_colors[:len(df)]
ax = sns.barplot(data=df, palette=colorplot)
plt.xlabel('Genomes')
plt.ylabel('Secreted Proteins')
plt.xticks(rotation=90)
fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
plt.close(fig)
def distance2mds(df, distance, type, output):
import numpy as np
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import MDS
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
# run distance metric on matrix and then plot using NMDS
num = len(df.index)
data = np.array(df).astype(int)
bc_dm = pairwise_distances(data, metric=distance)
mds = MDS(n_components=2, metric=False, max_iter=999,
dissimilarity='precomputed', n_init=10, verbose=0)
result = mds.fit(bc_dm)
coords = result.embedding_
stress = 'stress=' + '{0:.4f}'.format(result.stress_)
# get axis information and make square plus some padding
xcoords = abs(maxabs(coords[:, 0])) + 0.1
ycoords = abs(maxabs(coords[:, 1])) + 0.1
# setup plot
fig = plt.figure()
# colors
if len(df) > len(resources.pref_colors):
colorplot = sns.husl_palette(len(df), l=.5).as_hex()
colorplot = [str(x).upper() for x in colorplot]
else:
colorplot = resources.pref_colors[:len(df)]
for i in range(0, num):
plt.plot(coords[i, 0], coords[i, 1], 'o', markersize=9,
color=colorplot[i], label=df.index.values[i])
plt.xlabel('NMDS axis 1')
plt.ylabel('NMDS axis 2')
plt.ylim(-ycoords, ycoords)
plt.xlim(-xcoords, xcoords)
        # if num < 13:  # disabled guard: skip the legend when the number of genomes is too large
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title('NMDS analysis of '+type+' domains')
plt.annotate(stress, xy=(1, 0), xycoords='axes fraction',
fontsize=12, ha='right', va='bottom')
fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
plt.close(fig)
def ReciprocalBlast(filelist, protortho, cpus):
'''
function to run reciprocal diamond blast for generating proteinortho input
'''
# generate dmnd databases for each input
for x in filelist:
base = os.path.basename(x)
cmd = ['diamond', 'makedb', '--in', x, '--db', base+'.dmnd']
if not checkannotations(os.path.join(protortho, base+'.dmnd')):
runSubprocess(cmd, protortho, log)
for p in itertools.permutations(filelist, 2):
query = p[0]
target = p[1]
db = os.path.basename(target)+'.dmnd'
outname = target+'.vs.'+query+'.bla'
cmd = ['diamond', 'blastp', '--query', query, '--db', db, '--outfmt', '6',
'--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
if not checkannotations(os.path.join(protortho, outname)):
runSubprocess4(cmd, protortho, log)
db = os.path.basename(query)+'.dmnd'
outname = query+'.vs.'+target+'.bla'
cmd = ['diamond', 'blastp', '--query', target, '--db', db, '--outfmt', '6',
'--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
if not checkannotations(os.path.join(protortho, outname)):
runSubprocess4(cmd, protortho, log)
db = os.path.basename(target)+'.dmnd'
outname = target+'.vs.'+target+'.bla'
cmd = ['diamond', 'blastp', '--query', target, '--db', db, '--outfmt', '6',
'--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
if not checkannotations(os.path.join(protortho, outname)):
runSubprocess4(cmd, protortho, log)
db = os.path.basename(query)+'.dmnd'
outname = query+'.vs.'+query+'.bla'
cmd = ['diamond', 'blastp', '--query', query, '--db', db, '--outfmt', '6',
'--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
if not checkannotations(os.path.join(protortho, outname)):
runSubprocess4(cmd, protortho, log)
def singletons(poff, name):
with open(poff, 'r') as input:
count = 0
for line in input:
line = line.replace('\n', '')
if line.startswith('#'):
header = line
species = header.split('\t')[3:]
i = species.index(name.replace(' ', '_')) + 3
continue
col = line.split('\t')
if col[0] == '1' and col[i] != '*':
count += 1
return count
def orthologs(poff, name):
with open(poff, 'r') as input:
count = 0
for line in input:
line = line.replace('\n', '')
if line.startswith('#'):
header = line
species = header.split('\t')[3:]
i = species.index(name.replace(' ', '_')) + 3
continue
col = line.split('\t')
if col[0] != '1' and col[i] != '*':
count += 1
return count
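# Sketch of the .poff bookkeeping the two parsers above rely on (file name and values invented):
# the header line names the species columns from column 3 onward, column 0 holds the number of
# species in each group, and '*' marks absence, so
#   singletons('funannotate_proteinortho.poff', 'Species one')  # groups containing only this species
#   orthologs('funannotate_proteinortho.poff', 'Species one')   # groups shared with other species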
def iprTSV2dict(file, terms):
iprDict = {}
with io.open(file, 'r', encoding="utf-8") as infile:
for line in infile:
if line.startswith('ENTRY_AC') or line.startswith('\n'):
continue
line = line.rstrip()
entry, type, name = line.split('\t')
if not entry in iprDict:
iprDict[entry] = name
return iprDict
def iprxml2dict(xmlfile, terms):
import xml.etree.cElementTree as cElementTree
iprDict = {}
for event, elem in cElementTree.iterparse(xmlfile):
if elem.tag == 'interpro':
ID = elem.attrib['id']
if ID in terms:
                for x in list(elem):
if x.tag == 'name':
description = x.text
iprDict[ID] = description
elem.clear()
else:
elem.clear()
return iprDict
def pfam2dict(file):
pfamDict = {}
with open(file, 'r') as input:
for line in input:
try:
line = line.decode('utf-8').rstrip()
except AttributeError:
line = line.rstrip()
if line.startswith('PF'): # just check to be sure
cols = line.split('\t')
ID = cols[0]
desc = cols[4]
pfamDict[ID] = desc
return pfamDict
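# Expected input sketch (columns beyond those the code reads are invented): a tab-delimited PFAM
# mapping where column 0 is the PF accession and column 4 its description, e.g.
#   PF00001 <tab> ... <tab> ... <tab> ... <tab> 7 transmembrane receptor
#   pfam2dict('pfam.tsv')  # -> {'PF00001': '7 transmembrane receptor'}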
def flipKeyValues(input):
flipped = {}
for k, v in list(input.items()):
for y in v:
if not y in flipped:
flipped[y] = k
return flipped
def dictFlip(input):
# flip the list of dictionaries
outDict = {}
for x in input:
for k, v in natsorted(iter(x.items())):
for i in v:
if i in outDict:
outDict[i].append(k)
else:
outDict[i] = [k]
return outDict
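# Minimal sketch (made-up data): dictFlip inverts a list of {key: [values]} mappings into a single
# {value: [keys]} mapping across all inputs.
#   dictFlip([{'g1': ['PF00001']}, {'g2': ['PF00001', 'PF00002']}])
#   # -> {'PF00001': ['g1', 'g2'], 'PF00002': ['g2']}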
def busco_dictFlip(input):
# flip the list of dictionaries
output = []
for x in input:
outDict = {}
for k, v in natsorted(iter(x.items())):
for i in v:
if i in outDict:
outDict[i].append(k)
else:
outDict[i] = [k]
output.append(outDict)
return output
def dictFlipLookup(input, lookup):
outDict = {}
for x in input:
for k, v in natsorted(iter(x.items())):
# lookup description in another dictionary
            if lookup.get(k) is not None:
                result = k + ': ' + lookup.get(k)
            else:
                result = k + ': No description'
for i in v:
if i in outDict:
outDict[i].append(str(result))
else:
outDict[i] = [str(result)]
return outDict
def copyDirectory(src, dest, overwrite=False):
import shutil
if overwrite:
if os.path.isdir(dest):
shutil.rmtree(dest)
try:
shutil.copytree(src, dest)
# Directories are the same
except shutil.Error as e:
log.debug('Directory not copied. Error: %s' % e)
# Any error saying that the directory doesn't exist
except OSError as e:
log.debug('Directory not copied. Error: %s' % e)
def download_buscos(name, Database):
if name in resources.busco_links:
log.info("Downloading %s busco models" % name)
address = resources.busco_links.get(name)
filename = address.split('/')[-1]
if name == 'fungiv1':
foldername = 'fungi'
else:
foldername = filename.split('.')[0]
cmd = ['wget', '-c', '--tries=0', '--read-timeout=20', address]
runSubprocess(cmd, '.', log)
cmd = ['tar', '-zxf', filename]
runSubprocess(cmd, '.', log)
copyDirectory(os.path.abspath(foldername),
os.path.join(Database, name))
shutil.rmtree(foldername)
os.remove(filename)
else:
log.error("%s not a valid BUSCO database" % name)
validBusco = list(resources.busco_links.keys())
log.error("Valid BUSCO DBs: %s" % (', '.join(validBusco)))
sys.exit(1)
def fasta2dict(Fasta):
answer = dict()
with open(Fasta, 'r') as gbk:
SeqRecords = SeqIO.parse(gbk, 'fasta')
for record in SeqRecords:
if record.id in answer:
print("WARNING - duplicate key!")
else:
answer[record.id] = str(record.seq)
return answer
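# Usage sketch (file name hypothetical):
#   seqs = fasta2dict('proteins.faa')  # {record_id: sequence_string}; duplicate IDs print a warning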
def ortho2phylogeny(folder, df, num, dict, cpus, bootstrap, tmpdir, outgroup, sp_file, name, sc_buscos, ml_method):
import pylab
from Bio import Phylo
from Bio.Phylo.Consensus import get_support
if outgroup:
# load species fasta ids into dictionary
OutGroup = {}
with open(sp_file, 'r') as sp:
for rec in SeqIO.parse(sp, 'fasta'):
OutGroup[rec.id] = rec.seq
# single copy orthologs are in a dataframe, count and then randomly select
num_species = len(df.columns)
species = df.columns.values
if len(df) == 0:
log.error("0 single copy BUSCO orthologs found, skipping phylogeny")
return
if len(df) < int(num):
number = len(df)
log.info(
"Found %i single copy BUSCO orthologs, will use all to infer phylogeny" % (len(df)))
subsampled = df
else:
number = int(num)
log.info("Found %i single copy BUSCO orthologs, will randomly select %i to infer phylogeny" % (
len(df), number))
subsampled = df.sample(n=number)
if outgroup: # passed a list to extract from parent script
busco_list = sc_buscos
    # BUSCO IDs were already checked across all genomes, so loop through the first set and write the BUSCOs used to file
with open(os.path.join(tmpdir, 'phylogeny.buscos.used.txt'), 'w') as busco_out:
with open(os.path.join(tmpdir, 'phylogeny.concat.fa'), 'w') as proteinout:
if outgroup:
proteinout.write(">%s\n" % name)
for y in busco_list:
proteinout.write("%s" % (OutGroup.get(y)))
proteinout.write('\n')
for i in range(0, num_species):
proteinout.write(">%s\n" % species[i])
proteins = fasta2dict(os.path.join(folder, species[i]+'.faa'))
for row in subsampled[species[i]].items():
proteinout.write("%s" % proteins.get(row[1]))
busco_out.write("%s\t%s\n" % (dict[i].get(row[1]), row[1]))
proteinout.write('\n')
cmd = ['mafft', '--anysymbol', '--quiet', os.path.join(tmpdir, 'phylogeny.concat.fa')]
runSubprocess2(cmd, '.', log, os.path.join(tmpdir, 'phylogeny.mafft.fa'))
cmd = ['trimal', '-in', os.path.join(tmpdir, 'phylogeny.mafft.fa'), '-out', os.path.join(
tmpdir, 'phylogeny.trimal.phylip'), '-automated1', '-phylip']
runSubprocess(cmd, '.', log)
if ml_method == 'raxml':
cmd = ['raxmlHPC-PTHREADS', '-T', str(cpus), '-f', 'a', '-m', 'PROTGAMMAAUTO', '-p', '12345',
'-x', '12345', '-#', str(bootstrap), '-s', 'phylogeny.trimal.phylip', '-n', 'nwk']
if outgroup:
cmd = cmd + ['-o', name]
treefile = os.path.join(tmpdir, 'RAxML_bootstrap.nwk')
runSubprocess(cmd, tmpdir, log)
# parse with biopython and draw
trees = list(Phylo.parse(treefile, 'newick'))
best = Phylo.read(os.path.join(tmpdir, 'RAxML_bestTree.nwk'), 'newick')
support_tree = get_support(best, trees)
Phylo.draw(support_tree, do_show=False)
pylab.axis('off')
pylab.savefig(os.path.join(tmpdir, 'ML.phylogeny.pdf'),
format='pdf', bbox_inches='tight', dpi=1000)
else: # run iqtree as faster and better than raxml in initial testing
cmd = ['iqtree', '-s', 'phylogeny.trimal.phylip', '-nt', 'AUTO',
'-ntmax', str(cpus), '-seed', '12345', '-bb', '1000']
if outgroup:
cmd = cmd + ['-o', name]
runSubprocess(cmd, tmpdir, log)
treefile = os.path.join(tmpdir, 'phylogeny.trimal.phylip.treefile')
best = Phylo.read(treefile, 'newick')
Phylo.draw(best, do_show=False)
pylab.axis('off')
pylab.savefig(os.path.join(tmpdir, 'ML.phylogeny.pdf'),
format='pdf', bbox_inches='tight', dpi=1000)
def getTrainResults(input):
with open(input, 'r') as train:
for line in train:
try:
line = line.decode('utf-8')
except AttributeError:
pass
line = line.rstrip()
if line.startswith('nucleotide level'):
line = line.replace(' ', '')
values1 = line.split('|') # get [1] and [2]
if line.startswith('exon level'):
line = line.replace(' ', '') # get [6] and [7]
values2 = line.split('|')
if line.startswith('gene level'):
line = line.replace(' ', '')
values3 = line.split('|') # get [6] and [7]
return (float(values1[1]), float(values1[2]), float(values2[6]), float(values2[7]), float(values3[6]), float(values3[7]))
def count_multi_CDS_genes(input, filterlist):
# take funannotate annotation dictionary and return number of genes with more than one CDS
counter = 0
counter_inList = 0
for k, v in natsorted(list(input.items())):
if len(v['CDS'][0]) > 1:
counter += 1
if k in filterlist:
counter_inList += 1
return len(input), counter, len(filterlist), counter_inList
def selectTrainingModels(input, fasta, genemark_gtf, output):
from collections import OrderedDict
'''
    function to take a GFF3 file and filter the gene models so they are non-overlapping;
    also sort the models by number of exons, the more the better.
'''
def _sortDict(d):
return (len(d[1]['CDS'][0]))
# load gene models into funannotate structured dictionary
gene_inter = defaultdict(InterLap)
Genes = {}
Genes = gff2dict(input, fasta, Genes)
# add to InterLap output proteins
proteins = 'augustus.training.proteins.fa'
ignoreList = []
keeperList = getGenesGTF(genemark_gtf)
# check number of multi-cds genes
countGenes, countGenesCDS, countKeeper, countKeeperCDS = count_multi_CDS_genes(
Genes, keeperList)
log.debug('{:,} PASA genes; {:,} have multi-CDS; {:,} from filterGeneMark; {:,} have multi-CDS'.format(
countGenes, countGenesCDS, countKeeper, countKeeperCDS))
multiCDScheck, keeperCheck = (False,)*2
if countKeeper >= 200:
keeperCheck = True
if keeperCheck:
if countKeeperCDS >= 200:
multiCDScheck = True
else:
if countGenesCDS >= 200:
multiCDScheck = True
log.debug('filterGeneMark GTF filter set to {:}; require genes with multiple CDS set to {:}'.format(
keeperCheck, multiCDScheck))
with open(proteins, 'w') as protout:
for k, v in natsorted(list(Genes.items())):
if keeperCheck and not k in keeperList:
ignoreList.append(k)
continue
if multiCDScheck and len(v['CDS'][0]) < 2:
ignoreList.append(k)
continue
# add to interlap object and write protein out
gene_inter[v['contig']].add(
(v['location'][0], v['location'][1], v['strand'], k, len(v['CDS'][0])))
protout.write('>%s___%i\n%s\n' %
(k, len(v['CDS'][0]), v['protein'][0]))
# make sure gene models are unique, so do pairwise diamond search @ 80% identity
cmd = ['diamond', 'makedb', '--in',
'augustus.training.proteins.fa', '--db', 'aug_training.dmnd']
runSubprocess4(cmd, '.', log)
cmd = ['diamond', 'blastp', '--query', 'augustus.training.proteins.fa', '--db', 'aug_training.dmnd', '--more-sensitive', '-o',
'aug.blast.txt', '-f', '6', 'qseqid', 'sseqid', 'pident', '--query-cover', '80', '--subject-cover', '80', '--id', '80', '--no-self-hits']
runSubprocess4(cmd, '.', log)
blast_results = []
with open('aug.blast.txt', 'r') as blast:
for line in blast:
line = line.rstrip()
line = line.replace('___', '\t')
blast_results.append(line.split('\t'))
sortedBlast = natsorted(
blast_results, key=lambda x: int(x[1]), reverse=True)
blastignore = []
for hit in sortedBlast:
if hit[0] in blastignore or hit[2] in blastignore:
continue
if int(hit[1]) >= int(hit[3]):
if not hit[2] in blastignore:
blastignore.append(hit[2])
else:
if not hit[0] in blastignore:
blastignore.append(hit[0])
log.debug('{:,} models fail blast identity threshold'.format(
len(blastignore)))
SafeRemove('augustus.training.proteins.fa')
SafeRemove('aug_training.dmnd')
SafeRemove('aug.blast.txt')
# now return cleaned genemark GTF file
finalIgnoreList = []
for x in ignoreList:
if not x in finalIgnoreList:
finalIgnoreList.append(x)
for y in blastignore:
if not y in finalIgnoreList:
finalIgnoreList.append(y)
log.debug('{:,} models will be ignored for training Augustus'.format(
len(finalIgnoreList)))
GenesPass = {}
for k, v in natsorted(list(Genes.items())):
if not k in finalIgnoreList and not k in GenesPass:
loc = sorted([v['location'][0], v['location'][1]])
if loc in gene_inter[v['contig']]:
hits = list(gene_inter[v['contig']].find(loc))
sortedHits = sorted(
hits, key=lambda x: int(x[4]), reverse=True)
validHits = []
for y in sortedHits:
if not y[3] in finalIgnoreList and y[3] != k:
validHits.append(y)
if len(validHits) > 0:
if not validHits[0][3] in GenesPass:
GenesPass[validHits[0][3]] = Genes.get(validHits[0][3])
else:
GenesPass[k] = v
# now sort dictionary number of exons
sGenes = sorted(iter(GenesPass.items()), key=_sortDict, reverse=True)
sortedGenes = OrderedDict(sGenes)
log.info("{:,} of {:,} models pass training parameters".format(
len(sortedGenes), len(Genes)))
# x = dict(itertools.islice(sortedGenes.items(), 0, 2500))
final = {}
for i, (k, v) in enumerate(natsorted(list(sortedGenes.items()))):
v['ids'] = ['g_'+str(i+1)+'-T1']
final['g_'+str(i+1)] = v
dict2gff3noUTRs(final, output)
return len(final)
def getGenesGTF(input):
genes = []
with open(input, 'r') as infile:
for line in infile:
            if not line.startswith('\n') and not line.startswith('#'):
line = line.rstrip()
info = line.split('\t')[-1]
attributes = info.split(';')
ID = None
for x in attributes:
if x.startswith('gene_id'):
tmp = x.replace('gene_id ', '')
ID = tmp.replace('"', '')
if ID:
if not ID in genes:
genes.append(ID)
return genes
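# Sketch of the GTF attribute field this parser expects (the line below is invented):
#   contig_1 <tab> GeneMark.hmm <tab> CDS <tab> 100 <tab> 900 <tab> . <tab> + <tab> 0 <tab> gene_id "g_1"; transcript_id "g_1.t1";
# getGenesGTF() returns the unique, unquoted gene_id values ('g_1' here).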
def trainAugustus(AUGUSTUS_BASE, train_species, trainingset,
genome, outdir, cpus, num_training, optimize,
config_path):
if which('randomSplit.pl'):
RANDOMSPLIT = 'randomSplit.pl'
else:
RANDOMSPLIT = os.path.join(AUGUSTUS_BASE, 'scripts', 'randomSplit.pl')
if which('optimize_augustus.pl'):
OPTIMIZE = 'optimize_augustus.pl'
else:
OPTIMIZE = os.path.join(
AUGUSTUS_BASE, 'scripts', 'optimize_augustus.pl')
if which('new_species.pl'):
NEW_SPECIES = 'new_species.pl'
else:
NEW_SPECIES = os.path.join(AUGUSTUS_BASE, 'scripts', 'new_species.pl')
aug_cpus = '--cpus='+str(cpus)
species = '--species='+train_species
aug_log = os.path.join(outdir, 'logfiles', 'augustus_training.log')
TrainSet = os.path.abspath(trainingset)
onlytrain = '--onlytrain='+TrainSet+'.train'
testtrain = TrainSet+'.test'
trainingdir = os.path.join(
outdir, 'predict_misc', 'tmp_opt_'+train_species)
myENV = os.environ
myENV['AUGUSTUS_CONFIG_PATH'] = config_path
with open(aug_log, 'w') as logfile:
if not CheckAugustusSpecies(train_species):
subprocess.call([NEW_SPECIES, '--AUGUSTUS_CONFIG_PATH={:}'.format(
config_path), species], stdout=logfile, stderr=logfile)
# run etraining again to only use best models from EVM for training
p1 = subprocess.Popen(['etraining', species, TrainSet],
cwd=os.path.join(outdir, 'predict_misc'),
stderr=logfile, stdout=logfile, env=dict(myENV))
p1.communicate()
# split off num_training models for testing purposes
subprocess.call([RANDOMSPLIT, TrainSet, str(num_training)],
cwd=os.path.join(outdir, 'predict_misc'))
if os.path.isfile(os.path.join(outdir, 'predict_misc', TrainSet+'.train')):
with open(os.path.join(outdir, 'predict_misc', 'augustus.initial.training.txt'), 'w') as initialtraining:
subprocess.call(['augustus', '--AUGUSTUS_CONFIG_PATH={:}'.format(
config_path), species, TrainSet+'.test'], stdout=initialtraining, cwd=os.path.join(outdir, 'predict_misc'))
train_results = getTrainResults(os.path.join(
outdir, 'predict_misc', 'augustus.initial.training.txt'))
trainTable = [['Feature', 'Specificity', 'Sensitivity'],
['nucleotides', '{:.1%}'.format(
train_results[0]), '{:.1%}'.format(train_results[1])],
['exons', '{:.1%}'.format(
train_results[2]), '{:.1%}'.format(train_results[3])],
['genes', '{:.1%}'.format(
train_results[4]), '{:.1%}'.format(train_results[5])]
]
log.info('Augustus initial training results:')
train_table = print_table(trainTable, return_str=True)
sys.stderr.write(train_table)
if optimize:
# now run optimization
subprocess.call([OPTIMIZE, '--AUGUSTUS_CONFIG_PATH={:}'.format(config_path), species, aug_cpus,
onlytrain, testtrain], cwd=os.path.join(outdir, 'predict_misc'), stderr=logfile, stdout=logfile)
# run etraining again
p2 = subprocess.Popen(['etraining', species, TrainSet], cwd=os.path.join(
outdir, 'predict_misc'), stderr=logfile, stdout=logfile, env=dict(myENV))
p2.communicate()
with open(os.path.join(outdir, 'predict_misc', 'augustus.final.training.txt'), 'w') as finaltraining:
subprocess.call(['augustus', '--AUGUSTUS_CONFIG_PATH={:}'.format(
config_path), species, TrainSet+'.test'], stdout=finaltraining, cwd=os.path.join(outdir, 'predict_misc'))
train_results = getTrainResults(os.path.join(
outdir, 'predict_misc', 'augustus.final.training.txt'))
trainTable = [['Feature', 'Specificity', 'Sensitivity'],
['nucleotides', '{:.1%}'.format(
train_results[0]), '{:.1%}'.format(train_results[1])],
['exons', '{:.1%}'.format(
train_results[2]), '{:.1%}'.format(train_results[3])],
['genes', '{:.1%}'.format(
train_results[4]), '{:.1%}'.format(train_results[5])]
]
log.info('Augustus optimized training results:')
train_table = print_table(trainTable, return_str=True)
sys.stderr.write(train_table)
# clean up tmp folder
shutil.rmtree(trainingdir)
else:
if train_results[4] < 0.50:
log.info(
"Accuracy seems low, you can try to improve by passing the --optimize_augustus option.")
else:
log.error("AUGUSTUS training failed, check logfiles")
sys.exit(1)
def sortList(input, col):
return natsorted(input, key=operator.itemgetter(col))
def sortHints(input, output):
data = []
with open(input, 'r') as infile:
for line in infile:
line = line.rstrip()
data.append(line.split('\t'))
# replicate this: sort -n -k 4,4 | sort -s -n -k 5,5 | sort -s -n -k 3,3 | sort -s -k 1,1
sort1 = sortList(data, 3)
sort2 = sortList(sort1, 4)
sort3 = sortList(sort2, 2)
sort4 = sortList(sort3, 0)
with open(output, 'w') as sort_out:
for line in sort4:
sort_out.write('%s\n' % '\t'.join(line))
def checkgoatools(input):
with open(input, 'r') as goatools:
count = -1
result = False
headercount = 0
for line in goatools:
count += 1
if line.startswith('GO\tNS') or line.startswith('#'):
headercount = count
if line.startswith('GO:'):
result = True
return (result, headercount)
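# Usage sketch (file name hypothetical): returns a tuple of (whether any 'GO:' result rows exist,
# index of the header line) for a goatools enrichment output.
#   enriched, header_idx = checkgoatools('go_enrichment.txt')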
def translatemRNA(input, output):
from Bio.SeqIO.FastaIO import SimpleFastaParser
with open(output, 'w') as outfile:
with open(input, 'r') as fasta:
for header, seq in SimpleFastaParser(fasta):
codon_start = 1
for x in header.split(' '):
if x.startswith('codon_start='):
codon_start = int(
x.replace('codon_start=', '').rstrip())
# transcripts should already be in proper orientation
protSeq = translate(seq, '+', codon_start-1)
outfile.write('>{:}\n{:}\n'.format(header, protSeq))
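# Input sketch (the record below is invented): headers may carry a codon_start= tag that shifts
# the reading frame before translation.
#   >gene1-T1 gene1 codon_start=1 strand=+
#   ATGGCT...
#   translatemRNA('transcripts.fa', 'proteins.fa')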
def alignMAFFT(input, output):
FNULL = open(os.devnull, 'w')
with open(output, 'w') as outfile:
subprocess.call(['mafft', '--anysymbol', '--quiet', input],
stderr=FNULL, stdout=outfile)
def align2Codon(alignment, transcripts, output):
FNULL = open(os.devnull, 'w')
with open(output, 'w') as outfile:
subprocess.call(['perl', os.path.join(parentdir, 'aux_scripts', 'pal2nal.pl'),
alignment, transcripts, '-output', 'fasta'], stderr=FNULL, stdout=outfile)
if getSize(output) < 1:
os.remove(output)
log.debug('dNdS Error: pal2nal failed for %s' % alignment)
def counttaxa(input):
ct = 0
with open(input, 'r') as tree:
line = tree.readline()
ct = line.count(',')+1
return ct
def getMatchFileName(pattern, directory):
result = None
for f in os.listdir(directory):
if pattern in f:
result = os.path.join(directory, f)
return result
def drawPhyMLtree(fasta, tree):
FNULL = open(os.devnull, 'w')
fc = countfasta(fasta)
# need to convert to phylip format
base = os.path.basename(fasta).split('.')[0]
dir = os.path.dirname(fasta)
tmp1 = os.path.join(dir, base+'.draw2tree.phylip')
subprocess.call(['trimal', '-in', fasta, '-out', tmp1, '-phylip'])
# draw tree
subprocess.call(['phyml', '-i', tmp1], stdout=FNULL, stderr=FNULL)
tmp2 = getMatchFileName(base+'.draw2tree.phylip_phyml_tree', dir)
# check that num taxa in tree = input
tc = counttaxa(tmp2)
if tc != fc: # something failed...
log.debug('dNdS Error: phyml tree failed for %s' % fasta)
# retry
subprocess.call(['trimal', '-in', fasta, '-out', tmp1, '-phylip'])
subprocess.call(['phyml', '-i', tmp1], stdout=FNULL, stderr=FNULL)
# rename and clean
os.rename(tmp2, tree)
SafeRemove(tmp1)
stats = getMatchFileName(base+'.draw2tree.phylip_phyml_stats', dir)
SafeRemove(stats)
def simplestTreeEver(fasta, tree):
with open(tree, 'w') as outfile:
with open(fasta, 'r') as input:
ids = []
for rec in SeqIO.parse(input, 'fasta'):
ids.append(rec.id)
outfile.write('(%s,%s);' % (ids[0], ids[1]))
def rundNdSexhaustive(folder):
# setup intermediate files
tmpdir = os.path.dirname(folder)
name = os.path.basename(folder)
transcripts = os.path.join(tmpdir, name+'.transcripts.fa')
prots = os.path.join(tmpdir, name+'.proteins.fa')
aln = os.path.join(tmpdir, name+'.aln')
codon = os.path.join(tmpdir, name+'.codon.aln')
tree = os.path.join(tmpdir, name+'.tree')
log = os.path.join(tmpdir, name+'.log')
finallog = os.path.join(tmpdir, name, name+'.log')
if not checkannotations(finallog):
num_seqs = countfasta(transcripts)
# Translate to protein space
translatemRNA(transcripts, prots)
# align protein sequences
alignMAFFT(prots, aln)
# convert to codon alignment
align2Codon(aln, transcripts, codon)
if checkannotations(codon):
if num_seqs > 2:
# now generate a tree using phyml
drawPhyMLtree(codon, tree)
else:
simplestTreeEver(transcripts, tree)
# now run codeml through ete3
etecmd = ['ete3', 'evol', '--alg', os.path.abspath(codon), '-t', os.path.abspath(
tree), '--models', 'M0', 'M1', 'M2', 'M7', 'M8', '-o', name, '--clear_all', '--codeml_param', 'cleandata,1']
with open(log, 'w') as logfile:
logfile.write('\n%s\n' % ' '.join(etecmd))
subprocess.call(etecmd, cwd=tmpdir,
stdout=logfile, stderr=logfile)
# clean up
for file in os.listdir(tmpdir):
if file.startswith(name+'.'):
os.rename(os.path.join(tmpdir, file),
os.path.join(tmpdir, name, file))
def rundNdSestimate(folder):
# setup intermediate files
tmpdir = os.path.dirname(folder)
name = os.path.basename(folder)
transcripts = os.path.join(tmpdir, name+'.transcripts.fa')
prots = os.path.join(tmpdir, name+'.proteins.fa')
aln = os.path.join(tmpdir, name+'.aln')
codon = os.path.join(tmpdir, name+'.codon.aln')
tree = os.path.join(tmpdir, name+'.tree')
log = os.path.join(tmpdir, name+'.log')
finallog = os.path.join(tmpdir, name, name+'.log')
if not checkannotations(finallog):
num_seqs = countfasta(transcripts)
# Translate to protein space
translatemRNA(transcripts, prots)
# align protein sequences
alignMAFFT(prots, aln)
# convert to codon alignment
align2Codon(aln, transcripts, codon)
if checkannotations(codon):
if num_seqs > 2:
# now generate a tree using phyml
drawPhyMLtree(codon, tree)
else:
simplestTreeEver(transcripts, tree)
# now run codeml through ete3
etecmd = ['ete3', 'evol', '--alg', os.path.abspath(codon), '-t', os.path.abspath(
tree), '--models', 'M0', '-o', name, '--clear_all', '--codeml_param', 'cleandata,1']
with open(log, 'w') as logfile:
logfile.write('\n%s\n' % ' '.join(etecmd))
subprocess.call(etecmd, cwd=tmpdir,
stdout=logfile, stderr=logfile)
# clean up
for file in os.listdir(tmpdir):
if file.startswith(name+'.'):
os.rename(os.path.join(tmpdir, file),
os.path.join(tmpdir, name, file))
def get_subdirs(a_dir):
return [os.path.join(a_dir, name) for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def get_subdirs2(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def parsedNdS(folder):
results = {}
hits = get_subdirs2(folder)
for x in hits:
finallog = os.path.join(folder, x, x+'.log')
# parse logfile to get omega
dnds = 'NA'
m1m2p = 'NA'
m7m8p = 'NA'
if os.path.isfile(finallog):
with open(finallog, 'r') as input:
for line in input:
line = line.strip()
if 'M7' in line and 'M8' in line and '|' in line:
m7m8p = line.split('|')[-1].strip()
m7m8p = m7m8p.replace('*', '')
m7m8p = '{0:.5f}'.format(float(m7m8p))
elif 'M1' in line and 'M2' in line and '|' in line:
m1m2p = line.split('|')[-1].lstrip()
m1m2p = m1m2p.replace('*', '')
m1m2p = '{0:.5f}'.format(float(m1m2p))
elif line.startswith('- Model M0'):
nextline = next(input)
dnds = nextline.split('tree: ')[1].rstrip()
results[x] = (dnds, m1m2p, m7m8p)
return results
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
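# Minimal sketch: split a sequence into num roughly equal slices.
#   chunkIt([1, 2, 3, 4, 5], 2)  # -> [[1, 2], [3, 4, 5]]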
def getBlastDBinfo(input):
'''
function to return a tuple of info using blastdbcmd
tuple: (name, date, #sequences)
'''
cmd = ['blastdbcmd', '-info', '-db', input]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = proc.communicate()
if stderr:
print((stderr.split('\n')[0]))
results = stdout.split('\n\n')
results = [x for x in results if x]
    # parse results (now a list); look for blocks starting with 'Database:' and 'Date:'
Name, Date, NumSeqs = (None,)*3
for x in results:
if x.startswith('Database:'):
hit = x.split('\n\t')
Name = hit[0].replace('Database: ', '')
NumSeqs = hit[1].split(' sequences;')[0].replace(',', '')
if x.startswith('Date:'):
Date = x.split('\t')[0].replace('Date: ', '')
return (Name, Date, NumSeqs)
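# Usage sketch (database name hypothetical): parses the 'Database:' and 'Date:' stanzas of
# `blastdbcmd -info` output.
#   name, date, nseqs = getBlastDBinfo('uniprot.db')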
HEADER = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
<meta name="funannotate comparative genomics output" content="">
<meta name="<NAME>" content="">
<title>Funannotate</title>
<!-- Bootstrap core CSS -->
<link href="css/bootstrap.min.css" rel="stylesheet">
<!-- Custom styles for this template -->
<link href="css/starter-template.css" rel="stylesheet">
<script src="js/ie-emulation-modes-warning.js"></script>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
</button>
<a class="navbar-brand" href="index.html">Funannotate</a>
</div>
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li><a href="stats.html">Stats</a></li>
<li><a href="phylogeny.html">Phylogeny</a></li>
<li><a href="orthologs.html">Orthologs</a></li>
<li><a href="interpro.html">InterPro</a></li>
<li><a href="pfam.html">PFAM</a></li>
<li><a href="merops.html">Merops</a></li>
<li><a href="cazy.html">CAZymes</a></li>
<li><a href="cogs.html">COGs</a></li>
<li><a href="signalp.html">SignalP</a></li>
<li><a href="tf.html">TFs</a></li>
<li><a href="secmet.html">SecMet</a></li>
<li><a href="go.html">GO</a></li>
<li><a href="citation.html">Cite</a></li>
</ul>
</div><!--/.nav-collapse -->
</div>
</nav>
'''
ORTHOLOGS = '''
<div class="container">
<div class="table">
<h2 class="sub-header">Orthologous protein groups</h2>
<div class="table-responsive">
'''
INDEX = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Funannotate Results</h2>
<br>
<p><a href='stats.html'>Genome Summary Stats</a></p>
<p><a href='phylogeny.html'>Maximum likelihood Phylogeny (RAxML)</a></p>
<p><a href='merops.html'>MEROPS Protease Stats</a></p>
<p><a href='cazy.html'>CAZyme carbohydrate activating enzyme Stats</a></p>
<p><a href='cogs.html'>COGs Stats</a></p>
<p><a href='signalp.html'>Secreted proteins (SignalP)</a></p>
<p><a href='interpro.html'>InterProScan Domain Stats</a></p>
<p><a href='tf.html'>Transcription Factor Summary</a></p>
<p><a href='secmet.html'>Secondary Metabolism Cluster Summary</a></p>
<p><a href='pfam.html'>PFAM Domain Stats</a></p>
<p><a href='go.html'>Gene Ontology Enrichment Analysis</a></p>
<p><a href='orthologs.html'>Orthologous proteins</a></p>
<br>
'''
SUMMARY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Genome Summary Stats</h2>
<div class="table-responsive">
'''
PHYLOGENY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">RAxML Maximum Likelihood Phylogeny</h2>
<a href='phylogeny/ML.phylogeny.pdf'><img src="phylogeny/ML.phylogeny.pdf" height="500" /></a></div>
'''
NOPHYLOGENY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Number of species too low to generate phylogeny</h2>
'''
MEROPS = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">MEROPS Protease Families per Genome Results</h2>
<div class='row'>
<div class="col-sm-7"><a href='merops/MEROPS.graph.pdf'><img src="merops/MEROPS.graph.pdf" height="350" /></a></div>
<div class="col-sm-5"><a href='merops/MEROPS.heatmap.pdf'><img src="merops/MEROPS.heatmap.pdf" height="500" /></a></div>
</div>
<div class="table-responsive">
'''
INTERPRO = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">InterProScan Domains per Genome Results</h2>
<div class='row'>
<a href='interpro/InterProScan.nmds.pdf'><img src="interpro/InterProScan.nmds.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
PFAM = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">PFAM Domains per Genome Results</h2>
<div class='row'>
<a href='pfam/PFAM.nmds.pdf'><img src="pfam/PFAM.nmds.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
SIGNALP = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Secreted Proteins per Genome Results</h2>
<div class='row'>
<a href='signalp/signalp.pdf'><img src="signalp/signalp.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
TF = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Fungal Transcription Factors per Genome Results</h2>
<div class='row'>
<a href='tfs/TF.heatmap.pdf'><img src="tfs/TF.heatmap.pdf" height="800" /></a></div>
<div class="table-responsive">
'''
SECMET = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Secondary Metabolism Clusters per Genome Results</h2>
<div class='row'>
<a href='secmet/SM.graph.pdf'><img src="secmet/SM.graph.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
CAZY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">CAZyme Families per Genome Results</h2>
<div class='row'>
<div class="col-sm-7"><a href='cazy/CAZy.graph.pdf'><img src="cazy/CAZy.graph.pdf" height="350" /></a></div>
<div class="col-sm-5"><a href='cazy/CAZy.heatmap.pdf'><img src="cazy/CAZy.heatmap.pdf" height="600" /></a></div>
</div>
<div class="table-responsive">
'''
COG = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Clusters of Orthologous Groups (COGs) per Genome Results</h2>
<div class='row'>
<a href='cogs/COGS.graph.pdf'><img src="cogs/COGS.graph.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
GO = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">GO ontology enrichment Results</h2>
<div class='row'>
'''
MISSING = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">These data are missing from annotation.</h2>
'''
CITATION = '''
<div class="container">
<div class="starter-template">
<h3 class="sub-header">If you found Funannotate useful please cite:</h3>
<p>Palmer JM. 2016. Funannotate: a fungal genome annotation and comparative genomics pipeline. <a href="https://github.com/nextgenusfs/funannotate">https://github.com/nextgenusfs/funannotate</a>.</p>
'''
FOOTER = '''
</div>
</div>
</div><!-- /.container -->
<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script>window.jQuery || document.write('<script src="js/jquery.min.js"><\/script>')</script>
<script src="js/bootstrap.min.js"></script>
<!-- IE10 viewport hack for Surface/desktop Windows 8 bug -->
<script src="js/ie10-viewport-bug-workaround.js"></script>
</body>
</html>
'''
HEADER2 = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="funannotate comparative genomics output" content="">
<meta name="<NAME>" content="">
<title>Funannotate</title>
<link href="css/bootstrap.min.css" rel="stylesheet">
<link href="css/starter-template.css" rel="stylesheet">
<script src="js/ie-emulation-modes-warning.js"></script>
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/t/bs/dt-1.10.11/datatables.min.css"/>
<script type="text/javascript" src="https://cdn.datatables.net/t/bs/dt-1.10.11/datatables.min.js"></script>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top">
<div class="container-fluid">
<div class="navbar-header">
<span class="sr-only">Toggle navigation</span>
<a class="navbar-brand" href="index.html">Funannotate</a>
</div>
<div class="navbar-header">
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li class="active"><a href="stats.html">Stats</a></li>
<li><a href="orthologs.html">Orthologs</a></li>
<li><a href="interpro.html">InterProScan</a></li>
<li><a href="pfam.html">PFAM</a></li>
<li><a href="merops.html">Merops</a></li>
<li><a href="cazy.html">CAZymes</a></li>
<li><a href="signalp.html">SignalP</a></li>
<li><a href="go.html">GO ontology</a></li>
<li><a href="citation.html">Citation</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">Genomes <span class="caret"></span></a>
<ul class="dropdown-menu">
'''
|
Ch16/MRF.py
|
jason-168/MLCode
| 146 |
131092
|
# Code from Chapter 16 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2008, 2014
# Demonstration of the Markov Random Field method of image denoising
import pylab as pl
import numpy as np
def MRF(I,J,eta=2.0,zeta=1.5):
    ind = np.arange(np.shape(I)[0])
np.random.shuffle(ind)
orderx = ind.copy()
np.random.shuffle(ind)
for i in orderx:
for j in ind:
oldJ = J[i,j]
J[i,j]=1
patch = 0
for k in range(-1,1):
for l in range(-1,1):
patch += J[i,j] * J[i+k,j+l]
energya = -eta*np.sum(I*J) - zeta*patch
J[i,j]=-1
patch = 0
for k in range(-1,1):
for l in range(-1,1):
patch += J[i,j] * J[i+k,j+l]
energyb = -eta*np.sum(I*J) - zeta*patch
if energya<energyb:
J[i,j] = 1
else:
J[i,j] = -1
return J
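# Note on the update rule above (descriptive only): for each pixel the code compares the energy
#   E = -eta * sum(I * J) - zeta * sum_patch(J[i, j] * J[i+k, j+l])
# with the pixel set to +1 versus -1 and keeps the lower-energy state; the patch sum covers only
# k, l in {-1, 0}, i.e. the pixel itself plus its upper/left neighbours.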
I = pl.imread('world.png')
N = np.shape(I)[0]
I = I[:,:,0]
I = np.where(I<0.1,-1,1)
pl.imshow(I)
pl.title('Original Image')
noise = np.random.rand(N,N)
J = I.copy()
ind = np.where(noise<0.1)
J[ind] = -J[ind]
pl.figure()
pl.imshow(J)
pl.title('Noisy image')
newJ = J.copy()
newJ = MRF(I,newJ)
pl.figure()
pl.imshow(newJ)
pl.title('Denoised version')
print(np.sum(I-J), np.sum(I-newJ))
pl.show()
|