| content | type |
| --- | --- |
| string (lengths 0 to 894k) | string (2 classes) |
import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']=''
import numpy as np
from tensorflow.keras.layers import Input, Dense, SimpleRNN, GRU, LSTM, Bidirectional
from tensorflow.keras.models import Model
REC = LSTM
sequence_length = 3
feature_dim = 1
features_in = Input(batch_shape=(1, sequence_length, feature_dim))
rnn_out = Bidirectional( REC(1, activation=None, use_bias=False, return_sequences=True, return_state=False, stateful=False))(features_in)
stateless_model = Model(inputs=[features_in], outputs=[rnn_out])
stateful_rnn_out = Bidirectional( REC(1, activation=None, use_bias=False, return_sequences=True, return_state=False, stateful=True))(features_in)
stateful_model = Model(inputs=features_in, outputs=stateful_rnn_out)
stateful_model.set_weights( stateless_model.get_weights() )
x_in = np.random.normal(0,10,sequence_length)
x_in = x_in.reshape( (1, sequence_length, feature_dim) )
def print_bidi_out(non_stateful_out, stateful_out):
fb = ['FWD::', 'BWD::']
for i in range(2):
print(fb[i])
print(f'non_stateful: {non_stateful_out.T[i]}')
print(f'stateful: {stateful_out.T[i]}')
print(f'delta: {stateful_out.T[i]-non_stateful_out.T[i]}')
non_stateful_out = stateless_model.predict(x_in).reshape((sequence_length,2))
stateful_out = stateful_model.predict(x_in).reshape((sequence_length,2))
print_bidi_out(non_stateful_out, stateful_out)
non_stateful_out = stateless_model.predict(x_in).reshape((sequence_length,2))
stateful_out = stateful_model.predict(x_in).reshape((sequence_length,2))
print_bidi_out(non_stateful_out, stateful_out)
print('\n** RESETTING STATES in STATEFUL MODEL **\n')
stateful_model.reset_states()
non_stateful_out = stateless_model.predict(x_in).reshape((sequence_length,2))
stateful_out = stateful_model.predict(x_in).reshape((sequence_length,2))
print_bidi_out(non_stateful_out, stateful_out)
|
python
|
import b128
import itertools
import os
import plyvel
import secp256k1
from binascii import unhexlify
from utxo.script import OP_DUP, OP_HASH160, OP_EQUAL, \
OP_EQUALVERIFY, OP_CHECKSIG
def ldb_iter(datadir):
db = plyvel.DB(os.path.join(datadir, "chainstate"), compression=None)
obf_key = db.get((unhexlify("0e00") + "obfuscate_key"))
if obf_key is not None:
pre = 'C'
obf_key = map(ord, obf_key[1:])
else:
pre = 'c'
def norm(raw):
key, value = raw
if obf_key is not None:
value = deobfuscate(obf_key, value)
return parse_ldb_value(key, value)
else:
return parse_ldb_value_old(key, value)
it = db.iterator(prefix=pre)
it = itertools.imap(norm, it)
if obf_key is None:
it = itertools.chain.from_iterable(it)
return it
def parse_ldb_value(key, raw):
tx_hash = key[1:33]
index = b128.parse(key[33:])[0]
code, raw = b128.read(raw)
height = code >> 1
amt_comp, raw = b128.read(raw)
amt = b128.decompress_amount(amt_comp)
script_code, raw = b128.read(raw)
script = decompress_raw(script_code, raw)[0]
return tx_hash, height, index, amt, script
def parse_ldb_value_old(key, raw):
tx_hash = key[1:]
version, raw = b128.read(raw)
code, raw = b128.read(raw)
first_two = (code & (2 | 4)) >> 1
n = (code >> 3) + (first_two == 0)
offset = 0
bitv = first_two
if n > 0:
while n:
n -= (ord(raw[offset]) != 0)
offset += 1
bitv = (int(raw[:offset][::-1].encode('hex'), 16) << 2) | first_two
raw = raw[offset:]
i = 0
utxos = []
while bitv > 0:
if bitv & 1:
amt_comp, raw = b128.read(raw)
amt = b128.decompress_amount(amt_comp)
script_code, raw = b128.read(raw)
script, raw = decompress_raw(script_code, raw, chomp=True)
ut = (tx_hash, None, i, amt, script)
utxos.append(ut)
bitv >>= 1
i += 1
height, raw = b128.read(raw)
assert len(raw) == 0
ret = [u[:1] + (height,) + u[2:] for u in utxos]
return ret
def decompress_raw(comp_type, raw, chomp=False):
if comp_type == 0 or comp_type == 1:
l = 20
elif comp_type >= 2 and comp_type <= 5:
l = 32
else:
l = comp_type - 6
data = raw[:l]
raw = raw[l:]
if not chomp:
assert len(raw) == 0
if comp_type == 0:
script = OP_DUP + OP_HASH160 + chr(20) + data + \
OP_EQUALVERIFY + OP_CHECKSIG
elif comp_type == 1:
script = OP_HASH160 + chr(20) + data + OP_EQUAL
elif comp_type == 2 or comp_type == 3:
script = chr(33) + chr(comp_type) + data + OP_CHECKSIG
elif comp_type == 4 or comp_type == 5:
comp_pubkey = chr(comp_type - 2) + data
pubkey = secp256k1.PublicKey(
comp_pubkey, raw=True
).serialize(compressed=False)
script = chr(65) + pubkey + OP_CHECKSIG
else:
script = data
return script, raw
def deobfuscate(key, obf):
n = len(key)
de = [chr(key[i % n] ^ ord(b)) for i, b in enumerate(obf)]
return "".join(de)
|
python
|
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Provider Model Serializers."""
import logging
from collections import defaultdict
from django.conf import settings
from django.db import transaction
from rest_framework import serializers
from rest_framework.fields import empty
from api.common import error_obj
from api.iam.serializers import AdminCustomerSerializer
from api.iam.serializers import CustomerSerializer
from api.iam.serializers import UserSerializer
from api.provider.models import Provider
from api.provider.models import ProviderAuthentication
from api.provider.models import ProviderBillingSource
from api.utils import DateHelper
from providers.provider_access import ProviderAccessor
from providers.provider_errors import ProviderErrors
LOG = logging.getLogger(__name__)
PROVIDER_CHOICE_LIST = [
provider[0]
for provider in Provider.PROVIDER_CHOICES
if (settings.DEVELOPMENT or (not settings.DEVELOPMENT and "-local" not in provider[0].lower()))
]
LCASE_PROVIDER_CHOICE_LIST = [provider.lower() for provider in PROVIDER_CHOICE_LIST]
REPORT_PREFIX_MAX_LENGTH = 64
def validate_field(data, valid_fields, key):
"""Validate a field."""
message = f"One or more required fields is invalid/missing. Required fields are {valid_fields}"
diff = set(valid_fields) - set(data)
if not diff:
return data
raise serializers.ValidationError(error_obj(key, message))
class ProviderAuthenticationSerializer(serializers.ModelSerializer):
"""Serializer for the Provider Authentication model."""
uuid = serializers.UUIDField(read_only=True)
credentials = serializers.JSONField(allow_null=False, required=True)
class Meta:
"""Metadata for the serializer."""
model = ProviderAuthentication
fields = ("uuid", "credentials")
class AWSAuthenticationSerializer(ProviderAuthenticationSerializer):
"""AWS auth serializer."""
def validate_credentials(self, creds):
"""Validate credentials field."""
key = "role_arn"
fields = ["role_arn"]
return validate_field(creds, fields, key)
class OCIAuthenticationSerializer(ProviderAuthenticationSerializer):
"""OCI auth serializer."""
def validate_credentials(self, creds):
"""Validate credentials field."""
key = "tenant"
fields = ["tenant"]
return validate_field(creds, fields, key)
class AzureAuthenticationSerializer(ProviderAuthenticationSerializer):
"""Azure auth serializer."""
def validate_credentials(self, creds):
"""Validate credentials field."""
key = ""
fields = ["subscription_id", "tenant_id", "client_id", "client_secret"]
return validate_field(creds, fields, key)
def to_representation(self, instance):
"""Control output of serializer."""
provider = super().to_representation(instance)
if provider.get("authentication", {}).get("credentials", {}).get("client_secret"):
del provider["authentication"]["credentials"]["client_secret"]
return provider
class GCPAuthenticationSerializer(ProviderAuthenticationSerializer):
"""GCP auth serializer."""
def validate_credentials(self, creds):
"""Validate credentials field."""
key = "project_id"
fields = ["project_id"]
return validate_field(creds, fields, key)
class IBMAuthenticationSerializer(ProviderAuthenticationSerializer):
"""IBM auth serializer."""
def validate_credentials(self, creds):
"""Validate credentials field."""
key = "iam_token"
fields = ["iam_token"]
return validate_field(creds, fields, key)
class OCPAuthenticationSerializer(ProviderAuthenticationSerializer):
"""OCP auth serializer."""
def validate_credentials(self, creds):
"""Validate credentials field."""
key = "cluster_id"
fields = ["cluster_id"]
return validate_field(creds, fields, key)
class ProviderBillingSourceSerializer(serializers.ModelSerializer):
"""Serializer for the Provider Billing Source model."""
uuid = serializers.UUIDField(read_only=True)
data_source = serializers.JSONField(allow_null=False, required=True)
class Meta:
"""Metadata for the serializer."""
model = ProviderBillingSource
fields = ("uuid", "data_source")
class AWSBillingSourceSerializer(ProviderBillingSourceSerializer):
"""AWS billing source serializer."""
def validate_data_source(self, data_source):
"""Validate data_source field."""
key = "provider.data_source"
fields = ["bucket"]
return validate_field(data_source, fields, key)
class OCIBillingSourceSerializer(ProviderBillingSourceSerializer):
"""OCI billing source serializer."""
data_source = serializers.JSONField(required=False, default={})
class AzureBillingSourceSerializer(ProviderBillingSourceSerializer):
"""Azure billing source serializer."""
def validate_data_source(self, data_source):
"""Validate data_source field."""
key = "provider.data_source"
fields = ["resource_group", "storage_account"]
return validate_field(data_source, fields, key)
class GCPBillingSourceSerializer(ProviderBillingSourceSerializer):
"""GCP billing source serializer."""
def validate_data_source(self, data_source):
"""Validate data_source field."""
key = "provider.data_source"
fields = ["dataset"]
data = validate_field(data_source, fields, key)
report_prefix = data_source.get("report_prefix", "")
if report_prefix and len(report_prefix) > REPORT_PREFIX_MAX_LENGTH:
key = "data_source.report_prefix"
message = f"Ensure this field has no more than {REPORT_PREFIX_MAX_LENGTH} characters."
raise serializers.ValidationError(error_obj(key, message))
return data
class IBMBillingSourceSerializer(ProviderBillingSourceSerializer):
"""IBM billing source serializer."""
def validate_data_source(self, data_source):
"""Validate data_source field."""
key = "provider.data_source"
fields = ["enterprise_id"]
return validate_field(data_source, fields, key)
class OCPBillingSourceSerializer(ProviderBillingSourceSerializer):
"""OCP billing source serializer."""
data_source = serializers.JSONField(required=False, default={})
# Registry of authentication serializers.
AUTHENTICATION_SERIALIZERS = {
Provider.PROVIDER_AWS: AWSAuthenticationSerializer,
Provider.PROVIDER_AWS_LOCAL: AWSAuthenticationSerializer,
Provider.PROVIDER_OCI: OCIAuthenticationSerializer,
Provider.PROVIDER_OCI_LOCAL: OCIAuthenticationSerializer,
Provider.PROVIDER_AZURE: AzureAuthenticationSerializer,
Provider.PROVIDER_AZURE_LOCAL: AzureAuthenticationSerializer,
Provider.PROVIDER_GCP: GCPAuthenticationSerializer,
Provider.PROVIDER_GCP_LOCAL: GCPAuthenticationSerializer,
Provider.PROVIDER_IBM: IBMAuthenticationSerializer,
Provider.PROVIDER_IBM_LOCAL: IBMAuthenticationSerializer,
Provider.PROVIDER_OCP: OCPAuthenticationSerializer,
Provider.OCP_AWS: AWSAuthenticationSerializer,
Provider.OCP_AZURE: AzureAuthenticationSerializer,
}
# Registry of billing_source serializers.
BILLING_SOURCE_SERIALIZERS = {
Provider.PROVIDER_AWS: AWSBillingSourceSerializer,
Provider.PROVIDER_AWS_LOCAL: AWSBillingSourceSerializer,
Provider.PROVIDER_OCI: OCIBillingSourceSerializer,
Provider.PROVIDER_OCI_LOCAL: OCIBillingSourceSerializer,
Provider.PROVIDER_AZURE: AzureBillingSourceSerializer,
Provider.PROVIDER_AZURE_LOCAL: AzureBillingSourceSerializer,
Provider.PROVIDER_GCP: GCPBillingSourceSerializer,
Provider.PROVIDER_GCP_LOCAL: GCPBillingSourceSerializer,
Provider.PROVIDER_IBM: IBMBillingSourceSerializer,
Provider.PROVIDER_IBM_LOCAL: IBMBillingSourceSerializer,
Provider.PROVIDER_OCP: OCPBillingSourceSerializer,
Provider.OCP_AWS: AWSBillingSourceSerializer,
Provider.OCP_AZURE: AzureBillingSourceSerializer,
}
class ProviderSerializer(serializers.ModelSerializer):
"""Serializer for the Provider model."""
uuid = serializers.UUIDField(allow_null=True, required=False)
name = serializers.CharField(max_length=256, required=True, allow_null=False, allow_blank=False)
type = serializers.ChoiceField(choices=LCASE_PROVIDER_CHOICE_LIST)
created_timestamp = serializers.DateTimeField(read_only=True)
customer = CustomerSerializer(read_only=True)
created_by = UserSerializer(read_only=True)
active = serializers.BooleanField(read_only=True)
paused = serializers.BooleanField(required=False)
class Meta:
"""Metadata for the serializer."""
model = Provider
fields = (
"uuid",
"name",
"type",
"authentication",
"billing_source",
"customer",
"created_by",
"created_timestamp",
"active",
"paused",
)
def __init__(self, instance=None, data=empty, **kwargs):
"""Initialize the Provider Serializer.
Here we ensure we use the appropriate serializer to validate the
authentication and billing_source parameters.
"""
super().__init__(instance, data, **kwargs)
provider_type = None
if data and data != empty:
provider_type = data.get("type")
if provider_type and provider_type.lower() not in LCASE_PROVIDER_CHOICE_LIST:
key = "type"
message = f"{provider_type} is not a valid source type."
raise serializers.ValidationError(error_obj(key, message))
if provider_type:
provider_type = provider_type.lower()
self.fields["authentication"] = AUTHENTICATION_SERIALIZERS.get(
Provider.PROVIDER_CASE_MAPPING.get(provider_type)
)()
self.fields["billing_source"] = BILLING_SOURCE_SERIALIZERS.get(
Provider.PROVIDER_CASE_MAPPING.get(provider_type)
)()
else:
self.fields["authentication"] = ProviderAuthenticationSerializer()
self.fields["billing_source"] = ProviderBillingSourceSerializer()
@property
def demo_credentials(self):
"""Build formatted credentials for our nise-populator demo accounts."""
creds_by_source_type = defaultdict(list)
for account, cred_dict in settings.DEMO_ACCOUNTS.items():
for cred, info in cred_dict.items():
if info.get("source_type") == Provider.PROVIDER_AWS:
creds_by_source_type[Provider.PROVIDER_AWS].append({"role_arn": cred})
elif info.get("source_type") == Provider.PROVIDER_AZURE:
creds_by_source_type[Provider.PROVIDER_AZURE].append({"client_id": cred})
elif info.get("source_type") == Provider.PROVIDER_GCP:
creds_by_source_type[Provider.PROVIDER_GCP].append({"project_id": cred})
return creds_by_source_type
def get_request_info(self):
"""Obtain request information like user and customer context."""
user = self.context.get("user")
customer = self.context.get("customer")
if user and customer:
return user, customer
request = self.context.get("request")
if request and hasattr(request, "user"):
user = request.user
if user.customer:
customer = user.customer
else:
key = "customer"
message = "Customer for requesting user could not be found."
raise serializers.ValidationError(error_obj(key, message))
else:
key = "created_by"
message = "Requesting user could not be found."
raise serializers.ValidationError(error_obj(key, message))
return user, customer
@transaction.atomic
def create(self, validated_data):
"""Create a provider from validated data."""
user, customer = self.get_request_info()
provider_type = validated_data["type"].lower()
provider_type = Provider.PROVIDER_CASE_MAPPING.get(provider_type)
validated_data["type"] = provider_type
interface = ProviderAccessor(provider_type)
authentication = validated_data.pop("authentication")
credentials = authentication.get("credentials")
billing_source = validated_data.pop("billing_source")
data_source = billing_source.get("data_source")
if self._is_demo_account(provider_type, credentials):
LOG.info("Customer account is a DEMO account. Skipping cost_usage_source_ready check.")
else:
interface.cost_usage_source_ready(credentials, data_source)
bill, __ = ProviderBillingSource.objects.get_or_create(**billing_source)
auth, __ = ProviderAuthentication.objects.get_or_create(**authentication)
# We can re-use a billing source or an auth, but not the same combination.
dup_queryset = (
Provider.objects.filter(authentication=auth).filter(billing_source=bill).filter(customer=customer)
)
if dup_queryset.count() != 0:
conflict_provider = dup_queryset.first()
message = (
f"Cost management does not allow duplicate accounts. "
f"{conflict_provider.name} already exists. Edit source settings to configure a new source."
)
LOG.warning(message)
raise serializers.ValidationError(error_obj(ProviderErrors.DUPLICATE_AUTH, message))
provider = Provider.objects.create(**validated_data)
provider.customer = customer
provider.created_by = user
provider.authentication = auth
provider.billing_source = bill
provider.active = True
provider.save()
customer.date_updated = DateHelper().now_utc
customer.save()
return provider
def update(self, instance, validated_data):
"""Update a Provider instance from validated data."""
_, customer = self.get_request_info()
provider_type = validated_data["type"].lower()
provider_type = Provider.PROVIDER_CASE_MAPPING.get(provider_type)
validated_data["type"] = provider_type
interface = ProviderAccessor(provider_type)
authentication = validated_data.pop("authentication")
credentials = authentication.get("credentials")
billing_source = validated_data.pop("billing_source")
data_source = billing_source.get("data_source")
# updating `paused` must happen regardless of Provider availability
instance.paused = validated_data.pop("paused", instance.paused)
try:
if self._is_demo_account(provider_type, credentials):
LOG.info("Customer account is a DEMO account. Skipping cost_usage_source_ready check.")
else:
interface.cost_usage_source_ready(credentials, data_source)
except serializers.ValidationError as validation_error:
instance.active = False
instance.save()
raise validation_error
with transaction.atomic():
bill, __ = ProviderBillingSource.objects.get_or_create(**billing_source)
auth, __ = ProviderAuthentication.objects.get_or_create(**authentication)
if instance.billing_source != bill or instance.authentication != auth:
dup_queryset = (
Provider.objects.filter(authentication=auth).filter(billing_source=bill).filter(customer=customer)
)
if dup_queryset.count() != 0:
conflict_provider = dup_queryset.first()
message = (
f"Cost management does not allow duplicate accounts. "
f"{conflict_provder.name} already exists. Edit source settings to configure a new source."
)
LOG.warning(message)
raise serializers.ValidationError(error_obj(ProviderErrors.DUPLICATE_AUTH, message))
for key in validated_data.keys():
setattr(instance, key, validated_data[key])
instance.authentication = auth
instance.billing_source = bill
instance.active = True
instance.save()
customer.date_updated = DateHelper().now_utc
customer.save()
return instance
def _is_demo_account(self, provider_type, credentials):
"""Test whether this source is a demo account."""
key_types = {
Provider.PROVIDER_AWS: "role_arn",
Provider.PROVIDER_AZURE: "client_id",
Provider.PROVIDER_GCP: "project_id",
}
key_to_check = key_types.get(provider_type, "")
creds_to_check = self.demo_credentials.get(provider_type, [])
for cred in creds_to_check:
if credentials.get(key_to_check, True) == cred.get(key_to_check, False):
return True
return False
class AdminProviderSerializer(ProviderSerializer):
"""Provider serializer specific to service admins."""
customer = AdminCustomerSerializer(read_only=True)
|
python
|
"""
collision_detection.py is used on each iteration to detect whether
an agent has collided with walls and to provide an adequate environment
response (i.e. updated position & velocity such that the agent slides along the wall).
"""
import numpy as np
import pygame as pg
from decimal import Decimal
import configs as cfg
import maze
x_var = cfg.X
y_var = cfg.Y
pos = cfg.BOID_POS_VAR * cfg.Dimensions
vel = cfg.BOID_VEL_VAR * cfg.Dimensions
class Amendments:
""" Amendment data holder class """
# Field indices in the packet generated by self.get_packet()
amount_i = 0
indices_i = 1
values_i = 2
def __init__(self):
self.amount = 0
self.indices = []
self.values = []
def get_packet(self):
""" Returns all amendments in a packet format """
return (np.uint16(self.amount),
np.asarray(self.indices, dtype=np.uint16),
np.asarray(self.values, dtype=np.float32))
def clear(self):
self.amount = 0
self.indices = []
self.values = []
def run(flock, previous_flock, amaze, template_triangles, amendments):
"""
Detects collisions and calculates the amendments required for
boids to avoid them.
For each boid it first checks whether the boid has collided with a wall by rotating in
place. If so, the boid is moved out of the wall. If not, the check continues:
the boid's impulse (desired displacement vector) is calculated and
broken into steps. For each step (partial impulse) it checks whether a wall
is hit. If it is, the boid slides along the wall. Multiple walls are handled correctly.
TODO: Currently this is imprecise near corners - there is a small transparent square
at the corner of each wall with size (cfg.collision_check_step, cfg.collision_check_step),
and a boid can slip through it. Proper handling would require more complex logic
and is out of the scope of this project.
"""
amendments.clear()
i = 0
for boid in flock.np_arrays:
impulse = np.hypot(boid[vel + x_var], boid[vel + y_var])
if impulse > 0:
# We'll start from previous position and if no walls are hit,
# increase it up to the new boid position
boid[pos + x_var] = previous_flock.np_arrays[i][pos + x_var]
boid[pos + y_var] = previous_flock.np_arrays[i][pos + y_var]
template_triangle = template_triangles[min(
int(np.round(np.degrees(flock.object_list[i].orientation))),
359)]
triangle_offset = template_triangle.get_triangle_top_left()
triangle_rect = template_triangle.rect.copy()
collision_detected = False
# First check whether the boid has collided with a wall without
# moving (e.g. rotated near the wall)
# ------------------------------------------------------
hit_top, hit_right, hit_bottom, hit_left = \
check_for_collision([boid[pos + x_var],
boid[pos + y_var]],
[boid[vel + x_var],
boid[vel + y_var]],
triangle_rect,
triangle_offset,
amaze)
if hit_right or hit_left or hit_top or hit_bottom:
collision_detected = True
if cfg.bounding_rects_show:
flock.object_list[i].collided = True
dx = dy = 0
if hit_right:
wall_left_x = np.trunc(triangle_rect.right / cfg.tile_width) * cfg.tile_width
# dx will be negative
dx = wall_left_x - triangle_rect.right
if hit_left:
wall_right_x = np.ceil(triangle_rect.left / cfg.tile_width) * cfg.tile_width
# dx will be positive
dx = wall_right_x - triangle_rect.left
if hit_top:
wall_above_y = np.ceil(triangle_rect.top / cfg.tile_height) * cfg.tile_height
# dy will be positive
dy = wall_above_y - triangle_rect.top
if hit_bottom:
wall_below_y = np.trunc(triangle_rect.bottom / cfg.tile_height) * cfg.tile_height
# dy will be negative
dy = wall_below_y - triangle_rect.bottom
deltas_in_tiles = maze.to_unit_tiles(dx, dy)
boid[pos + x_var] = boid[pos + x_var] + deltas_in_tiles[x_var]
boid[pos + y_var] = boid[pos + y_var] + deltas_in_tiles[y_var]
# Collision check for this boid is finished
if not collision_detected:
# First position is unobstructed, so check positions ahead
# ------------------------------------------------------
unit_impulse = cfg.collision_check_step
# noinspection PyTypeChecker
dx = boid[vel + x_var] * unit_impulse / impulse # Unit squares
# noinspection PyTypeChecker
dy = boid[vel + y_var] * unit_impulse / impulse # Unit squares
number_of_checks = int(np.ceil(impulse / unit_impulse))
for j in range(0, number_of_checks):
if (j + 1) * unit_impulse > impulse: # Last step can be smaller
# Using Decimal for the final, smaller step because binary floating-point
# arithmetic is approximate and the rounding error would otherwise show up here.
unit_impulse = np.float32(Decimal(impulse - unit_impulse * j))
dx = boid[vel + x_var] * unit_impulse / impulse # Unit squares
dy = boid[vel + y_var] * unit_impulse / impulse # Unit squares
hit_top, hit_right, hit_bottom, hit_left = \
check_for_collision([boid[pos + x_var] + dx,
boid[pos + y_var] + dy],
[boid[vel + x_var],
boid[vel + y_var]],
triangle_rect,
triangle_offset,
amaze)
if hit_right or hit_left or hit_top or hit_bottom:
collision_detected = True
if cfg.bounding_rects_show:
flock.object_list[i].collided = True
# Nullify impulse if a wall is on the way
if (dx > 0 and hit_right) or (dx < 0 and hit_left):
dx = 0
if (dy > 0 and hit_bottom) or (dy < 0 and hit_top):
dy = 0
if dx == 0 and dy == 0:
# Can't proceed
break
if not maze.outside_maze(boid[pos + x_var] + dx,
boid[pos + y_var] + dy):
# The new position is still inside the maze, so accept the step
# Apply amendments to the host data according to the type of collision
# I.e. slide along the wall
boid[pos + x_var] = boid[pos + x_var] + dx
boid[pos + y_var] = boid[pos + y_var] + dy
else:
# Boid is outside the maze, no point continuing the check
break
if collision_detected:
# Save amendments to transfer them later to the GPU
amendments.values.append(np.copy([boid[pos + x_var],
boid[pos + y_var]]))
amendments.indices.append(i)
amendments.amount += 1
i += 1
def check_for_collision(boid_center, boid_impulse, triangle_rect, triangle_offset, amaze):
""" Returns collision types (left, right, top, bottom) """
triangle_rect_coors = maze.to_coors(
boid_center[x_var],
boid_center[y_var])
triangle_rect.left = triangle_rect_coors[x_var] + triangle_offset[x_var]
triangle_rect.top = triangle_rect_coors[y_var] + triangle_offset[y_var]
# Get new neighboring walls as a list of coordinate pairs
neighboring_walls = \
maze.get_neighboring_tiles(boid_center[x_var], boid_center[y_var],
amaze, maze.Wall, include_none=False)
# Convert coordinates into rects
neighboring_walls_rects = []
for wall in neighboring_walls:
neighboring_walls_rects.append(
pg.Rect(wall[x_var] * cfg.tile_width, wall[y_var] * cfg.tile_height,
cfg.tile_width, cfg.tile_height))
# Check if triangle collides with any of them
colliding_walls = triangle_rect.collidelistall(neighboring_walls_rects)
hit_top = hit_bottom = hit_left = hit_right = False
diagonal_collision = None
if colliding_walls:
# Collision detected
for wall_i in colliding_walls:
# Get collision type (horizontal/vertical)
collision_types = get_collision_type(neighboring_walls[wall_i][x_var],
neighboring_walls[wall_i][y_var],
maze.to_unit_tiles(triangle_rect.centerx,
triangle_rect.centery),
triangle_rect)
if collision_types[0] == maze.Orientation.diagonal:
diagonal_collision = collision_types[1:]
else:
for collision_type in collision_types:
if collision_type == maze.Location.top:
hit_top = True
if collision_type == maze.Location.bottom:
hit_bottom = True
if collision_type == maze.Location.left:
hit_left = True
if collision_type == maze.Location.right:
hit_right = True
if diagonal_collision is not None:
if not (hit_top or hit_bottom or hit_left or hit_right):
# If boid has collided only with a diagonal wall, then alter
# its velocity, otherwise ignore it.
if diagonal_collision == [maze.Location.left, maze.Location.bottom]:
if np.abs(boid_impulse[y_var]) > np.abs(boid_impulse[x_var]):
hit_left = True
else:
hit_bottom = True
if diagonal_collision == [maze.Location.right, maze.Location.top]:
if np.abs(boid_impulse[y_var]) > np.abs(boid_impulse[x_var]):
hit_right = True
else:
hit_top = True
if diagonal_collision == [maze.Location.right, maze.Location.bottom]:
if np.abs(boid_impulse[y_var]) > np.abs(boid_impulse[x_var]):
hit_right = True
else:
hit_bottom = True
return hit_top, hit_right, hit_bottom, hit_left
def get_collision_type(wall_x_float, wall_y_float, boid_pos_float, triangle_rect):
"""
Returns the type of collision (horizontal/vertical).
C H C
V b V
C H C
(H - horizontal, V - vertical, C - corner, b - boid previous position)
"""
wall_x = int(wall_x_float)
wall_y = int(wall_y_float)
boid_x = int(boid_pos_float[x_var])
boid_y = int(boid_pos_float[y_var])
if wall_x != boid_x and wall_y != boid_y:
# Corner wall
return get_diagonal_collision_type(wall_x, wall_y, [boid_x, boid_y], triangle_rect)
if wall_y != boid_y:
# Horizontal wall
if wall_y < boid_y:
return [maze.Location.top, ]
else:
return [maze.Location.bottom, ]
# Vertical wall
if wall_x > boid_x:
return [maze.Location.right, ]
else:
return [maze.Location.left, ]
def get_diagonal_collision_type(wall_x, wall_y, boid_center, triangle_rect):
""" Checks with which side of the diagonally positioned (not oriented) wall boid has collided """
# Get wall type
diagonal_wall_position = 0
if wall_x == np.trunc(boid_center[x_var]) - 1:
""" T F F
F F F
T F F
(one of the "True" walls) """
if wall_y == np.trunc(boid_center[y_var]) - 1:
diagonal_wall_position = (maze.Location.left, maze.Location.top)
else:
diagonal_wall_position = (maze.Location.left, maze.Location.bottom)
if wall_x == np.trunc(boid_center[x_var]) + 1:
""" F F T
F F F
F F T
(one of the "True" walls) """
if wall_y == np.trunc(boid_center[y_var]) - 1:
diagonal_wall_position = (maze.Location.right, maze.Location.top)
else:
diagonal_wall_position = (maze.Location.right, maze.Location.bottom)
wall_left, wall_top = maze.to_coors(wall_x,
wall_y)
wall_right, wall_bottom = maze.to_coors(wall_x + 1,
wall_y + 1)
precision_x = cfg.collision_check_step * cfg.window_width
precision_y = cfg.collision_check_step * cfg.window_height
# Get collision type
wall_on_left = None
wall_on_right = None
wall_above = None
wall_below = None
if diagonal_wall_position[1] == maze.Location.top and triangle_rect.top >= wall_top - precision_y:
wall_above = True
if diagonal_wall_position[1] == maze.Location.bottom and triangle_rect.bottom <= wall_top + precision_y:
wall_below = True
if diagonal_wall_position[0] == maze.Location.right:
# One of the walls on right from the boid's position
if triangle_rect.right <= wall_left + precision_x:
# Boid is at least on the left edge of the wall
wall_on_right = True
if wall_on_right and (wall_above or wall_below):
# Boid is on both edges of the wall, i.e. on its corner
return [maze.Orientation.diagonal, maze.Location.right, diagonal_wall_position[1]]
if wall_on_right:
# Boid is only on the left edge of the wall
return [maze.Orientation.diagonal, maze.Location.right]
else: # diagonal_wall_position[0] == maze.Location.left
# One of the walls on left from the boid's position
if triangle_rect.left >= wall_right - precision_x:
# Boid is at least on the right edge of the wall
wall_on_left = True
if wall_on_left and (wall_above or wall_below):
# Boid is on both edges of the wall, i.e. on its corner
return [maze.Orientation.diagonal, maze.Location.left, diagonal_wall_position[1]]
if wall_on_left:
# Boid is only on the right edge of the wall
return [maze.Orientation.diagonal, maze.Location.left]
if wall_above or wall_below:
return [maze.Orientation.diagonal, diagonal_wall_position[1]]
|
python
|
import copy
import numpy as np
import pytest
import xarray as xr
from gcm_filters import Filter, FilterShape, GridType
from gcm_filters.filter import FilterSpec
def _check_equal_filter_spec(spec1, spec2):
assert spec1.n_steps_total == spec2.n_steps_total
np.testing.assert_allclose(spec1.s, spec2.s)
assert (spec1.is_laplacian == spec2.is_laplacian).all()
assert spec1.s_max == spec2.s_max
np.testing.assert_allclose(spec1.p, spec2.p, rtol=1e-07, atol=1e-07)
# These values were just hard copied from my dev environment.
# All they do is check that the results match what I got when I ran the code.
# They do NOT assure that the filter spec is correct.
@pytest.mark.parametrize(
"filter_args, expected_filter_spec",
[
(
dict(
filter_scale=10.0,
dx_min=1.0,
filter_shape=FilterShape.GAUSSIAN,
transition_width=np.pi,
ndim=2,
),
FilterSpec(
n_steps_total=10,
s=[
8.0 + 0.0j,
3.42929331 + 0.0j,
7.71587822 + 0.0j,
2.41473596 + 0.0j,
7.18021542 + 0.0j,
1.60752541 + 0.0j,
6.42502377 + 0.0j,
0.81114415 - 0.55260985j,
5.50381534 + 0.0j,
4.48146765 + 0.0j,
],
is_laplacian=[
True,
True,
True,
True,
True,
True,
True,
False,
True,
True,
],
s_max=8.0,
p=[
0.09887381,
-0.19152534,
0.1748326,
-0.14975371,
0.12112337,
-0.09198484,
0.0662522,
-0.04479323,
0.02895827,
-0.0173953,
0.00995974,
-0.00454758,
],
),
),
(
dict(
filter_scale=2.0,
dx_min=1.0,
filter_shape=FilterShape.TAPER,
transition_width=np.pi,
ndim=1,
),
FilterSpec(
n_steps_total=3,
s=[
5.23887374 - 1.09644141j,
-0.76856043 - 1.32116962j,
3.00058907 - 2.95588288j,
],
is_laplacian=[False, False, False],
s_max=4.0,
p=[
0.83380304,
-0.23622724,
-0.06554041,
0.01593978,
0.00481014,
-0.00495532,
0.00168445,
],
),
),
],
)
def test_filter_spec(filter_args, expected_filter_spec):
"""This test just verifies that the filter specification looks as expected."""
filter = Filter(**filter_args)
_check_equal_filter_spec(filter.filter_spec, expected_filter_spec)
# TODO: check other properties of filter_spec?
# define (for now: hard-code) which grids are associated with vector Laplacians
vector_grids = [gt for gt in GridType if gt.name in {"VECTOR_C_GRID"}]
# all remaining grids are for scalar Laplacians
scalar_grids = [gt for gt in GridType if gt not in vector_grids]
@pytest.fixture(scope="module", params=scalar_grids)
def grid_type_and_input_ds(request):
grid_type = request.param
ny, nx = (128, 256)
data = np.random.rand(ny, nx)
grid_vars = {}
if grid_type == GridType.REGULAR_WITH_LAND:
mask_data = np.ones_like(data)
mask_data[: (ny // 2), : (nx // 2)] = 0
da_mask = xr.DataArray(mask_data, dims=["y", "x"])
grid_vars = {"wet_mask": da_mask}
if grid_type == GridType.IRREGULAR_WITH_LAND:
mask_data = np.ones_like(data)
mask_data[: (ny // 2), : (nx // 2)] = 0
da_mask = xr.DataArray(mask_data, dims=["y", "x"])
grid_data = np.ones_like(data)
da_grid = xr.DataArray(grid_data, dims=["y", "x"])
grid_vars = {
"wet_mask": da_mask,
"dxw": da_grid,
"dyw": da_grid,
"dxs": da_grid,
"dys": da_grid,
"area": da_grid,
"kappa_w": da_grid,
"kappa_s": da_grid,
}
if grid_type == GridType.TRIPOLAR_REGULAR_WITH_LAND:
mask_data = np.ones_like(data)
mask_data[: (ny // 2), : (nx // 2)] = 0
mask_data[0, :] = 0 # Antarctica
da_mask = xr.DataArray(mask_data, dims=["y", "x"])
grid_vars = {"wet_mask": da_mask}
if grid_type == GridType.TRIPOLAR_POP_WITH_LAND:
mask_data = np.ones_like(data)
mask_data[: (ny // 2), : (nx // 2)] = 0
mask_data[0, :] = 0 # Antarctica
da_mask = xr.DataArray(mask_data, dims=["y", "x"])
grid_data = np.ones_like(data)
da_grid = xr.DataArray(grid_data, dims=["y", "x"])
grid_vars = {
"wet_mask": da_mask,
"dxe": da_grid,
"dye": da_grid,
"dxn": da_grid,
"dyn": da_grid,
"tarea": da_grid,
}
da = xr.DataArray(data, dims=["y", "x"])
return grid_type, da, grid_vars
@pytest.fixture(scope="module", params=vector_grids)
def vector_grid_type_and_input_ds(request):
grid_type = request.param
ny, nx = (128, 256)
grid_vars = {}
if grid_type == GridType.VECTOR_C_GRID:
# construct spherical coordinate system similar to MOM6 NeverWorld2 grid
# define latitudes and longitudes
lat_min = -70
lat_max = 70
lat_u = np.linspace(
lat_min + 0.5 * (lat_max - lat_min) / ny,
lat_max - 0.5 * (lat_max - lat_min) / ny,
ny,
)
lat_v = np.linspace(lat_min + (lat_max - lat_min) / ny, lat_max, ny)
lon_min = 0
lon_max = 60
lon_u = np.linspace(lon_min + (lon_max - lon_min) / nx, lon_max, nx)
lon_v = np.linspace(
lon_min + 0.5 * (lon_max - lon_min) / nx,
lon_max - 0.5 * (lon_max - lon_min) / nx,
nx,
)
(geolon_u, geolat_u) = np.meshgrid(lon_u, lat_u)
(geolon_v, geolat_v) = np.meshgrid(lon_v, lat_v)
# radius of a random planet smaller than Earth
R = 6378000 * np.random.rand(1)
# dx varies spatially
dxCu = R * np.cos(geolat_u / 360 * 2 * np.pi)
dxCv = R * np.cos(geolat_v / 360 * 2 * np.pi)
dxBu = dxCv + np.roll(dxCv, -1, axis=1)
dxT = dxCu + np.roll(dxCu, 1, axis=1)
da_dxCu = xr.DataArray(dxCu, dims=["y", "x"])
da_dxCv = xr.DataArray(dxCv, dims=["y", "x"])
da_dxBu = xr.DataArray(dxBu, dims=["y", "x"])
da_dxT = xr.DataArray(dxT, dims=["y", "x"])
# dy is set constant, equal to dx at the equator
dy = np.max(dxCu) * np.ones((ny, nx))
da_dy = xr.DataArray(dy, dims=["y", "x"])
# compute grid cell areas
area_u = dxCu * dy
area_v = dxCv * dy
da_area_u = xr.DataArray(area_u, dims=["y", "x"])
da_area_v = xr.DataArray(area_v, dims=["y", "x"])
# set isotropic and anisotropic kappas
kappa_data = np.ones((ny, nx))
da_kappa = xr.DataArray(kappa_data, dims=["y", "x"])
# put a big island in the middle
mask_data = np.ones((ny, nx))
mask_data[: (ny // 2), : (nx // 2)] = 0
da_mask = xr.DataArray(mask_data, dims=["y", "x"])
grid_vars = {
"wet_mask_t": da_mask,
"wet_mask_q": da_mask,
"dxT": da_dxT,
"dyT": da_dy,
"dxCu": da_dxCu,
"dyCu": da_dy,
"dxCv": da_dxCv,
"dyCv": da_dy,
"dxBu": da_dxBu,
"dyBu": da_dy,
"area_u": da_area_u,
"area_v": da_area_v,
"kappa_iso": da_kappa,
"kappa_aniso": da_kappa,
}
data_u = np.random.rand(ny, nx)
data_v = np.random.rand(ny, nx)
da_u = xr.DataArray(data_u, dims=["y", "x"])
da_v = xr.DataArray(data_v, dims=["y", "x"])
return grid_type, da_u, da_v, grid_vars, geolat_u
#################### Diffusion-based filter tests ########################################
@pytest.mark.parametrize(
"filter_args",
[dict(filter_scale=3.0, dx_min=1.0, n_steps=0, filter_shape=FilterShape.GAUSSIAN)],
)
def test_diffusion_filter(grid_type_and_input_ds, filter_args):
"""Test all diffusion-based filters: filters that use a scalar Laplacian."""
grid_type, da, grid_vars = grid_type_and_input_ds
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **filter_args)
filter.plot_shape()
filtered = filter.apply(da, dims=["y", "x"])
# check conservation
# this would need to be replaced by a proper area-weighted integral
da_sum = da.sum()
filtered_sum = filtered.sum()
xr.testing.assert_allclose(da_sum, filtered_sum)
# check that we get an error if we pass a scalar Laplacian to .apply_to_vector,
# where the latter method is for vector Laplacians only
with pytest.raises(ValueError, match=r"Provided Laplacian *"):
filtered_u, filtered_v = filter.apply_to_vector(da, da, dims=["y", "x"])
# check variance reduction
assert (filtered ** 2).sum() < (da ** 2).sum()
# check that we get an error if we leave out any required grid_vars
for gv in grid_vars:
grid_vars_missing = {k: v for k, v in grid_vars.items() if k != gv}
with pytest.raises(ValueError, match=r"Provided `grid_vars` .*"):
filter = Filter(
grid_type=grid_type, grid_vars=grid_vars_missing, **filter_args
)
bad_filter_args = copy.deepcopy(filter_args)
# check that we get an error if ndim > 2 and n_steps = 0
bad_filter_args["ndim"] = 3
bad_filter_args["n_steps"] = 0
with pytest.raises(ValueError, match=r"When ndim > 2, you .*"):
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
# check that we get a warning if n_steps < n_steps_default
bad_filter_args["ndim"] = 2
bad_filter_args["n_steps"] = 3
with pytest.warns(UserWarning, match=r"Warning: You have set n_steps .*"):
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
# check that we get a warning if numerical instability possible
bad_filter_args["n_steps"] = 0
bad_filter_args["filter_scale"] = 1000
with pytest.warns(UserWarning, match=r"Warning: Filter scale much larger .*"):
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **bad_filter_args)
#################### Viscosity-based filter tests ########################################
@pytest.mark.parametrize(
"filter_args",
[dict(filter_scale=1.0, dx_min=1.0, n_steps=10, filter_shape=FilterShape.TAPER)],
)
def test_viscosity_filter(vector_grid_type_and_input_ds, filter_args):
"""Test all viscosity-based filters: filters that use a vector Laplacian."""
grid_type, da_u, da_v, grid_vars, geolat_u = vector_grid_type_and_input_ds
filter = Filter(grid_type=grid_type, grid_vars=grid_vars, **filter_args)
filtered_u, filtered_v = filter.apply_to_vector(da_u, da_v, dims=["y", "x"])
# check conservation under solid body rotation: u = cos(lat), v=0;
data_u = np.cos(geolat_u / 360 * 2 * np.pi)
data_v = np.zeros_like(data_u)
da_u = xr.DataArray(data_u, dims=["y", "x"])
da_v = xr.DataArray(data_v, dims=["y", "x"])
filtered_u, filtered_v = filter.apply_to_vector(da_u, da_v, dims=["y", "x"])
xr.testing.assert_allclose(filtered_u, da_u, atol=1e-12)
xr.testing.assert_allclose(filtered_v, da_v, atol=1e-12)
# check that we get an error if we pass vector Laplacian to .apply, where
# the latter method is for scalar Laplacians only
with pytest.raises(ValueError, match=r"Provided Laplacian *"):
filtered_u = filter.apply(da_u, dims=["y", "x"])
# check that we get an error if we leave out any required grid_vars
for gv in grid_vars:
grid_vars_missing = {k: v for k, v in grid_vars.items() if k != gv}
with pytest.raises(ValueError, match=r"Provided `grid_vars` .*"):
filter = Filter(
grid_type=grid_type, grid_vars=grid_vars_missing, **filter_args
)
|
python
|
import configparser
import logging
import os
import shutil
from pathlib import Path
from urllib.error import URLError
import intake
import matplotlib.image as mplimg
import pandas as pd
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
pkg_name = __name__.split(".")[0]
configpath = Path.home() / ".{}.ini".format(pkg_name)
LOGGER = logging.getLogger(__name__)
def get_config():
"""Read the configfile and return config dict.
Returns
-------
dict
Dictionary with the content of the configpath file.
"""
if not configpath.exists():
raise IOError("Config file {} not found.".format(str(configpath)))
else:
config = configparser.ConfigParser()
config.read(str(configpath))
return config
def get_data_root():
d = get_config()
data_root = Path(d["planet4_db"]["path"]).expanduser()
data_root.mkdir(exist_ok=True, parents=True)
return data_root
def set_database_path(dbfolder):
"""Use to write the database path into the config.
Parameters
----------
dbfolder : str or pathlib.Path
Path to where planet4 will store clustering results by default.
"""
try:
d = get_config()
except IOError:
d = configparser.ConfigParser()
d["planet4_db"] = {}
d["planet4_db"]["path"] = dbfolder
with configpath.open("w") as f:
d.write(f)
print("Saved database path into {}.".format(configpath))
# module global data_root !
if not configpath.exists():
print("No configuration file {} found.\n".format(configpath))
savepath = input("Please provide the path where you want to store planet4 meta-data:")
set_database_path(savepath)
data_root = get_data_root()
def get_subframe(url):
"""Download image if not there yet and return numpy array.
Takes a data record (called 'line'), picks out the image_url.
First checks if the name of that image is already stored in
the image path. If not, it grabs it from the server.
Then uses matplotlib.image to read the image into a numpy-array
and finally returns it.
"""
targetpath = data_root / "images" / os.path.basename(url)
targetpath.parent.mkdir(exist_ok=True)
if not targetpath.exists():
LOGGER.info("Did not find image in cache. Downloading ...")
try:
path = urlretrieve(url)[0]
except URLError:
msg = "Image not in cache. Cannot download subframe image. No internet?"
LOGGER.error(msg)
return None
LOGGER.debug("Done.")
shutil.move(path, str(targetpath))
else:
LOGGER.debug("Found image in cache.")
im = mplimg.imread(targetpath)
return im
def get_url_for_tile_id(tile_id):
storagepath = data_root / "catalogs/tile_urls.csv"
storagepath.parent.mkdir(exist_ok=True)
if not storagepath.exists():
urls = intake.cat.planet4.tile_urls.read()
urls.to_csv(storagepath, index=False)
urls = urls.set_index("tile_id").squeeze()
else:
urls = pd.read_csv(storagepath).set_index("tile_id").squeeze()
return urls.at[tile_id]
def get_intake_p4_item(item_name, update=False):
fname = item_name + ".csv"
storagepath = data_root / f"catalogs/{fname}"
storagepath.parent.mkdir(exist_ok=True, parents=True)
if not storagepath.exists() or update is True:
s = "Downloading catalog"
if update:
s + " for update"
print(s)
df = getattr(intake.cat.planet4, item_name).read()
df.to_csv(storagepath, index=False)
else:
df = pd.read_csv(storagepath)
return df
def get_blotch_catalog(update=False):
return get_intake_p4_item("blotches", update)
def get_fan_catalog(update=False):
return get_intake_p4_item("fans", update)
def get_tile_coordinates(update=False):
return get_intake_p4_item("tile_coordinates", update)
def get_meta_data(update=False):
return get_intake_p4_item("meta_data", update)
def get_region_names(update=False):
return get_intake_p4_item("region_names", update)
def get_tile_urls(update=False):
return get_intake_p4_item("tile_urls", update)
def update_local_catalog_files():
for item in "blotches fans tile_coordinates meta_data region_names tile_urls".split():
print("Updating", item)
get_intake_p4_item(item, update=True)
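# A minimal usage sketch, not part of the original module. It assumes the
# config file already points at a database path and that the intake
# "planet4" catalog is reachable; each call caches a CSV under data_root.
if __name__ == "__main__":
    fans = get_fan_catalog()
    region_names = get_region_names()
    print(f"{len(fans)} fan markings and {len(region_names)} region names cached under {data_root}")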
|
python
|
class Instance(Element,IDisposable):
""" The base class for all instance objects. """
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def getBoundingBox(self,*args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetTotalTransform(self):
"""
GetTotalTransform(self: Instance) -> Transform
Gets the total transform,which includes the true north transform for instances
like import instances.
Returns: The calculated total transform.
"""
pass
def GetTransform(self):
"""
GetTransform(self: Instance) -> Transform
Gets the transform of the instance.
Returns: The inherent transform.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self,*args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
|
python
|
from layers import *
from encoding import *
import matplotlib.pyplot as plt
import csv
import sys
import getopt
import random
# Path to save the parameters
filename = 'parameters.npz'
# Train the RNN with the given parameters
def train(learning_rate, units, epochs):
# Try to load the parameters if they are saved, create a new RNN with the specified units otherwise
rnn = RNN(filename=filename, units=units)
# Extract the strain names from the dataset
with open('cannabis.csv', newline='', encoding="utf-8") as csvfile:
cannabis_data = csv.reader(csvfile)
names_oh = []
excluded_names = 0
print('Loading weed strain names from database...')
# The first column of the data contains the strain name
for row in cannabis_data:
# Replace hyphens with spaces
name = row[0].replace('-', ' ').lower()
# Add the end token to the name
name = name + '>'
# Convert to one-hot vector and append to the array
valid, name_oh = one_hot_string(name)
# Only append the name if it's valid (no numbers or other invalid characters in it)
if valid:
names_oh.append(name_oh)
else:
excluded_names += 1
# First row is metadata so delete it
names_oh = names_oh[1:]
print('{} names were excluded because they contained numbers or other invalid characters. {} names remain.'.format(excluded_names, len(names_oh)))
# Keep track of the average cost in each epoch
costs = []
print('==============================================')
print('Training for {} epochs with learning_rate={}'.format(epochs, learning_rate))
for e in range(epochs):
cost = 0
for name_oh in names_oh:
# Apply forward-propagation
cost += rnn(name_oh)
# Backpropagate and update weights of the RNN
rnn.backpropagate()
rnn.update_weights(learning_rate)
cost /= len(names_oh)
print('(Epoch {}/{}) Cost = {}'.format(e + 1, epochs, cost), end='\r')
costs.append(cost)
print('Training finished, Cost: {} -> {}'.format(costs[0], costs[-1]))
print('==============================================')
# Save the updated parameters
rnn.save_parameters(filename)
# Plot the cost in each epoch
plt.plot(costs, color='r')
# Change the name of the window
fig = plt.gcf()
fig.canvas.set_window_title('WEED LMAO')
plt.ylabel('Cost')
plt.xlabel('Epoch')
plt.show()
# Generate a name with the trained RNN
def gen_names():
# Load the RNN from file
rnn = RNN(filename=filename)
print('Input how the name should start. Leave blank if you want it completely random and type \\ to exit')
while True:
# Get the user's chosen start for the strain name, and lowercase it
start = input().lower()
if start == '\\':
return
# Start with random letter if no input is given
if start == '':
# Only pick a letter, don't start with space or end-token
start = letters[random.randint(1, n_letters - 2)]
# Generate the string if the input is valid
valid, gen_strain = rnn.gen_name(start)
if valid:
print(gen_strain)
else:
print('Input contains invalid characters. Only use letters a-z and spaces.')
def train_args(arg_list):
opts, args = getopt.getopt(arg_list, 'r:u:e:')
learning_rate = 0.07
units = 32
epochs = 100
for opt, value in opts:
if opt == '-r':
learning_rate = float(value)
if opt == '-u':
units = int(value)
if opt == '-e':
epochs = int(value)
train(learning_rate, units, epochs)
if __name__ == '__main__':
if sys.argv[1] == 'train':
train_args(sys.argv[2:])
if sys.argv[1] == 'generate':
gen_names()
|
python
|
def selection_sort(some_list):
"""
https://en.wikipedia.org/wiki/Selection_sort
Split the list into a sorted/unsorted portion. Go through the list from left to right, starting with position 0 in
the unsorted portion. When we find the minimum element of the unsorted portion, swap it to the end of the sorted
list portion.
O(N^2)
"""
iters = 0
for i in range(0, len(some_list) - 1):
iters += 1
min_index = i # Always reset min for each loop
for j in range(i + 1, len(some_list)):
iters += 1
if some_list[j] < some_list[min_index]:
min_index = j
if min_index != i:
some_list[i], some_list[min_index] = some_list[min_index], some_list[i]
return iters, some_list
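# A quick illustrative check (not part of the original): the function returns
# the number of loop iterations together with the sorted list.
if __name__ == "__main__":
    iterations, ordered = selection_sort([64, 25, 12, 22, 11])
    print(iterations, ordered)  # 14 [11, 12, 22, 25, 64]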
|
python
|
"""
Boolean Satisfiability
Interface Classes:
DPLLInterface
Interface Functions:
backtrack
iter_backtrack
dpll
"""
import random
class DPLLInterface(object):
"""DPLL algorithm interface"""
def bcp(self):
"""Boolean Constraint Propagation
Return an untyped point that results from unit propagation.
If BCP detects a contradiction, return None.
"""
raise NotImplementedError()
def ple(self):
"""Pure Literal Elimination
Return an untyped point that results from pure literal elimination.
If PLE detects a contradiction, return None.
"""
raise NotImplementedError()
def backtrack(bf):
"""
If this function is satisfiable, return a satisfying input upoint.
Otherwise, return None.
"""
if bf.is_zero():
ret = None
elif bf.is_one():
ret = frozenset(), frozenset()
else:
v = bf.top
#v = random.choice(bf.inputs)
upnt0 = frozenset([v.uniqid]), frozenset()
upnt1 = frozenset(), frozenset([v.uniqid])
for upnt in [upnt0, upnt1]:
bt_upnt = backtrack(bf.urestrict(upnt))
if bt_upnt is not None:
ret = (upnt[0] | bt_upnt[0], upnt[1] | bt_upnt[1])
break
else:
ret = None
return ret
def iter_backtrack(bf, rand=False):
"""Iterate through all satisfying points using backtrack algorithm."""
if bf.is_one():
yield frozenset(), frozenset()
elif not bf.is_zero():
v = random.choice(bf.inputs) if rand else bf.top
upnt0 = frozenset([v.uniqid]), frozenset()
upnt1 = frozenset(), frozenset([v.uniqid])
upoints = [upnt0, upnt1]
if rand:
random.shuffle(upoints)
for upnt in upoints:
for bt_upnt in iter_backtrack(bf.urestrict(upnt), rand):
yield (upnt[0] | bt_upnt[0], upnt[1] | bt_upnt[1])
def dpll(cnf):
"""
Davis-Putnam-Logemann-Loveland (DPLL) Algorithm
"""
if cnf.is_zero():
ret = None
elif cnf.is_one():
ret = frozenset(), frozenset()
else:
# 1. Boolean constraint propagation
bcp_upnt = cnf.bcp()
if bcp_upnt is None:
# BCP found a contradiction
ret = None
else:
bcp_cnf = cnf.urestrict(bcp_upnt)
if bcp_cnf.is_one():
# BCP found a solution
ret = bcp_upnt
else:
# 2. Pure literal elimination
ple_upnt = bcp_cnf.ple()
bcp_ple_cnf = bcp_cnf.urestrict(ple_upnt)
bcp_ple_upnt = (bcp_upnt[0] | ple_upnt[0],
bcp_upnt[1] | ple_upnt[1])
if bcp_ple_cnf.is_one():
# PLE found a solution
ret = bcp_ple_upnt
else:
# 3. Variable selection heuristic
v = bcp_ple_cnf.top
#v = random.choice(bcp_ple_cnf.inputs)
# 4. Backtrack
upnt0 = (bcp_ple_upnt[0] | {v.uniqid}, bcp_ple_upnt[1])
upnt1 = (bcp_ple_upnt[0], bcp_ple_upnt[1] | {v.uniqid})
for upnt in [upnt0, upnt1]:
bt_upnt = dpll(bcp_ple_cnf.urestrict(upnt))
if bt_upnt is not None:
# Backtrack found a solution
ret = (upnt[0] | bt_upnt[0], upnt[1] | bt_upnt[1])
break
else:
# Backtrack found a contradiction
ret = None
return ret
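# A minimal, self-contained sketch (not part of the original module) showing
# `backtrack` on a toy CNF type. `_ToyVar` and `_ToyCNF` below are assumptions
# built only from the interface this module uses (is_zero, is_one, top,
# urestrict); implementing bcp/ple on the same class would let `dpll` run too.
class _ToyVar:
    def __init__(self, uniqid):
        self.uniqid = uniqid

class _ToyCNF:
    """CNF as a frozenset of clauses; each clause is a frozenset of
    (uniqid, value) literals with value 0 (negated) or 1 (positive)."""
    def __init__(self, clauses):
        self.clauses = frozenset(clauses)
    def is_zero(self):
        # An empty clause can never be satisfied.
        return frozenset() in self.clauses
    def is_one(self):
        # No clauses left: the formula is satisfied.
        return not self.clauses
    @property
    def top(self):
        return _ToyVar(min(uid for clause in self.clauses for uid, _ in clause))
    def urestrict(self, upnt):
        assign = {uid: 0 for uid in upnt[0]}
        assign.update({uid: 1 for uid in upnt[1]})
        new_clauses = []
        for clause in self.clauses:
            if any(assign.get(uid) == val for uid, val in clause):
                continue  # clause satisfied, drop it
            new_clauses.append(frozenset(lit for lit in clause if lit[0] not in assign))
        return _ToyCNF(new_clauses)

if __name__ == '__main__':
    # (x1 or x2) and (~x1 or x2): backtrack should force x2 = 1.
    cnf = _ToyCNF([frozenset({(1, 1), (2, 1)}), frozenset({(1, 0), (2, 1)})])
    print(backtrack(cnf))  # (frozenset({1}), frozenset({2})) -> x1=0, x2=1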
|
python
|
import numpy as np
class Constant(object):
"""
Concatenates a constant value to the node attributes.
**Arguments**
- `value`: the value to concatenate to the node attributes.
"""
def __init__(self, value):
self.value = value
def __call__(self, graph):
value = np.zeros((graph.n_nodes, 1)) + self.value
if graph.x is None:
graph.x = value
else:
graph.x = np.concatenate((graph.x, value), axis=-1)
return graph
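# A minimal, self-contained sketch (not part of the original class). Any object
# exposing `n_nodes` and `x` attributes works; `_ToyGraph` below is only an
# illustrative stand-in for a real graph container.
if __name__ == "__main__":
    class _ToyGraph:
        def __init__(self, n_nodes, x=None):
            self.n_nodes = n_nodes
            self.x = x

    g = Constant(1.0)(_ToyGraph(n_nodes=3))
    print(g.x.shape)  # (3, 1): a column holding the constant was attached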
|
python
|
import glob
from os import path as osp
import numpy as np
import pytest
import tqdm
import habitat_sim
NUM_TESTS = 100
TURN_DEGREE = 30.0
ACCEPTABLE_SPLS = {
("try_step", False): 0.97,
("try_step_no_sliding", False): 0.925,
("try_step", True): 0.82,
("try_step_no_sliding", True): 0.60,
}
base_dir = osp.abspath(osp.join(osp.dirname(__file__), ".."))
test_navmeshes = [
osp.join(base_dir, "data/scene_datasets/mp3d/17DRP5sb8fy/17DRP5sb8fy.navmesh"),
osp.join(
base_dir, "data/scene_datasets/habitat-test-scenes/skokloster-castle.navmesh"
),
osp.join(base_dir, "data/scene_datasets/habitat-test-scenes/van-gogh-room.navmesh"),
]
test_all = False
gibson_base = osp.join(base_dir, "data/scene_datasets/gibson")
if test_all and osp.exists(gibson_base):
test_navmeshes += glob.glob(f"{gibson_base}/*.navmesh")
mp3d_base = osp.join(base_dir, "data/scene_datasets/mp3d")
if test_all and osp.exists(mp3d_base):
test_navmeshes += glob.glob(f"{mp3d_base}/*/*.navmesh")
mp3d_example_base = osp.join(base_dir, "data/scene_datasets/mp3d_example")
if test_all and osp.exists(mp3d_example_base):
test_navmeshes += glob.glob(f"{mp3d_example_base}/*/*.navmesh")
@pytest.fixture(scope="module")
def pbar():
if test_all:
return tqdm.tqdm(total=len(test_navmeshes) * NUM_TESTS)
else:
return None
num_fails = 0.0
num_tested = 0
total_spl = 0.0
@pytest.mark.parametrize("test_navmesh", test_navmeshes)
@pytest.mark.parametrize("move_filter_fn", ["try_step", "try_step_no_sliding"])
@pytest.mark.parametrize("action_noise", [False, True])
def test_greedy_follower(test_navmesh, move_filter_fn, action_noise, pbar):
global num_fails
global num_tested
global total_spl
if not osp.exists(test_navmesh):
pytest.skip(f"{test_navmesh} not found")
pathfinder = habitat_sim.PathFinder()
pathfinder.load_nav_mesh(test_navmesh)
assert pathfinder.is_loaded
pathfinder.seed(0)
np.random.seed(seed=0)
scene_graph = habitat_sim.SceneGraph()
agent = habitat_sim.Agent(scene_graph.get_root_node().create_child())
agent.controls.move_filter_fn = getattr(pathfinder, move_filter_fn)
agent.agent_config.action_space["turn_left"].actuation.amount = TURN_DEGREE
agent.agent_config.action_space["turn_right"].actuation.amount = TURN_DEGREE
if action_noise:
# "_" prefix the perfect actions so that we can use noisy actions instead
agent.agent_config.action_space = {
"_" + k: v for k, v in agent.agent_config.action_space.items()
}
agent.agent_config.action_space.update(
**dict(
move_forward=habitat_sim.ActionSpec(
"pyrobot_noisy_move_forward",
habitat_sim.PyRobotNoisyActuationSpec(amount=0.25),
),
turn_left=habitat_sim.ActionSpec(
"pyrobot_noisy_turn_left",
habitat_sim.PyRobotNoisyActuationSpec(amount=TURN_DEGREE),
),
turn_right=habitat_sim.ActionSpec(
"pyrobot_noisy_turn_right",
habitat_sim.PyRobotNoisyActuationSpec(amount=TURN_DEGREE),
),
)
)
follower = habitat_sim.GreedyGeodesicFollower(
pathfinder,
agent,
forward_key="move_forward",
left_key="turn_left",
right_key="turn_right",
)
test_spl = 0.0
for _ in range(NUM_TESTS):
follower.reset()
state = habitat_sim.AgentState()
while True:
state.position = pathfinder.get_random_navigable_point()
goal_pos = pathfinder.get_random_navigable_point()
path = habitat_sim.ShortestPath()
path.requested_start = state.position
path.requested_end = goal_pos
if pathfinder.find_path(path) and path.geodesic_distance > 2.0:
break
agent.state = state
failed = False
gt_geo = path.geodesic_distance
agent_distance = 0.0
last_xyz = state.position
num_acts = 0
# If there is no action noise, then we can use find_path to get all the actions
if not action_noise:
try:
action_list = follower.find_path(goal_pos)
except habitat_sim.errors.GreedyFollowerError:
action_list = [None]
while True:
# If there is action noise, we need to plan a single action, actually take it, and repeat
if action_noise:
try:
next_action = follower.next_action_along(goal_pos)
except habitat_sim.errors.GreedyFollowerError:
break
else:
next_action = action_list[0]
action_list = action_list[1:]
if next_action is None:
break
agent.act(next_action)
agent_distance += np.linalg.norm(last_xyz - agent.state.position)
last_xyz = agent.state.position
num_acts += 1
if num_acts > 1e4:
break
end_state = agent.state
path.requested_start = end_state.position
pathfinder.find_path(path)
failed = path.geodesic_distance > follower.forward_spec.amount
spl = float(not failed) * gt_geo / max(gt_geo, agent_distance)
test_spl += spl
if test_all:
num_fails += float(failed)
num_tested += 1
total_spl += spl
pbar.set_postfix(
num_fails=num_fails,
failure_rate=num_fails / num_tested,
spl=total_spl / num_tested,
)
pbar.update()
if not test_all:
assert test_spl / NUM_TESTS >= ACCEPTABLE_SPLS[(move_filter_fn, action_noise)]
|
python
|
""" Views related to rsync or FTP account access. """
__author__ = "William Tucker"
__date__ = "2018-03-13"
__copyright__ = "Copyright 2019 United Kingdom Research and Innovation"
__license__ = "BSD - see LICENSE file in top-level package directory"
from django.shortcuts import render, redirect
from uploader.ftp.forms import FtpPasswordChangeForm
from uploader.ftp.utils import generate_visible_ftp_password, set_ftp_password
def ftp_random_password(request):
generate_visible_ftp_password(request.user)
return redirect('browse')
def ftp_access(request):
    if request.method == 'POST':
form = FtpPasswordChangeForm(request.POST)
if form.is_valid():
cleaned_data = form.cleaned_data
password = cleaned_data.get('password')
set_ftp_password(request.user, password)
return redirect('browse')
else:
form = FtpPasswordChangeForm()
return render(request, 'uploader/ftp/access.html', {'form': form})
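# A possible URLconf wiring for the two views above (a minimal sketch; the module
# path "uploader.ftp.views", the URL patterns and the URL names are assumptions,
# not taken from this project):
#
#     from django.urls import path
#     from uploader.ftp import views
#
#     urlpatterns = [
#         path('ftp/access/', views.ftp_access, name='ftp_access'),
#         path('ftp/random-password/', views.ftp_random_password, name='ftp_random_password'),
#     ]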
|
python
|
from dataclasses import dataclass, field
from typing import List, Optional
# TODO: remove default Hydra pallets - pallets will become a required parameter
PALLETS = ["amm", "exchange", "transaction_multi_payment"]
@dataclass
class Config:
do_db_bench: bool = False
substrate_repo_path: str = "./substrate"
do_pallet_bench: bool = True
performance_check: bool = False
reference_values: Optional[str] = None
dump_results: Optional[str] = None
# Directory
# TODO: support for file ( but if multiple pallets in one run - different files ?)
output_dir: Optional[str] = None
template: Optional[str] = None
    pallets: List[str] = field(default_factory=lambda: PALLETS)
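# A minimal usage sketch (the overrides below are illustrative only, not values
# required by the benchmark runner); it shows how the dataclass is instantiated
# with defaults and with explicit arguments.
if __name__ == "__main__":
    default_config = Config()
    custom_config = Config(do_db_bench=True, pallets=["amm"])
    print(default_config.pallets)  # ['amm', 'exchange', 'transaction_multi_payment']
    print(custom_config.pallets)   # ['amm']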
|
python
|
import pyaudio
class AudioRecorder:
def __init__(self, channels_=2, format_=pyaudio.paInt16, rate_=44100, chunk_=256):
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(format=format_, channels=channels_,
rate=rate_, input=True, frames_per_buffer=chunk_)
self.channels = channels_
self.format = format_
self.rate = rate_
self.chunk = chunk_
def record_chunk(self):
return self.stream.read(self.chunk)
def __enter__(self):
return self
def __exit__(self, *arg):
self.stream.stop_stream()
self.stream.close()
self.audio.terminate()
class AudioPlayer:
def __init__(self, channels_=2, format_=pyaudio.paInt16, rate_=44100, chunk_=256):
self.audio = pyaudio.PyAudio()
self.stream = self.audio.open(format=format_, channels=channels_,
rate=rate_, output=True)
self.channels = channels_
self.format = format_
self.rate = rate_
self.chunk = chunk_
def play_chunk(self, chunk):
self.stream.write(chunk)
def __enter__(self):
return self
def __exit__(self, *arg):
self.stream.stop_stream()
self.stream.close()
self.audio.terminate()
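# A minimal pass-through sketch (assumes a working default input and output
# device); it records chunks with AudioRecorder and immediately plays them back
# with AudioPlayer for roughly five seconds.
if __name__ == "__main__":
    with AudioRecorder() as recorder, AudioPlayer() as player:
        # rate / chunk chunks per second, times five seconds
        for _ in range(int(recorder.rate / recorder.chunk * 5)):
            player.play_chunk(recorder.record_chunk())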
|
python
|
import argparse
import logging
import gdk.commands.methods as methods
import gdk.common.parse_args_actions as actions
import pytest
def test_run_command_with_valid_namespace_without_debug(mocker):
    # Integration test that the appropriate action is called only once with a valid command namespace.
args_namespace = argparse.Namespace(component="init", init=None, lang="python", template="name", **{"gdk": "component"})
spy_component_build = mocker.spy(methods, "_gdk_component_build")
spy_call_action_by_name = mocker.spy(actions, "call_action_by_name")
spy_get_method_from_command = mocker.spy(actions, "get_method_from_command")
spy_logger = mocker.spy(logging, "basicConfig")
mock_component_init = mocker.patch("gdk.commands.methods._gdk_component_init", return_value=None)
actions.run_command(args_namespace)
assert mock_component_init.call_count == 1
assert spy_component_build.call_count == 0
assert spy_call_action_by_name.call_count == 1
    assert spy_get_method_from_command.call_count == 3  # Called recursively three times
assert spy_logger.call_count == 0
def test_run_command_with_valid_debug_enabled(mocker):
    # Integration test that the appropriate action is called only once with a valid command namespace.
args_namespace = argparse.Namespace(
component="init", init=None, lang="python", template="name", **{"gdk": "component"}, debug=True
)
spy_component_build = mocker.spy(methods, "_gdk_component_build")
spy_call_action_by_name = mocker.spy(actions, "call_action_by_name")
spy_get_method_from_command = mocker.spy(actions, "get_method_from_command")
mock_component_init = mocker.patch("gdk.commands.methods._gdk_component_init", return_value=None)
spy_logging_ = mocker.spy(logging.getLogger(), "setLevel")
actions.run_command(args_namespace)
assert mock_component_init.call_count == 1
assert spy_component_build.call_count == 0
assert spy_call_action_by_name.call_count == 1
    assert spy_get_method_from_command.call_count == 3  # Called recursively three times
spy_logging_.assert_called_once_with(logging.DEBUG)
with pytest.raises(AssertionError):
spy_logging_.assert_called_once_with(logging.WARN)
def test_run_command_with_invalid_namespace_method(mocker):
    # Test the action taken when the method doesn't exist for an invalid namespace
args_namespace = argparse.Namespace(component="invalid", invalid=None, **{"gdk": "component"})
spy_get_method_from_command = mocker.spy(actions, "get_method_from_command")
spy_call_action_by_name = mocker.spy(actions, "call_action_by_name")
with pytest.raises(SystemExit):
actions.run_command(args_namespace)
assert spy_call_action_by_name.call_count == 1 # No method name to call if namespace is invalid
    assert spy_get_method_from_command.call_count == 3  # Called recursively three times
|
python
|
# -*- encoding: utf-8 -*-
from django import forms
from .models import Image, UserProfile, Establishment
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.forms.widgets import TextInput, PasswordInput
from mysite.widgets import MyClearableFileInput
from municipios.widgets import SelectMunicipioWidget
class FormEstablishment(forms.ModelForm):
class Meta:
model = Establishment
fields = ('name', 'address', 'ec_type', 'img_logo', 'img_vitrin', 'cnpj', 'insc_est', 'phone',
'site', 'email', 'zip_code')
widgets = {
"img_vitrin": MyClearableFileInput(),
"img_logo": MyClearableFileInput(),
"address": SelectMunicipioWidget(),
}
def __init__(self, *args, **kwargs):
super(FormEstablishment, self).__init__(*args, **kwargs)
self.fields['name'].widget.attrs = {'class': 'form-control', 'placeholder': 'Nome'}
self.fields['address'].widget.attrs = {'class': 'form-control'}
self.fields['ec_type'].widget.attrs = {'class': 'form-control'}
self.fields['img_logo'].required = False
self.fields['img_logo'].widget.attrs = {'class': 'form-control'}
self.fields['img_vitrin'].required = False
self.fields['img_vitrin'].widget.attrs = {'class': 'form-control'}
self.fields['phone'].widget.attrs = {'class': 'form-control', 'placeholder': 'Telefone'}
self.fields['email'].widget.attrs = {'class': 'form-control', 'placeholder': 'E-mail'}
self.fields['site'].required = False
self.fields['site'].widget.attrs = {'class': 'form-control', 'placeholder': 'Site'}
self.fields['zip_code'].widget.attrs = {'class': 'form-control', 'placeholder': 'Cep'}
self.fields['cnpj'].required = False
self.fields['cnpj'].widget.attrs = {'class': 'form-control', 'placeholder': 'CNPJ'}
self.fields['insc_est'].required = False
        self.fields['insc_est'].widget.attrs = {'class': 'form-control', 'placeholder': 'Inscrição Estadual'}
class WableAuthenticationForm(AuthenticationForm):
username = forms.CharField(widget=TextInput(attrs={'class': 'form-control', 'placeholder': 'E-mail'}))
password = forms.CharField(widget=PasswordInput(attrs={'class': 'form-control', 'placeholder':'Senha'}))
class WableRegistrationForm(UserCreationForm):
email = forms.EmailField()
class Meta:
model = User
fields = ('first_name', 'last_name', 'username', 'password1', 'password2', 'email')
def __init__(self, *args, **kwargs):
super(WableRegistrationForm, self).__init__(*args, **kwargs)
self.fields['first_name'].required = True
self.fields['first_name'].widget.attrs = {'class': 'form-control', 'placeholder': 'Nome'}
self.fields['last_name'].required = True
self.fields['last_name'].widget.attrs = {'class': 'form-control', 'placeholder': 'Sobrenome'}
self.fields['email'].required = False
self.fields['email'].widget.attrs = {'class': 'form-control', 'placeholder': 'E-mail'}
self.fields['username'].widget.attrs = {'class': 'form-control', 'placeholder': 'E-mail ou número do celular'}
self.fields['password1'].widget.attrs = {'class': 'form-control', 'placeholder': 'Senha'}
self.fields['password2'].widget.attrs = {'class': 'form-control', 'placeholder': 'Confirme a senha'}
def save(self, commit=True):
user = super(WableRegistrationForm, self).save(commit=False)
user.email = self.cleaned_data["email"]
if commit:
user.save()
return user
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
def __init__(self, *args, **kwargs):
super(UserForm, self).__init__(*args, **kwargs)
self.fields['first_name'].widget.attrs = {'class': 'form-control', 'placeholder': 'Nome'}
self.fields['last_name'].widget.attrs = {'class': 'form-control', 'placeholder': 'Sobrenome'}
self.fields['email'].widget.attrs = {'class': 'form-control', 'placeholder': 'E-mail'}
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('phone', 'birthday', 'image_field', 'address')
widgets = {
"image_field": MyClearableFileInput(),
"address": SelectMunicipioWidget(),
}
def __init__(self, *args, **kwargs):
super(UserProfileForm, self).__init__(*args, **kwargs)
self.fields['image_field'].required = False
self.fields['image_field'].widget.attrs = {'onChange': 'readFile(this);'}
self.fields['birthday'].required = False
self.fields['birthday'].widget.attrs = {'class': 'form-control', 'placeholder': 'dd/mm/aaaa'}
self.fields['phone'].widget.attrs = {'class': 'form-control', 'placeholder': 'Telefone'}
self.fields['address'].widget.attrs = {'class': 'form-control'}
class ImageForm(forms.ModelForm):
class Meta:
model = Image
fields = ('image_field', 'cropping_free')
labels = {
'image_field': (''),
}
def __init__(self, *args, **kwargs):
super(ImageForm, self).__init__(*args, **kwargs)
self.fields['image_field'].widget.attrs = {'onChange': 'readURL(this);'}
|
python
|
#!/usr/bin/python
# Copyright 2012 Google Inc. All Rights Reserved.
# Author: mrdmnd@ (Matt Redmond)
# Based off of code in //depot/google3/experimental/mobile_gwp
"""Code to transport profile data between a user's machine and the CWP servers.
Pages:
"/": the main page for the app, left blank so that users cannot access
the file upload but left in the code for debugging purposes
"/upload": Updates the datastore with a new file. the upload depends on
the format which is templated on the main page ("/")
input includes:
profile_data: the zipped file containing profile data
board: the architecture we ran on
chromeos_version: the chromeos_version
"/serve": Lists all of the files in the datastore. Each line is a new entry
in the datastore. The format is key~date, where key is the entry's
key in the datastore and date is the file upload time and date.
(Authentication Required)
"/serve/([^/]+)?": For downloading a file of profile data, ([^/]+)? means
any character sequence so to download the file go to
'/serve/$key' where $key is the datastore key of the file
you want to download.
(Authentication Required)
"/del/([^/]+)?": For deleting an entry in the datastore. To use go to
'/del/$key' where $key is the datastore key of the entry
                   you want to be deleted from the datastore.
(Authentication Required)
TODO: Add more extensive logging"""
import cgi
import logging
import md5
import urllib
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
logging.getLogger().setLevel(logging.DEBUG)
class FileEntry(db.Model):
profile_data = db.BlobProperty() # The profile data
date = db.DateTimeProperty(auto_now_add=True) # Date it was uploaded
data_md5 = db.ByteStringProperty() # md5 of the profile data
board = db.StringProperty() # board arch
chromeos_version = db.StringProperty() # ChromeOS version
class MainPage(webapp.RequestHandler):
"""Main page only used as the form template, not actually displayed."""
def get(self, response=''): # pylint: disable-msg=C6409
if response:
self.response.out.write('<html><body>')
self.response.out.write("""<br>
<form action="/upload" enctype="multipart/form-data" method="post">
<div><label>Profile Data:</label></div>
<div><input type="file" name="profile_data"/></div>
<div><label>Board</label></div>
<div><input type="text" name="board"/></div>
<div><label>ChromeOS Version</label></div>
<div><input type="text" name="chromeos_version"></div>
<div><input type="submit" value="send" name="submit"></div>
</form>
</body>
</html>""")
class Upload(webapp.RequestHandler):
"""Handler for uploading data to the datastore, accessible by anyone."""
def post(self): # pylint: disable-msg=C6409
"""Takes input based on the main page's form."""
getfile = FileEntry()
f1 = self.request.get('profile_data')
getfile.profile_data = db.Blob(f1)
getfile.data_md5 = md5.new(f1).hexdigest()
getfile.board = self.request.get('board')
getfile.chromeos_version = self.request.get('chromeos_version')
getfile.put()
self.response.out.write(getfile.key())
#self.redirect('/')
class ServeHandler(webapp.RequestHandler):
"""Given the entry's key in the database, output the profile data file. Only
accessible from @google.com accounts."""
def get(self, resource): # pylint: disable-msg=C6409
if Authenticate(self):
file_key = str(urllib.unquote(resource))
request = db.get(file_key)
self.response.out.write(request.profile_data)
class ListAll(webapp.RequestHandler):
"""Displays all files uploaded. Only accessible by @google.com accounts."""
def get(self): # pylint: disable-msg=C6409
"""Displays all information in FileEntry, ~ delimited."""
if Authenticate(self):
query_str = 'SELECT * FROM FileEntry ORDER BY date ASC'
query = db.GqlQuery(query_str)
delimiter = '~'
for item in query:
display_list = [item.key(), item.date, item.data_md5, item.board,
item.chromeos_version]
str_list = [cgi.escape(str(i)) for i in display_list]
self.response.out.write(delimiter.join(str_list) + '</br>')
class DelEntries(webapp.RequestHandler):
"""Deletes entries. Only accessible from @google.com accounts."""
def get(self, resource): # pylint: disable-msg=C6409
"""A specific entry is deleted, when the key is given."""
if Authenticate(self):
fkey = str(urllib.unquote(resource))
request = db.get(fkey)
if request:
db.delete(fkey)
def Authenticate(webpage):
"""Some urls are only accessible if logged in with a @google.com account."""
user = users.get_current_user()
if user is None:
webpage.redirect(users.create_login_url(webpage.request.uri))
elif user.email().endswith('@google.com'):
return True
else:
webpage.response.out.write('Not Authenticated')
return False
def main():
application = webapp.WSGIApplication(
[
('/', MainPage),
('/upload', Upload),
('/serve/([^/]+)?', ServeHandler),
('/serve', ListAll),
('/del/([^/]+)?', DelEntries),
],
debug=False)
run_wsgi_app(application)
if __name__ == '__main__':
main()
|
python
|
from typing import Iterable
import torch
from torch import Tensor
def to_np(arr):
return arr.detach().cpu().numpy()
def to_t(t: Iterable, device: torch.device = 'cuda', dtype: torch.dtype = torch.float64) -> Tensor:
if isinstance(t, Tensor):
return t
return torch.tensor(t, device=device, dtype=dtype)
@torch.jit.script
def pi() -> float:
return torch.acos(torch.tensor(0., dtype=torch.float64)).item() * 2
@torch.jit.script
def length(t: Tensor) -> Tensor:
return torch.sqrt((t ** 2).sum(-1))
@torch.jit.script
def norm(t: Tensor) -> Tensor:
t_length = length(t)
if t_length > 0:
return t / t_length
return t
@torch.jit.script
def get_2d_vector(vec: Tensor):
return torch.stack([
torch.sqrt(torch.sum(vec[..., :2] ** 2, dim=-1)),
vec[..., 2],
], -1)
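# A small usage sketch (device='cpu' is passed explicitly here so the example
# does not require a GPU); it exercises to_t, length, norm, get_2d_vector and
# to_np on a toy 3-vector.
if __name__ == "__main__":
    v = to_t([3.0, 4.0, 0.0], device='cpu')
    print(length(v))         # tensor(5., dtype=torch.float64)
    print(norm(v))           # unit vector along the same direction
    print(get_2d_vector(v))  # [sqrt(x^2 + y^2), z] -> tensor([5., 0.])
    print(to_np(norm(v)))    # plain numpy array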
|
python
|
"""PivotCalculator
Pivot points are the tops/bottoms that the price has reached.
"""
from collections import deque, namedtuple
from operator import gt
class PivotCalculator(object):
def __init__(self, window_size=5, cmp=gt):
self.window_size = window_size
self.cmp = cmp
        # exit_check: whether it should be considered a local extremum
        # when it gets removed from the queue
        self.QE = namedtuple("QueueElement", ["val", "idx", "exit_check"])
        self._q = deque()  # queue to hold the local extremum candidates
self._idx = 0 # index of the current value to be processed.
self._result = []
self._post_process_done = False
def __call__(self, v):
is_extrim = False
        # XXX: local extremum <=> ENTER and EXIT checks are both True
        # ENTER: if it is a local extremum when it enters the queue,
        # there should be no other element in the queue
while self._q and self.cmp(v, self._q[-1][0]):
self._q.pop()
exit_check = not self._q
t = self.QE(v, self._idx, exit_check)
self._q.append(t)
        # EXIT: if it is a local extremum when it leaves the queue,
        # it should still be the best candidate (in the front).
candidate = self._q[0]
        # e.g. window_size = 5, candidate.idx = 0, self._idx = 4
if self._idx - candidate.idx >= self.window_size - 1:
self._q.popleft()
if candidate.exit_check:
is_extrim = True
# DEBUG:
#print(self._idx, "{:.2f}".format(v), self._q[0] if self._q else [],
# ["{:.2f}".format(e[0]) for e in self._q],
# self._idx - self.window_size, result)
        # Only after seeing window_size elements can we tell whether a local extremum was found.
if self._idx >= self.window_size - 1:
self._result.append(is_extrim)
self._idx += 1
def _post(self):
for i in range(self._idx - self.window_size + 1, self._idx):
            # XXX: there are at most window_size-1 elements left to be examined,
            # and only the first element can possibly be an extremum.
is_extrim = self._q and self._q[0].idx == i and self._q[0].exit_check
self._result.append(is_extrim)
self._q.clear()
@property
def result(self):
if not self._post_process_done:
self._post_process_done = True
self._post()
return self._result
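# A small usage sketch (the price series below is made up); values are fed to
# PivotCalculator one at a time and result[i] then tells whether the i-th value
# was a local top within a 5-element window. Using operator.lt as cmp instead
# would flag local bottoms.
if __name__ == "__main__":
    prices = [1, 2, 3, 9, 3, 2, 1, 2, 3, 4, 5, 4, 3]
    calc = PivotCalculator(window_size=5)
    for price in prices:
        calc(price)
    print([i for i, is_top in enumerate(calc.result) if is_top])  # [3, 10]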
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['EnterprisePolicyArgs', 'EnterprisePolicy']
@pulumi.input_type
class EnterprisePolicyArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
encryption: Optional[pulumi.Input['PropertiesEncryptionArgs']] = None,
enterprise_policy_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['EnterprisePolicyIdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
lockbox: Optional[pulumi.Input['PropertiesLockboxArgs']] = None,
network_injection: Optional[pulumi.Input['PropertiesNetworkInjectionArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a EnterprisePolicy resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input['PropertiesEncryptionArgs'] encryption: The encryption settings for a configuration store.
:param pulumi.Input[str] enterprise_policy_name: Name of the EnterprisePolicy.
:param pulumi.Input['EnterprisePolicyIdentityArgs'] identity: The identity of the EnterprisePolicy.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input['PropertiesLockboxArgs'] lockbox: Settings concerning lockbox.
:param pulumi.Input['PropertiesNetworkInjectionArgs'] network_injection: Settings concerning network injection.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if encryption is not None:
pulumi.set(__self__, "encryption", encryption)
if enterprise_policy_name is not None:
pulumi.set(__self__, "enterprise_policy_name", enterprise_policy_name)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if lockbox is not None:
pulumi.set(__self__, "lockbox", lockbox)
if network_injection is not None:
pulumi.set(__self__, "network_injection", network_injection)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def encryption(self) -> Optional[pulumi.Input['PropertiesEncryptionArgs']]:
"""
The encryption settings for a configuration store.
"""
return pulumi.get(self, "encryption")
@encryption.setter
def encryption(self, value: Optional[pulumi.Input['PropertiesEncryptionArgs']]):
pulumi.set(self, "encryption", value)
@property
@pulumi.getter(name="enterprisePolicyName")
def enterprise_policy_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the EnterprisePolicy.
"""
return pulumi.get(self, "enterprise_policy_name")
@enterprise_policy_name.setter
def enterprise_policy_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "enterprise_policy_name", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['EnterprisePolicyIdentityArgs']]:
"""
The identity of the EnterprisePolicy.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['EnterprisePolicyIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def lockbox(self) -> Optional[pulumi.Input['PropertiesLockboxArgs']]:
"""
Settings concerning lockbox.
"""
return pulumi.get(self, "lockbox")
@lockbox.setter
def lockbox(self, value: Optional[pulumi.Input['PropertiesLockboxArgs']]):
pulumi.set(self, "lockbox", value)
@property
@pulumi.getter(name="networkInjection")
def network_injection(self) -> Optional[pulumi.Input['PropertiesNetworkInjectionArgs']]:
"""
Settings concerning network injection.
"""
return pulumi.get(self, "network_injection")
@network_injection.setter
def network_injection(self, value: Optional[pulumi.Input['PropertiesNetworkInjectionArgs']]):
pulumi.set(self, "network_injection", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class EnterprisePolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
encryption: Optional[pulumi.Input[pulumi.InputType['PropertiesEncryptionArgs']]] = None,
enterprise_policy_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['EnterprisePolicyIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
lockbox: Optional[pulumi.Input[pulumi.InputType['PropertiesLockboxArgs']]] = None,
network_injection: Optional[pulumi.Input[pulumi.InputType['PropertiesNetworkInjectionArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Definition of the EnterprisePolicy.
API Version: 2020-10-30-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PropertiesEncryptionArgs']] encryption: The encryption settings for a configuration store.
:param pulumi.Input[str] enterprise_policy_name: Name of the EnterprisePolicy.
:param pulumi.Input[pulumi.InputType['EnterprisePolicyIdentityArgs']] identity: The identity of the EnterprisePolicy.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[pulumi.InputType['PropertiesLockboxArgs']] lockbox: Settings concerning lockbox.
:param pulumi.Input[pulumi.InputType['PropertiesNetworkInjectionArgs']] network_injection: Settings concerning network injection.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: EnterprisePolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Definition of the EnterprisePolicy.
API Version: 2020-10-30-preview.
:param str resource_name: The name of the resource.
:param EnterprisePolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(EnterprisePolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
encryption: Optional[pulumi.Input[pulumi.InputType['PropertiesEncryptionArgs']]] = None,
enterprise_policy_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['EnterprisePolicyIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
lockbox: Optional[pulumi.Input[pulumi.InputType['PropertiesLockboxArgs']]] = None,
network_injection: Optional[pulumi.Input[pulumi.InputType['PropertiesNetworkInjectionArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = EnterprisePolicyArgs.__new__(EnterprisePolicyArgs)
__props__.__dict__["encryption"] = encryption
__props__.__dict__["enterprise_policy_name"] = enterprise_policy_name
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["lockbox"] = lockbox
__props__.__dict__["network_injection"] = network_injection
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:powerplatform:EnterprisePolicy"), pulumi.Alias(type_="azure-native:powerplatform/v20201030preview:EnterprisePolicy"), pulumi.Alias(type_="azure-nextgen:powerplatform/v20201030preview:EnterprisePolicy")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(EnterprisePolicy, __self__).__init__(
'azure-native:powerplatform:EnterprisePolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'EnterprisePolicy':
"""
Get an existing EnterprisePolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = EnterprisePolicyArgs.__new__(EnterprisePolicyArgs)
__props__.__dict__["encryption"] = None
__props__.__dict__["identity"] = None
__props__.__dict__["location"] = None
__props__.__dict__["lockbox"] = None
__props__.__dict__["name"] = None
__props__.__dict__["network_injection"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return EnterprisePolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def encryption(self) -> pulumi.Output[Optional['outputs.PropertiesResponseEncryption']]:
"""
The encryption settings for a configuration store.
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.EnterprisePolicyIdentityResponse']]:
"""
The identity of the EnterprisePolicy.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def lockbox(self) -> pulumi.Output[Optional['outputs.PropertiesResponseLockbox']]:
"""
Settings concerning lockbox.
"""
return pulumi.get(self, "lockbox")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInjection")
def network_injection(self) -> pulumi.Output[Optional['outputs.PropertiesResponseNetworkInjection']]:
"""
Settings concerning network injection.
"""
return pulumi.get(self, "network_injection")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
|
python
|
import logging
import os
import yaml
from DataCuration.main import main as start_web_scrape
from util import create_folder
def load_config():
"""
Loads the configuration file
:return: Content of the configuration file
"""
with open('config.yaml', 'r') as file:
content = yaml.load(file, yaml.FullLoader)
return content
def verify_configurations(conf: dict):
"""
    Verify that the content loaded from the configuration file is correct. It is checked at the
    beginning to prevent errors later in the code.
:param conf: content of the configuration file
:return: None
"""
# TODO: Add checks for content of the configuration file.
pass
def main():
config = load_config()
verify_configurations(config)
start_web_scrape(config)
if __name__ == '__main__':
create_folder(os.path.join(os.getcwd(), 'logs'))
logging.basicConfig(filename='logs/DataCuration.log',
filemode='w',
level=logging.INFO,
format='%(asctime)s: '
'%(filename)s: '
'%(levelname)s: '
'%(lineno)d:\t'
'%(message)s')
main()
|
python
|
########################################
# PROJECT 1 - Linked List
# Author: Tony Sulfaro
# PID: A52995491
########################################
class Node:
# DO NOT MODIFY THIS CLASS #
__slots__ = 'value', 'next_node'
def __init__(self, value, next_node=None):
"""
DO NOT EDIT
Initialize a node
:param value: value of the node
:param next_node: pointer to the next node, default is None
"""
self.value = value # element at the node
self.next_node = next_node # reference to next node
def __eq__(self, other):
"""
DO NOT EDIT
Determine if two nodes are equal (same value)
:param other: node to compare to
:return: True if nodes are equal, False otherwise
"""
if other is None:
return False
if self.value == other.value:
return True
return False
def __repr__(self):
"""
DO NOT EDIT
String representation of a node
:return: string of value
"""
return str(self.value)
class LinkedList:
def __init__(self):
"""
DO NOT EDIT
Create/initialize an empty linked list
"""
self.head = None # Node
self.tail = None # Node
self.size = 0 # Integer
def __eq__(self, other):
"""
DO NOT EDIT
Defines "==" (equality) for two linked lists
:param other: Linked list to compare to
:return: True if equal, False otherwise
"""
if self.size != other.size:
return False
if self.head != other.head or self.tail != other.tail:
return False
# Traverse through linked list and make sure all nodes are equal
temp_self = self.head
temp_other = other.head
while temp_self is not None:
if temp_self == temp_other:
temp_self = temp_self.next_node
temp_other = temp_other.next_node
else:
return False
# Make sure other is not longer than self
if temp_self is None and temp_other is None:
return True
return False
def __repr__(self):
"""
DO NOT EDIT
String representation of a linked list
:return: string of list of values
"""
temp_node = self.head
values = []
if temp_node is None:
return None
while temp_node is not None:
values.append(temp_node.value)
temp_node = temp_node.next_node
return str(values)
###### MODIFY THE BELOW FUNCTIONS #####
# ------------------------Accessor Functions---------------------------
def length(self):
"""
Gets the number of nodes of the linked list
:return: size of list
"""
return self.size
def is_empty(self):
"""
Determines if the linked list is empty
:return: True if list is empty and False if not empty
"""
return self.size == 0
def front_value(self):
"""
Gets the first value of the list
:return: value of the list head
"""
if self.head is not None:
return self.head.value
return None
def back_value(self):
"""
Gets the last value of the list
:return: value of the list tail
"""
if self.tail is not None:
return self.tail.value
return None
def count(self, val):
"""
Counts the number of times a value 'val' occurs in the list
:param val: value to find and count
        :return: number of times 'val' occurs
"""
count = 0
temp_self = self.head
if temp_self is None:
return 0
while temp_self is not None:
if temp_self.value == val:
count += 1
temp_self = temp_self.next_node
return count
def find(self, val):
"""
        Searches the list for the first node with the value 'val'
:param val: value to search for
:return: True if value is in list, False if value is not found
"""
temp_self = self.head
while temp_self is not None:
if temp_self.value == val:
return True
temp_self = temp_self.next_node
return False
# ------------------------Mutator Functions---------------------------
def push_front(self, val):
"""
Adds a node to the front of the list with value 'val'
:param val: value to add to list
:return: no return
"""
if self.size == 0:
new_node = Node(val, self.head)
self.head = new_node
self.tail = new_node
self.size += 1
else:
self.head = Node(val, self.head)
self.size += 1
def push_back(self, val):
"""
Adds a node to the back of the list with value 'val'
:param val: value to add to list
:return: no return
"""
if self.size == 0:
new_node = Node(val)
self.head = new_node
self.tail = new_node
self.size += 1
else:
new_node = Node(val)
self.tail.next_node = new_node
self.tail = new_node
self.size += 1
    def pop_front(self):
        """
        Removes a node from the front of the list
        :return: the value of the removed node
        """
        head = self.head
        if head is None:
            return None
        self.head = head.next_node
        if self.head is None:
            # The list is now empty, so the tail must be cleared as well
            self.tail = None
        self.size -= 1
        return head.value
def pop_back(self):
"""
Removes a node from the back of the list
:return: the value of the removed node
"""
if self.head is not None:
current_node = self.head
prev_node = None
while current_node.next_node is not None:
prev_node = current_node
current_node = current_node.next_node
if prev_node is None: # popping list of one element
self.head = None
self.tail = None
self.size -= 1
return current_node.value
else:
prev_node.next_node = None
self.tail = prev_node
self.size -= 1
return current_node.value
else:
return None
def reverse_list(self):
"""
Reverses the values of the given linked list
:return: no return
"""
current_node = self.head
prev_node = None
self.tail = self.head
while current_node is not None:
next_node = current_node.next_node
current_node.next_node = prev_node
prev_node = current_node
current_node = next_node
self.head = prev_node
def main():
"""
Main Docstring
:return: no return
"""
stu = LinkedList()
stu.push_front(45)
stu.push_front(39)
stu.push_front(10)
stu.push_front(98)
stu.push_front(6)
print(stu)
print('size: ', stu.size)
print('head: ', stu.head.value)
print('tail: ', stu.tail.value)
stu.reverse_list()
print(stu)
print('size: ', stu.size)
print('head: ', stu.head.value)
print('tail: ', stu.tail.value)
'''current_node = stu.head
while current_node.next_node is not None:
print('node: ', current_node.value,' next: ', current_node.next_node.value)
current_node = current_node.next_node'''
if __name__ == "__main__":
main()
|
python
|
from html_parse.src.parser import Parser
import unittest
class TestParser(unittest.TestCase):
def test_remove_end_tags(self):
parser = Parser()
html_string = '<title>Hello</title>'
self.assertEqual(parser.remove_end_tags(html_string), '<title>Hello|;|')
def test_remove_end_tags_with_head(self):
parser = Parser()
html_string = '<head><title>Hello</title></head>'
self.assertEqual(parser.remove_end_tags(html_string), '<head><title>Hello|;||;|')
def test_remove_end_tags_with_html(self):
parser = Parser()
html_string = '<html><head><title>Hello</title></head></html>'
self.assertEqual(parser.remove_end_tags(html_string), '<html><head><title>Hello|;||;||;|')
def test_remove_end_tags_web_page(self):
parser = Parser()
html_string = '<html><head><title>Hello</title></head><body><p>World</p></body></html>'
self.assertEqual(parser.remove_end_tags(html_string), '<html><head><title>Hello|;||;|<body><p>World|;||;||;|')
def test_clean_start_tags(self):
parser = Parser()
html_string = '<title>Hello|;|'
self.assertEqual(parser.clean_start_tags(html_string), '<title>Hello|;|')
def test_clean_start_tags_with_head(self):
parser = Parser()
html_string = '<head><title>Hello|;|'
self.assertEqual(parser.clean_start_tags(html_string), '<title>Hello|;|')
def test_clean_start_tags_with_html(self):
parser = Parser()
html_string = '<html><head><title>Hello|;|'
self.assertEqual(parser.clean_start_tags(html_string), '<title>Hello|;|')
def test_clean_start_tags_web_page(self):
parser = Parser()
html_string = '<html><head><title>Hello|;||;|<body><p>World|;||;||;|'
self.assertEqual(parser.clean_start_tags(html_string), '<title>Hello|;||;|<p>World|;||;||;|')
def test_remove_hanging_colons(self):
parser = Parser()
colons = '|;||;||;||;|'
self.assertEqual(parser.remove_hanging_colons(colons), '|;|')
def test_remove_hanging_colons_with_text(self):
parser = Parser()
string = '|;|hello|;||;||;|'
self.assertEqual(parser.remove_hanging_colons(string), '|;|hello|;|')
def test_remove_hanging_colons_with_html(self):
parser = Parser()
html_string = '<title>Hello|;||;|<p>World|;||;||;|'
self.assertEqual(parser.remove_hanging_colons(html_string), '<title>Hello|;|<p>World|;|')
def test_tag_to_key(self):
parser = Parser()
html_string = '<title>'
self.assertEqual(parser.tag_to_key(html_string), 'title|:|')
def test_tag_to_key_tag_and_text(self):
parser = Parser()
html_string = '<title>Hello|;|<p>World|;|'
self.assertEqual(parser.tag_to_key(html_string), 'title|:|Hello|;|p|:|World|;|')
def test_to_array(self):
parser = Parser()
html_string = 'title|:|Hello|;|p|:|World|;|'
result = parser.to_array(html_string)
self.assertEqual(result[0], 'title|:|Hello')
self.assertEqual(result[1], 'p|:|World')
self.assertEqual(len(result), 2)
def test_to_dicts(self):
parser = Parser()
array = ['title|:|Hello|','p|:|World|']
result = parser.to_dicts(array)
self.assertEqual(result[0]['title'], 'Hello')
self.assertEqual(result[1]['p'], 'World')
self.assertEqual(len(result), 2)
def test_parse(self):
parser = Parser()
html_string = '<html><head><title>Hello</title></head><body><p>World</p></body></html>'
result = parser.parse(html_string)
self.assertEqual(result[0]['title'], 'Hello')
self.assertEqual(result[1]['p'], 'World')
self.assertEqual(len(result), 2)
|
python
|
"""
Book: Building RESTful Python Web Services
Chapter 3: Improving and adding authentication to an API with Django
Author: Gaston C. Hillar - Twitter.com/gastonhillar
Publisher: Packt Publishing Ltd. - http://www.packtpub.com
"""
from rest_framework.pagination import LimitOffsetPagination
class LimitOffsetPaginationWithMaxLimit(LimitOffsetPagination):
max_limit = 10
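# A minimal settings sketch (the dotted path below is a placeholder for wherever
# this module lives in the project); DRF picks the class up through
# DEFAULT_PAGINATION_CLASS, and max_limit = 10 then caps any ?limit= query
# parameter at 10 items per page:
#
#     REST_FRAMEWORK = {
#         'DEFAULT_PAGINATION_CLASS':
#             'drones.custompagination.LimitOffsetPaginationWithMaxLimit',
#         'PAGE_SIZE': 4,
#     }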
|
python
|
# AUTHOR: Dalon Lobo
# Python3 Concept: Plotting line plot using matplotlib
# GITHUB: https://github.com/dalonlobo
import numpy as np
import matplotlib.pyplot as plt
# Create dummy x and y values. In this case I create the values using numpy.
# This graph will show a sine wave
x = np.arange(0, 10, 0.1) # Values for x coordinate
y = np.sin(x) # Values for y coordinate using numpy sin function
plt.plot(x, y) # Plots the x and y coordinates
plt.xlabel("x - values") # show x label
plt.ylabel("y = sin(x)") # show y label
plt.show() # Displays the plot
|
python
|
#!/usr/bin/env python
'''Tools for modifying halo data output files.
@author: Zach Hafen
@contact: [email protected]
@status: Development
'''
import copy
import glob
import numpy as np
import os
import pandas as pd
import galaxy_dive.galaxy_linker.linker as galaxy_linker
import galaxy_dive.analyze_data.halo_data as halo_data
import galaxy_dive.read_data.metafile as read_metafile
import galaxy_dive.utils.astro as astro_utils
import galaxy_dive.utils.constants as constants
import galaxy_dive.utils.data_constants as data_constants
import galaxy_dive.utils.data_operations as data_operations
import galaxy_dive.utils.utilities as utilities
import galaxy_dive.analyze_data.ahf as ahf
import galaxy_dive.analyze_data.particle_data as particle_data
########################################################################
########################################################################
class HaloUpdater( halo_data.HaloData ):
'''Class for updating Halo data (smoothing, adding in additional columns, etc)'''
def __init__( self, *args, **kwargs ):
self.key_parser = ahf.HaloKeyParser()
super( HaloUpdater, self ).__init__( *args, **kwargs )
########################################################################
# Get Data Values
########################################################################
def get_accurate_redshift( self, metafile_dir ):
        '''Get better values of the redshift than what's stored in the Halo filename, by loading them from an external file.
Args:
metafile_dir (str): The directory the snapshot_times are stored in.
Modifies:
self.mtree_halos (dict of pd.DataFrames): Updates the redshift column
'''
# Get the redshift data out
metafile_reader = read_metafile.MetafileReader( metafile_dir )
metafile_reader.get_snapshot_times()
# Replace the old data
for halo_id in self.mtree_halos.keys():
mtree_halo = self.mtree_halos[ halo_id ]
# Read and replace the new redshift
new_redshift = metafile_reader.snapshot_times['redshift'][ mtree_halo.index ]
mtree_halo['redshift'] = new_redshift
########################################################################
def get_analytic_concentration( self, metafile_dir, type_of_halo_id='merger_tree' ):
'''Get analytic values for the halo concentration, using colossus, Benedikt Diemer's cosmology code
( https://bitbucket.org/bdiemer/colossus ; http://www.benediktdiemer.com/code/colossus/ ).
Assumptions:
            - We're using the default formula of Diemer & Kravtsov 2015
- We're using the Bryan&Norman1998 version of the virial radius.
Args:
metafile_dir (str): The directory the snapshot_times are stored in.
type_of_halo_id (str): 'merger_tree' if the halo id is a merger tree halo id.
'halos' if the halo id is a *.AHF_halos halo id.
        Returns:
c_vir (np.array of floats): The concentration, defined as R_vir/r_scale.
'''
# Include imports here, because this function may not in general work if colossus is not available,
# and the rest of the module should still be made useable
# There may be some warnings here about the version of scipy colossus uses, as opposed to the version galaxy_dive uses
import colossus.cosmology.cosmology as co_cosmology
import colossus.halo.concentration as co_concentration
# Get simulation parameters, for use in creating a cosmology
metafile_reader = read_metafile.MetafileReader( metafile_dir )
metafile_reader.get_used_parameters()
# Setup the cosmology used by the simulations
sim_cosmo = {
'flat': True,
'H0' : float( metafile_reader.used_parameters['HubbleParam'] )*100.,
'Om0' : float( metafile_reader.used_parameters['Omega0'] ),
'Ob0' : float( metafile_reader.used_parameters['OmegaBaryon'] ),
'sigma8' : co_cosmology.cosmologies['WMAP9']['sigma8'], # Use WMAP9 for values we don't store in our simulations explicitly.
'ns' : co_cosmology.cosmologies['WMAP9']['ns'], # Use WMAP9 for values we don't store in our simulations explicitly.
}
cosmo = co_cosmology.setCosmology( 'sim_cosmo', sim_cosmo )
if type_of_halo_id == 'merger_tree':
# Loop over all mt halos
for halo_id in self.mtree_halos.keys():
# Load the data
mtree_halo = self.mtree_halos[ halo_id ]
# Get the concentration out
c_vir = []
for m_vir, z in zip( mtree_halo['Mvir'], mtree_halo['redshift'] ):
c = co_concentration.concentration( m_vir, 'vir', z, model='diemer15', statistic='median')
c_vir.append( c )
# Turn the concentration into an array
c_vir = np.array( c_vir )
# Save the concentration
mtree_halo['cAnalytic'] = c_vir
elif type_of_halo_id == 'halos':
# Get the redshift for the halo file.
metafile_reader.get_snapshot_times()
redshift = metafile_reader.snapshot_times['redshift'][self.halos_snum]
# Get the concentration
c = co_concentration.concentration( self.halos['Mvir'], 'vir', redshift, model='diemer15', statistic='median')
return c
########################################################################
def get_mass_radii(
self,
mass_fractions,
simulation_data_dir,
galaxy_cut,
length_scale,
):
'''Get radii that enclose a fraction (mass_fractions[i]) of a halo's stellar mass.
Args:
mass_fractions (list of floats) :
Relevant mass fractions.
simulation_data_dir (str) :
Directory containing the raw particle data.
galaxy_cut (float) :
galaxy_cut*length_scale defines the radius around the center of the halo to look for stars.
length_scale (str) :
galaxy_cut*length_scale defines the radius around the center of the halo to look for stars.
Returns:
mass_radii (list of np.ndarrays) :
If M_sum_j = all mass inside galaxy_cut*length_scale for halo j, then mass_radii[i][j] is the radius that
contains a fraction mass_fractions[i] of M_sum_j.
'''
# Load the simulation data
s_data = particle_data.ParticleData(
simulation_data_dir,
self.halos_snum,
ptype = data_constants.PTYPES['star'],
)
try:
particle_positions = s_data.data['P'].transpose()
# Case where there are no star particles at this redshift.
except KeyError:
return [ np.array( [ np.nan, ]*self.halos.index.size ), ]*len( mass_fractions )
# Find the mass radii
galaxy_linker_kwargs = {
'particle_positions' : particle_positions,
'particle_masses' : s_data.data['M'],
'snum' : self.halos_snum,
'redshift' : s_data.redshift,
'hubble' : s_data.data_attrs['hubble'],
'galaxy_cut' : galaxy_cut,
'length_scale' : length_scale,
'halo_data' : self,
}
gal_linker = galaxy_linker.GalaxyLinker( **galaxy_linker_kwargs )
mass_radii = [ gal_linker.get_mass_radius( mass_fraction ) for mass_fraction in mass_fractions ]
return mass_radii
########################################################################
def get_enclosed_mass( self,
simulation_data_dir,
ptype,
galaxy_cut,
length_scale,
):
'''Get the mass inside galaxy_cut*length_scale for each Halo halo.
Args:
simulation_data_dir (str) :
Directory containing the raw particle data.
ptype (str) :
What particle type to get the mass for.
galaxy_cut (float) :
galaxy_cut*length_scale defines the radius around the center of the halo within which to get the mass.
length_scale (str) :
galaxy_cut*length_scale defines the radius around the center of the halo within which to get the mass.
Returns:
mass_inside_all_halos (np.ndarray) :
mass_inside_all_halos[i] is the mass of particle type ptype inside galaxy_cut*length scale around a galaxy.
'''
# Load the simulation data
s_data = particle_data.ParticleData(
simulation_data_dir,
self.halos_snum,
data_constants.PTYPES[ptype],
)
try:
particle_positions = s_data.data['P'].transpose()
# Case where there are no star particles at this redshift.
except KeyError:
return np.array( [ 0., ]*self.halos.index.size )
# Find the mass radii
galaxy_linker_kwargs = {
'particle_positions' : particle_positions,
'particle_masses' : s_data.data['M']*constants.UNITMASS_IN_MSUN,
'snum' : self.halos_snum,
'redshift' : s_data.redshift,
'hubble' : s_data.data_attrs['hubble'],
'galaxy_cut' : galaxy_cut,
'length_scale' : length_scale,
'halo_data' : self,
}
gal_linker = galaxy_linker.GalaxyLinker( **galaxy_linker_kwargs )
mass_inside_all_halos = gal_linker.mass_inside_all_halos
# Make sure to put hubble constant back in so we have consistent units.
mass_inside_all_halos *= s_data.data_attrs['hubble']
return mass_inside_all_halos
########################################################################
def get_average_quantity_inside_galaxy( self,
data_key,
simulation_data_dir,
ptype,
galaxy_cut,
length_scale,
weight_data_key = 'M',
fill_value = np.nan,
):
        '''Get the average of a quantity inside galaxy_cut*length_scale for each Halo halo.
Args:
data_key (str) :
Data key for the quantity to get the average of.
simulation_data_dir (str) :
Directory containing the raw particle data.
ptype (str) :
What particle type to get the mass for.
galaxy_cut (float) :
galaxy_cut*length_scale defines the radius around the center of the halo within which to get the mass.
length_scale (str) :
galaxy_cut*length_scale defines the radius around the center of the halo within which to get the mass.
weight_data_key (str) :
Data key for the weight to use when averaging.
fill_value (float) :
What value to use when the average quantity inside the galaxy is not resolved.
Returns:
average_quantity_inside_galaxy (np.ndarray) :
average_quantity_inside_galaxy[i] is the average value of the requested quantity for particle type ptype
inside galaxy_cut*length scale around a galaxy.
'''
# Load the simulation data
s_data = particle_data.ParticleData(
simulation_data_dir,
self.halos_snum,
data_constants.PTYPES[ptype],
# The following values need to be set, because they come into play when a galaxy is centered on halo finder
# data. That's obviously not the case here...
centered = True,
vel_centered = True,
hubble_corrected = True,
)
try:
particle_positions = s_data.data['P'].transpose()
# Case where there are no particles of the given ptype at this redshift.
except KeyError:
return np.array( [ fill_value, ]*self.halos.index.size )
# Find the mass radii
galaxy_linker_kwargs = {
'particle_positions' : particle_positions,
'snum' : self.halos_snum,
'redshift' : s_data.redshift,
'hubble' : s_data.data_attrs['hubble'],
'galaxy_cut' : galaxy_cut,
'length_scale' : length_scale,
'halo_data' : self,
}
gal_linker = galaxy_linker.GalaxyLinker( low_memory_mode=False, **galaxy_linker_kwargs )
average_quantity_inside_galaxy = gal_linker.weighted_summed_quantity_inside_galaxy(
s_data.get_data( data_key ),
s_data.get_data( weight_data_key ),
fill_value,
)
return average_quantity_inside_galaxy
########################################################################
def get_circular_velocity( self,
galaxy_cut,
length_scale,
metafile_dir,
ptypes = data_constants.STANDARD_PTYPES,
):
'''Get the circular velocity at galaxy_cut*length_scale.
Args:
galaxy_cut (float) :
galaxy_cut*length_scale defines the radius around the center of the halo within which to get the mass.
length_scale (str) :
galaxy_cut*length_scale defines the radius around the center of the halo within which to get the mass.
metafile_dir (str) :
Directory containing metafile data, for getting out the redshift given a snapshot.
ptypes (list of strs) :
Particle types to count the mass inside the halo of.
Returns:
v_circ (np.ndarray)
Circular velocity at galaxy_cut*length_scale using mass from the given ptypes.
'''
# Get the redshift, for converting the radius to pkpc/h.
metafile_reader = read_metafile.MetafileReader( metafile_dir )
metafile_reader.get_snapshot_times()
redshift = metafile_reader.snapshot_times['redshift'][self.halos_snum]
# Get the radius in pkpc/h
try:
radius = galaxy_cut*self.halos[length_scale]
except KeyError:
radius = galaxy_cut*self.halos_add[length_scale]
radius /= ( 1. + redshift )
# Get the mass in Msun/h
masses = []
for ptype in ptypes:
mass_key = self.key_parser.get_enclosed_mass_key( ptype, galaxy_cut, length_scale )
try:
ptype_mass = self.halos_add[mass_key]
except:
ptype_mass = self.halos[mass_key]
masses.append( ptype_mass )
mass = np.array( masses ).sum( axis=0 )
# Now get the circular velocity out
# (note that we don't need to bother with converting out the 1/h's, because in this particular case they'll cancel)
v_circ = astro_utils.circular_velocity( radius, mass )
return v_circ
########################################################################
# Alter Data
########################################################################
def smooth_mtree_halos(
self,
metafile_dir,
keys_to_smooth = [],
smooth_kwargs = { 'window_len' : 20, 'window' : 'flat' },
):
'''Make Rvir and Mvir monotonically increasing, to help mitigate artifacts in the Halo-calculated merger tree.
NOTE: This smooths in *physical* coordinates, so it may not be exactly smooth in comoving coordinates.
Args:
metafile_dir (str) :
The directory the snapshot_times are stored in.
keys_to_smooth (list of strs) :
If given, also smooth the data given by these keys.
This smoothing isn't done to assume a monotonic increase, but is
a convolve with a moving filter through data_operations.smooth()
smooth_kwargs (dict) :
Specific arguments that determine exactly how the smoothing is
done, when also smoothing for specific keys.
Modifies:
self.mtree_halos (dict of pd.DataFrames) :
Changes self.mtree_halos[halo_id]['Rvir'] and self.mtree_halos[halo_id]['Mvir'] to be monotonically increasing.
'''
# We need to get an accurate redshift in order to smooth properly
self.get_accurate_redshift( metafile_dir )
for halo_id in self.mtree_halos.keys():
# Load the data
mtree_halo = self.mtree_halos[ halo_id ]
# Convert into physical coords for smoothing (we'll still leave the 1/h in place)
r_vir_phys = mtree_halo['Rvir']/( 1. + mtree_halo['redshift'] )
# Smooth r_vir
r_vir_phys_smooth = np.maximum.accumulate( r_vir_phys[::-1] )[::-1]
# Convert back into comoving and save
mtree_halo['Rvir'] = r_vir_phys_smooth*( 1. + mtree_halo['redshift'] )
# Smooth Mvir
mtree_halo['Mvir'] = np.maximum.accumulate( mtree_halo['Mvir'][::-1] )[::-1]
for smooth_key in keys_to_smooth:
original_data = copy.copy( mtree_halo[smooth_key].values )
try:
smoothed_data = data_operations.smooth(
original_data,
**smooth_kwargs
)
except ValueError:
continue
# Replace NaN values with original values, where possible
smoothed_nan = np.isnan( smoothed_data )
smoothed_data[smoothed_nan] = original_data[smoothed_nan]
smooth_save_key = 's' + smooth_key
mtree_halo[smooth_save_key] = smoothed_data
########################################################################
def include_halos_to_mtree_halos( self ):
        '''While most of the halo data are contained in *.AHF_halos files, some quantities are stored in
        *.AHF_halos_add files. These are usually computed manually, external to what's inherent in AHF. This routine adds
        the information from these files onto the loaded merger tree data (which doesn't usually include them, because
        they're not inherent to AHF).
Modifies:
self.mtree_halos (dict of pd.DataFrames) :
Adds additional columns contained in *.AHF_halos_add files.
'''
for mtree_id, mtree_halo in self.mtree_halos.items():
print( "Looking at merger tree ID {}".format( mtree_id ) )
halo_ids = mtree_halo['ID'].values
snums = mtree_halo.index
ahf_frames = []
for snum, halo_id in zip( snums, halo_ids, ):
print( "Getting data for snapshot {}".format( snum ) )
self.get_halos( snum )
# Get the columns we want to add on.
halofile_columns = set( self.halos.columns )
mtree_columns = set( mtree_halo.columns )
columns_to_add = list( halofile_columns - mtree_columns )
columns_to_add.sort()
# Now get the values to add
if self.halos.index.size != 0:
full_ahf_row = self.halos.loc[halo_id:halo_id]
ahf_row = full_ahf_row[columns_to_add]
# Check for edge case, where there isn't an AHF row with specified halo number or there are no more halos
if ( self.halos.index.size == 0 ) or ( ahf_row.size == 0 ):
empty_data = {}
for column_name in columns_to_add:
empty_data[column_name] = [ np.nan, ]
ahf_row = pd.DataFrame( empty_data, index=[ halo_id, ], )
ahf_frames.append( ahf_row )
custom_mtree_halo = pd.concat( ahf_frames )
# Add in the snapshots, and use them as the index
try:
custom_mtree_halo['snum'] = snums
            except ValueError:
                # Length mismatch between snapshots and concatenated rows; drop into the debugger to inspect.
                import pdb; pdb.set_trace()
custom_mtree_halo = custom_mtree_halo.set_index( 'snum', )
            # Now merge onto the mtree_halo DataFrame
self.mtree_halos[mtree_id] = pd.concat( [ mtree_halo, custom_mtree_halo, ], axis=1 )
########################################################################
# Save Data
########################################################################
def save_mtree_halos( self, tag ):
'''Save loaded mergertree halo files in a csv file.
Args:
tag (str) : If the previous file was for example '/path/to/file/halo_00000.dat',
the new file will be '/path/to/file/halo_00000_{}.dat'.format( tag )
'''
for halo_id in self.mtree_halos.keys():
# Load the data
mtree_halo = self.mtree_halos[ halo_id ]
halo_filepath = self.mtree_halo_filepaths[ halo_id ]
# Create the new filename
filepath_base, file_ext = os.path.splitext( halo_filepath )
save_filepath = '{}_{}{}'.format( filepath_base, tag, file_ext )
mtree_halo.to_csv( save_filepath, sep='\t' )
########################################################################
def save_smooth_mtree_halos(
self,
metafile_dir,
index = None,
include_halos_add = True,
include_concentration = False,
smooth_keys = [ 'Rstar0.5', ],
**get_mtree_halo_kwargs
):
'''Load halo files, smooth them, and save as a new file e.g., halo_00000_smooth.dat
Args:
metafile_dir (str) :
The directory the metafiles (snapshot_times and used_parameters) are stored in.
index (str or int) :
What type of index to use. Defaults to None, which raises an exception. You *must* choose an
index, to avoid easy mistakes. See get_mtree_halos() for a full description.
include_concentration (bool):
Whether or not to add an additional column that gives an analytic value for the
halo concentration.
'''
# Load the data
self.get_mtree_halos( index=index, **get_mtree_halo_kwargs )
# Include data stored in *AHF_halos_add files.
if include_halos_add:
self.include_halos_to_mtree_halos()
# Smooth the halos
self.smooth_mtree_halos( metafile_dir, smooth_keys, )
# Include the concentration, if chosen.
if include_concentration:
self.get_analytic_concentration( metafile_dir )
# Save the halos
self.save_mtree_halos( 'smooth' )
########################################################################
def save_custom_mtree_halos( self, snums, halo_ids, metafile_dir, ):
'''Save a custom merger tree.
Args:
snums (array-like or int) :
What snapshots to generate the custom merger tree for.
If a single integer, then snums will start at that integer and count backwards by single snapshots for the
length of halo_ids
halo_ids (array-like) :
halo_ids[i] is the AHF_halos halo ID for the merger tree halo at snums[i].
metafile_dir (str) :
Directory for the metafile (used to get simulation redshift).
Modifies:
self.data_dir/halo_00000_custom.dat (text file) : Saves the custom merger tree at this location.
'''
if isinstance( snums, int ):
snums = np.arange( snums, snums - len( halo_ids ), -1 )
# Concatenate the data
ahf_frames = []
for snum, halo_id in zip( snums, halo_ids, ):
print( "Getting data for snapshot {}".format( snum ) )
self.get_halos( snum )
ahf_frames.append( self.halos.loc[halo_id:halo_id] )
custom_mtree_halo = pd.concat( ahf_frames )
# Make sure to store the IDs too
custom_mtree_halo['ID'] = halo_ids
# Add in the snapshots, and use them as the index
custom_mtree_halo['snum'] = snums
custom_mtree_halo = custom_mtree_halo.set_index( 'snum', )
# Get and save the redshift
metafile_reader = read_metafile.MetafileReader( metafile_dir )
metafile_reader.get_snapshot_times()
custom_mtree_halo['redshift'] = metafile_reader.snapshot_times['redshift'][snums]
# Save the data
save_filepath = os.path.join( self.data_dir, 'halo_00000_custom.dat' )
custom_mtree_halo.to_csv( save_filepath, sep='\t' )
########################################################################
def save_halos_add( self,
snum,
include_analytic_concentration = True,
include_mass_radii = True,
include_enclosed_mass = True,
include_average_quantity_inside_galaxy = False,
include_v_circ = True,
metafile_dir = None,
simulation_data_dir = None,
mass_radii_kwargs = {
'mass_fractions' : [ 0.5, 0.75, 0.9, ],
'galaxy_cut' : 0.15,
'length_scale' : 'Rvir',
},
enclosed_mass_ptypes = data_constants.STANDARD_PTYPES,
enclosed_mass_kwargs = {
'galaxy_cut' : 5.0,
'length_scale' : 'Rstar0.5',
},
average_quantity_data_keys = [ 'Vx', 'Vy', 'Vz', ],
average_quantity_inside_galaxy_kwargs = {
'ptype' : 'star',
'galaxy_cut' : 5.0,
'length_scale' : 'Rstar0.5',
},
v_circ_kwargs = {
'galaxy_cut' : 5.0,
'length_scale' : 'Rstar0.5',
},
verbose = False,
):
'''Save additional columns that would be part of *.AHF_halos files, if that didn't break AHF.
Args:
snum (int) :
Snapshot number to load.
include_analytic_concentration (bool) :
Include analytic concentration as one of the columns?
include_mass_radii (bool) :
Include radius that include some fraction of a particle's mass as one of the columns?
include_enclosed_mass (bool) :
Include the mass enclosed in some specified radii as one of the columns?
include_average_quantity_inside_galaxy (bool) :
Include the average value inside each galaxy for the quantities listed in average_quantity_data_keys?
include_v_circ (bool) :
                Include the circular velocity at some specified radii as one of the columns?
metafile_dir (str) :
The directory the metafiles (snapshot_times and used_parameters) are stored in.
simulation_data_dir (str) :
Directory containing the simulation data (used for getting the position and masses of the star particles).
mass_radii_kwargs (dict) :
Keyword args for self.get_mass_radii()
enclosed_mass_ptypes (list of strs) :
Particle types to get the mass inside a radii of.
enclosed_mass_kwargs (dict) :
Keyword args for self.get_enclosed_mass()
average_quantity_data_keys (list of strs) :
What data keys (to be passed to a standard ParticleData.get_data() function) to get the average quantity for?
            average_quantity_inside_galaxy_kwargs (dict) :
Keyword args for self.get_average_quantity_inside_galaxy()
v_circ_kwargs (dict) :
Keyword args for self.get_circular_velocity()
verbose (bool) :
If True, print out additional information about how the steps are progressing.
'''
print('Saving *.AHF_halos_add for snum {}'.format( snum ))
# Load the AHF_halos data
self.get_halos( snum )
        # Figure out if there are any valid halos at this redshift; if not, then a *lot* can be skipped.
# TODO: Don't hard-code this in....
valid_halos = self.halos['n_star'] >= 10
no_valid_halos = valid_halos.sum() == 0
blank_array = np.array( [ np.nan, ]*self.halos.index.size )
# Create AHF_halos add
self.halos_add = pd.DataFrame( {}, index=self.halos.index )
self.halos_add.index.names = ['ID']
# Get the analytic concentration
if include_analytic_concentration:
if verbose:
print( "Including Analytic Concentration..." )
self.halos_add['cAnalytic'] = self.get_analytic_concentration( metafile_dir, type_of_halo_id='halos' )
# Get characteristic radii
if include_mass_radii:
if verbose:
print( "Including Mass Radii..." )
if no_valid_halos:
mass_radii = [ blank_array, ]*len( mass_radii_kwargs['mass_fractions'] )
else:
mass_radii = self.get_mass_radii( simulation_data_dir = simulation_data_dir, **mass_radii_kwargs )
for i, mass_fraction in enumerate( mass_radii_kwargs['mass_fractions'] ):
label = 'Rstar{}'.format( mass_fraction )
self.halos_add[label] = mass_radii[i]
# Get mass enclosed in a particular radius
if include_enclosed_mass:
if verbose:
print( "Including Enclosed Mass..." )
for i, ptype in enumerate( enclosed_mass_ptypes ):
if no_valid_halos:
halo_masses = blank_array
else:
halo_masses = self.get_enclosed_mass( simulation_data_dir, ptype, **enclosed_mass_kwargs )
label = self.key_parser.get_enclosed_mass_key( ptype, enclosed_mass_kwargs['galaxy_cut'], \
enclosed_mass_kwargs['length_scale'], )
self.halos_add[label] = halo_masses
# Get average quantity inside each galaxy (for halos that have galaxies)
if include_average_quantity_inside_galaxy:
if verbose:
print( "Including Average Quantities..." )
for i, data_key in enumerate( average_quantity_data_keys ):
if verbose:
print( " Finding average {}...".format( data_key ) )
if no_valid_halos:
average_quantity = blank_array
else:
average_quantity = self.get_average_quantity_inside_galaxy(
data_key,
simulation_data_dir,
**average_quantity_inside_galaxy_kwargs
)
label = self.key_parser.get_average_quantity_key(
data_key,
average_quantity_inside_galaxy_kwargs['ptype'],
average_quantity_inside_galaxy_kwargs['galaxy_cut'],
average_quantity_inside_galaxy_kwargs['length_scale'],
)
self.halos_add[label] = average_quantity
# Get circular velocity at a particular radius
if include_v_circ:
if verbose:
print( "Including Circular Velocity..." )
v_circ = self.get_circular_velocity( metafile_dir=metafile_dir, **v_circ_kwargs )
label = self.key_parser.get_velocity_at_radius_key(
'Vc',
v_circ_kwargs['galaxy_cut'],
v_circ_kwargs['length_scale']
)
self.halos_add[label] = v_circ
# Save AHF_halos add
save_filepath = '{}_add'.format( self.halos_path )
self.halos_add.to_csv( save_filepath, sep='\t' )
########################################################################
def save_multiple_halos_adds( self, metafile_dir, snum_start, snum_end, snum_step ):
'''Save additional columns that would be part of *.AHF_halos files, if that didn't break AHF.
Do this for every *.AHF_halos file in self.data_dir.
Args:
metafile_dir (str): The directory the metafiles (snapshot_times and used_parameters) are stored in.
snum_start (int): Starting snapshot.
snum_end (int): Ending snapshot.
snum_step (int): Step between snapshots.
'''
# Save the halos
for snum in range( snum_start, snum_end+snum_step, snum_step):
# Save the data
            self.save_halos_add( snum, metafile_dir=metafile_dir )
|
python
|
import typing
from django.core.paginator import Paginator
class Pagination:
result_list: typing.Iterable
can_show_all: bool
show_all: bool
multi_page: bool
paginator: Paginator
page_num: int
list_per_page = 20
list_max_show_all = 2000
def __init__(self, object_list: typing.Iterable, page_num: int, show_all: bool):
paginator = Paginator(object_list, self.list_per_page)
result_count = paginator.count
can_show_all = result_count <= self.list_max_show_all
multi_page = result_count > self.list_per_page
pagination_required = (not show_all or not can_show_all) and multi_page
page_range = (paginator.get_elided_page_range(page_num, on_each_side=2, on_ends=2)
if pagination_required
else [])
need_show_all_link = can_show_all and not show_all and multi_page
if (show_all and can_show_all) or not multi_page:
result_list = object_list
else:
result_list = paginator.get_page(page_num).object_list
self.result_list = result_list
self.can_show_all = can_show_all
self.show_all = show_all
self.multi_page = multi_page
self.paginator = paginator
self.page_num = page_num
self.pagination_required = pagination_required
self.page_range = page_range
self.need_show_all_link = need_show_all_link
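
# A minimal usage sketch (not part of the original module; the values are illustrative and
# Django >= 3.2 is assumed for Paginator.get_elided_page_range).
if __name__ == '__main__':
    demo = Pagination(object_list=list(range(95)), page_num=2, show_all=False)
    print(demo.multi_page)              # True: 95 items exceed list_per_page (20)
    print(len(list(demo.result_list)))  # 20 items shown on page 2
    print(list(demo.page_range))        # elided page range around page 2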
|
python
|
import spotipy
from spotipy import util
from spotipy.oauth2 import SpotifyClientCredentials
import os
from dotenv import load_dotenv
from pprint import pprint
# load_dotenv()
#
# os.environ['SPOTIPY_CLIENT_ID'] = os.getenv('client_id')
# os.environ['SPOTIPY_CLIENT_SECRET'] = os.getenv('client_secret')
client_id = '9a4e32732c6045289b1d85705c247a0f'
client_secret = '0ec437eade2b42ef878ea7009de904ef'
# sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(client_id=client_id, client_secret=client_secret))
def spotipy_api():
sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(client_id=client_id, client_secret=client_secret))
return sp
if __name__ == "__main__":
# generate_track_csv()
# sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials())
sp = spotipy_api()
# pprint(sp.recommendation_genre_seeds())
result = sp.track('2MLHyLy5z5l5YRp7momlgw')
pprint(result)
print(result['artists'][0]['name'])
print(result['name'])
# urn = 'spotify:track:2MLHyLy5z5l5YRp7momlgw'
# track = sp.track(urn)
# pprint(track)
# seed_artists = ['3jOstUTkEu2JkjvRdBA5Gu']
# seed_genres = ['rock']
# for i in range(1):
# result = sp.recommendations(seed_artists=seed_artists, seed_genres=['rock'], seed_tracks=['2MLHyLy5z5l5YRp7momlgw'])
# pprint(result)
# for t in result['tracks']:
# pprint(t['artists'][0]['name'])
# pprint(t['id'])
# dict_keys(['meta', 'track', 'bars', 'beats', 'sections', 'segments', 'tatums'])
# urn = 'spotify:track:2MLHyLy5z5l5YRp7momlgw'
# track = sp.audio_analysis(urn)
# pprint(track)
# pprint(track['meta'])
# song_features = sp.audio_features(urn)
# pprint(song_features)
#
#
# # get genres
# pprint(sp.recommendation_genre_seeds())
#
# util.prompt_for_user_token("bi423x859c25z4xnvy06kquj4",
# "user-library-read",
# client_id=os.getenv("SPOTIFY_CLIENT_ID"),
# client_secret=os.getenv("SPOTIFY_CLIENT_SECRET"),
# redirect_uri='http://localhost')
#
# client_id = '9a4e32732c6045289b1d85705c247a0f'
# client_secret = '0ec437eade2b42ef878ea7009de904ef'
# sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(client_id=client_id, client_secret=client_secret))
|
python
|
#!/usr/bin/python3
# https://practice.geeksforgeeks.org/problems/odd-even-level-difference/1
def getLevelDiff(root):
h = {0: 0, 1: 0}
level = 0
populateDiff(root, level, h)
return h[0]-h[1]
def populateDiff(root, level, h):
    if root is None:
return
l = level%2
h[l] += root.data
populateDiff(root.left, level+1, h)
populateDiff(root.right, level+1, h)
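
# A minimal sketch (assumption: nodes expose .data, .left and .right, like the judge's Node class).
# For the tree 5 -> (2, 6), 2 -> (1, 4), 6 -> (None, 8): even levels sum to 5+1+4+8=18,
# the odd level to 2+6=8, so getLevelDiff returns 10.
class _Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

if __name__ == '__main__':
    root = _Node(5, _Node(2, _Node(1), _Node(4)), _Node(6, right=_Node(8)))
    print(getLevelDiff(root))  # 10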
|
python
|
############################################################################
# #
# Copyright (c) 2019 Carl Drougge #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import sys
from os import getcwd, chdir
from os.path import dirname, basename, realpath, join
from locale import resetlocale
from glob import glob
from argparse import ArgumentParser, RawDescriptionHelpFormatter
from accelerator.error import UserError
cfg = None
def find_cfgs(basedir='.', wildcard=False):
"""Find all accelerator.conf (or accelerator*.conf if wildcard=True)
starting at basedir and continuing all the way to /, yielding them
from the deepest directory first, starting with accelerator.conf (if
present) and then the rest in sorted order."""
cfgname = 'accelerator.conf'
if wildcard:
pattern = 'accelerator*.conf'
else:
pattern = cfgname
orgdir = getcwd()
basedir = realpath(basedir)
while basedir != '/':
try:
chdir(basedir)
fns = sorted(glob(pattern))
finally:
chdir(orgdir)
if cfgname in fns:
fns.remove(cfgname)
fns.insert(0, cfgname)
for fn in fns:
yield join(basedir, fn)
basedir = dirname(basedir)
def load_some_cfg(basedir='.', all=False):
global cfg
basedir = realpath(basedir)
cfgs = find_cfgs(basedir, wildcard=all)
if all:
found_any = False
# Start at the root, so closer cfgs override those further away.
for fn in reversed(list(cfgs)):
try:
load_cfg(fn)
found_any = True
except Exception:
# As long as we find at least one we're happy.
pass
if not found_any:
raise UserError("Could not find 'accelerator*.conf' in %r or any of its parents." % (basedir,))
else:
try:
fn = next(cfgs)
except StopIteration:
raise UserError("Could not find 'accelerator.conf' in %r or any of its parents." % (basedir,))
load_cfg(fn)
def load_cfg(fn):
global cfg
from accelerator.configfile import load_config
from accelerator.job import WORKDIRS
cfg = load_config(fn)
for k, v in cfg.workdirs.items():
if WORKDIRS.get(k, v) != v:
print("WARNING: %s overrides workdir %s" % (fn, k,), file=sys.stderr)
WORKDIRS[k] = v
return cfg
def unpath(path):
while path in sys.path:
sys.path.pop(sys.path.index(path))
def setup(config_fn=None, debug_cmd=False):
resetlocale()
    # Make sure the accelerator dir is not in sys.path
# (as it might be if running without installing.)
unpath(dirname(__file__))
if config_fn is False:
return
if config_fn:
load_cfg(config_fn)
else:
load_some_cfg(all=debug_cmd)
if not debug_cmd:
# We want the project directory to be first in sys.path.
unpath(cfg['project_directory'])
sys.path.insert(0, cfg['project_directory'])
# For consistency we also always want the project dir
# as working directory.
chdir(cfg['project_directory'])
def cmd_dsgrep(argv):
from accelerator.dsgrep import main
return main(argv)
cmd_dsgrep.help = '''Search for a pattern in one or more datasets'''
def cmd_dsinfo(argv):
from accelerator.dsinfo import main
return main(argv)
cmd_dsinfo.help = '''Display information about datasets'''
def cmd_run(argv):
from accelerator.build import main
return main(argv, cfg)
cmd_run.help = '''Run a build script'''
def cmd_daemon(argv):
from accelerator.daemon import main
main(argv, cfg)
cmd_daemon.help = '''Run the main daemon'''
def cmd_init(argv):
from accelerator.init import main
main(argv)
cmd_init.help = '''Create a project directory'''
def cmd_urd(argv):
from accelerator.urd import main
main(argv, cfg)
cmd_urd.help = '''Run the urd daemon'''
def cmd_curl(argv):
prog = argv.pop(0)
if argv and argv[0] in ('daemon', 'urd',):
which = argv.pop(0)
else:
which = 'urd'
if '--help' in argv or '-h' in argv or not argv:
from os import environ
print('Usage: %s [daemon|urd] [curl options] path' % (prog,))
print('%s daemon talks to the daemon, %s urd talks to urd (default).' % (prog, prog,))
print()
print('Examples:')
print('%s %s/example/latest' % (prog, environ['USER'],))
print('%s daemon status' % (prog,))
return
url_end = argv.pop()
socket_opts = []
if which == 'urd':
url_start = cfg.urd
else: # daemon
url_start = cfg.url
if url_start.startswith('unixhttp://'):
from accelerator.compat import unquote_plus
url_start = url_start.split('://', 1)[1]
if '/' in url_start:
socket, url_start = url_start.split('/', 1)
else:
socket, url_start = url_start, ''
socket_opts = ['--unix-socket', unquote_plus(socket)]
url_start = join('http://.', url_start)
argv = ['curl', '-s'] + socket_opts + argv + [join(url_start, url_end)]
from subprocess import Popen, PIPE
import json
output, _ = Popen(argv, stdout=PIPE).communicate()
try:
output = json.dumps(json.loads(output), indent=4)
except Exception:
pass
print(output)
cmd_curl.help = '''http request (with curl) to urd or the daemon'''
DEBUG_COMMANDS = {'dsgrep', 'dsinfo',}
COMMANDS = dict(
dsgrep=cmd_dsgrep,
dsinfo=cmd_dsinfo,
run=cmd_run,
daemon=cmd_daemon,
init=cmd_init,
urd=cmd_urd,
curl=cmd_curl,
)
class HelpFixArgumentParser(ArgumentParser):
'''We don't want this argument parser to eat --help for our
sub commands, but we do want it to take help when no command
is specified'''
def __init__(self, argv, **kw):
self.__argv = argv
ArgumentParser.__init__(self, **kw)
def error(self, message):
if '--help' in self.__argv or '-h' in self.__argv:
self.print_help()
self.exit(0)
ArgumentParser.error(self, message)
def main():
from accelerator.autoflush import AutoFlush
argv = sys.argv[1:]
sys.stdout = AutoFlush(sys.stdout)
sys.stderr = AutoFlush(sys.stderr)
epilog = ['commands:', '']
cmdlen = max(len(cmd) for cmd in COMMANDS)
template = ' %%%ds %%s' % (cmdlen,)
for cmd, func in sorted(COMMANDS.items()):
epilog.append(template % (cmd, func.help,))
epilog.append('')
epilog.append('Use %(prog)s <command> --help for <command> usage.')
parser = HelpFixArgumentParser(
argv,
add_help=False,
epilog='\n'.join(epilog),
formatter_class=RawDescriptionHelpFormatter,
)
parser.add_argument('--config', metavar='CONFIG_FILE', help='Configuration file')
parser.add_argument('command')
args, argv = parser.parse_known_args(argv)
if args.command not in COMMANDS:
parser.print_help(file=sys.stderr)
print(file=sys.stderr)
print('Unknown command "%s"' % (args.command,), file=sys.stderr)
sys.exit(2)
try:
config_fn = args.config
if args.command == 'init':
config_fn = False
setup(config_fn, debug_cmd=args.command in DEBUG_COMMANDS)
argv.insert(0, '%s %s' % (basename(sys.argv[0]), args.command,))
return COMMANDS[args.command](argv)
except UserError as e:
print(e, file=sys.stderr)
return 1
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import urllib.request
import re
import os
import sqlite3
from bs4 import BeautifulSoup
from datetime import datetime
from loc_code import LOC_CODE
def jp_sqlite3_init(conn, cursor):
cursor.execute('''CREATE TABLE IF NOT EXISTS realestate (district text, dong text, apt_name text, apt_built_year text, apt_size text, apt_floor text, trade_date text, trade_price text)''')
conn.commit()
return
def jp_sqlite3_insert(conn, c, query):
c.execute(query)
conn.commit()
def jp_sqlite3_select(conn, c, query):
c.execute(query)
res = c.fetchone()
if res is None:
return False
return True
# The 10-year span allows at most 8 requests at a time (the data starts in June, so after finishing, fetch again from June)
# Paju-si Adong-dong fails
def realstate_trade_10year(conn, cursor):
d_code = {
'41150': '의정부시',
'41171': '안양만안구',
'41173': '안양동안구',
'41195': '부천원미구',
'41197': '부천소사구',
'41199': '부천오정구',
'43111': '청주상당구',
'43112': '청주서원구',
}
# '28140': '동구',
# '28170': '남구',
# '28185': '연수구',
# '28200': '남동구',
# '28237': '부평구',
# '28245': '계양구',
# '28260': '서구',
# '28710': '강화군',
# '28720': '옹진군',
# '29110': '동구',
# '29140': '서구',
# '29155': '남구',
# '29170': '북구',
# '29200': '광산구',
# '30110': '동구',
# '30140': '중구',
# '30170': '서구',
# '30200': '유성구',
# '30230': '대덕구',
# '31110': '중구',
# '31140': '남구',
# '31170': '동구',
# '31200': '북구',
# '31710': '울주군',
# '43113': '청주흥덕구',
# '43114': '청주청원구',
# '43130': '충주시',
# '43150': '제천시',
# '43720': '보은군',
# '43730': '옥천군',
# '43740': '영동군',
# '43745': '증평군',
# '43750': '진천군',
# '43760': '괴산군',
# '43770': '음성군',
# '43800': '단양군',
# '44131': '천안동남구',
# '44133': '천안서북구',
# '44150': '공주시',
# '44180': '보령시',
# '44200': '아산시',
# '44210': '서산시',
# '44230': '논산시',
# '44250': '계룡시',
# '44270': '당진시',
# '44710': '금산군',
# '44760': '부여군',
# '44770': '서천군',
# '44790': '청양군',
# '44800': '홍성군',
# '44810': '예산군',
# '44825': '태안군',
# '45111': '전주완산구',
# '45113': '전주덕진구',
# '45130': '군산시',
# '45140': '익산시',
# '45180': '정읍시',
# '45190': '남원시',
# '45210': '김제시',
# '45710': '완주군',
# '45720': '진안군',
# '45730': '무주군',
# '45740': '장수군',
# '45750': '임실군',
# '45770': '순창군',
# '45790': '고창군',
# '45800': '부안군',
# '46110': '목포시',
# '46130': '여수시',
# '46150': '순천시',
# '46170': '나주시',
# '46230': '광양시',
# '46710': '담양군',
# '46720': '곡성군',
# '46730': '구례군',
# '46770': '고흥군',
# '46780': '보성군',
# '46790': '화순군',
# '46800': '장흥군',
# '46810': '강진군',
# '46820': '해남군',
# '46830': '영암군',
# '46840': '무안군',
# '46860': '함평군',
# '46870': '영광군',
# '46880': '장성군',
# '46890': '완도군',
# '46900': '진도군',
# '46910': '신안군',
# '47111': '포항남구',
# '47113': '포항북구',
# '47130': '경주시',
# '47150': '김천시',
# '47170': '안동시',
# '47190': '구미시',
# '47210': '영주시',
# '47230': '영천시',
# '47250': '상주시',
# '47280': '문경시',
# '47290': '경산시',
# '47720': '군위군',
# '47730': '의성군',
# '47750': '청송군',
# '47760': '영양군',
# '47770': '영덕군',
# '47820': '청도군',
# '47830': '고령군',
# '47840': '성주군',
# '47850': '칠곡군',
# '47900': '예천군',
# '47920': '봉화군',
# '47930': '울진군',
# '47940': '울릉군',
# '48121': '창원의창구',
# '48123': '창원성산구',
# '48125': '창원마산합포구',
# '48127': '창원마산회원구',
# '48129': '창원진해구',
# '48170': '진주시',
# '48220': '통영시',
# '48240': '사천시',
# '48250': '김해시',
# '48270': '밀양시',
# '48310': '거제시',
# '48330': '양산시',
# '48720': '의령군',
# '48730': '함안군',
# '48740': '창녕군',
# '48820': '고성군',
# '48840': '남해군',
# '48850': '하동군',
# '48860': '산청군',
# '48870': '함양군',
# '48880': '거창군',
# '48890': '합천군',
now = datetime.now()
# time_str = '%4d%02d' % (now.year, now.month)
time_str = '%4d%02d' % (now.year, now.month - 1)
for i in range(0, 10):
for j in range(1, 13):
if i == 0:
if j > now.month:
break
time_str = '%4d%02d' % (now.year - i, j)
apt_trade_url = os.environ.get('DATA_APT_TRADE_URL')
data_svc_key = os.environ.get('DATA_APT_API_KEY')
# apt_district_code
for district_code, district in d_code.items():
request_url = '%s?LAWD_CD=%s&DEAL_YMD=%s&serviceKey=%s' % (
apt_trade_url, district_code, time_str, data_svc_key)
request_realstate_trade(request_url, district, conn, cursor)
def is_exist_trade(district, dong, apt_name,
apt_built_year, apt_size, apt_floor,
trade_date, trade_price):
query = '''SELECT * FROM realestate WHERE \
district="%s" AND dong="%s" AND apt_name="%s" AND \
apt_built_year="%s" AND apt_size="%s" AND apt_floor="%s" AND \
trade_date="%s" AND trade_price="%s"
''' % (district, dong, apt_name, apt_built_year, apt_size, apt_floor,
trade_date, trade_price)
return jp_sqlite3_select(conn, cursor, query)
def request_realstate_trade(request_url, district, conn, cursor):
req = urllib.request.Request(request_url)
try:
res = urllib.request.urlopen(req)
except UnicodeEncodeError:
print('[OpenAPI] UnicodeEncodeError')
return
data = res.read().decode('utf-8')
soup = BeautifulSoup(data, 'html.parser')
if (soup.resultcode.string != '00'):
print('[OpenAPI] ', soup.resultmsg.string)
return
items = soup.findAll('item')
for item in items:
try:
infos = re.split('<.*?>', item.text)
except TypeError:
continue
try:
apt_size = float(infos[8])
except ValueError:
print(district, infos)
continue
trade_infos = infos[1:]
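        # Field-order assumption, based on the sample row commented near the bottom of this file:
        # 0=price, 1=built year, 2=trade year, 3=dong, 4=apt name, 5=month, 6=day, 7=size, 10=floor.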
for idx, info in enumerate(trade_infos):
if idx == 0:
trade_price = info.strip().replace(',', '')
elif idx == 1:
apt_built_year = info
elif idx == 2:
apt_trade_year = info
elif idx == 3:
dong = info
elif idx == 4:
apt_name = info
elif idx == 5:
apt_trade_month = info
elif idx == 6:
apt_trade_day = info
elif idx == 7:
apt_size = info
elif idx == 10:
apt_floor = info
trade_date = '%s-%02d-%s' % (apt_trade_year, int(apt_trade_month), apt_trade_day)
if is_exist_trade(district, dong, apt_name,
apt_built_year, apt_size, apt_floor,
trade_date, trade_price) is True:
continue
msg = "%s %s %s, %s/%s층 %s" % (
district, dong, apt_name, apt_size, apt_floor, trade_price)
print(msg)
query = '''INSERT OR REPLACE INTO realestate VALUES
("%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s")''' % (
district, dong, apt_name,
apt_built_year, apt_size, apt_floor,
trade_date, trade_price)
jp_sqlite3_insert(conn, cursor, query)
return
def realstate_trade(conn, cursor):
now = datetime.now()
time_str = '%4d%02d' % (now.year, now.month)
apt_trade_url = os.environ.get('DATA_APT_TRADE_URL')
data_svc_key = os.environ.get('DATA_APT_API_KEY')
for district_code, district in LOC_CODE.items():
request_url = '%s?LAWD_CD=%s&DEAL_YMD=%s&serviceKey=%s' % (
apt_trade_url, district_code, time_str, data_svc_key)
request_realstate_trade(request_url, district, conn, cursor)
'''
종로구 [' 130,000', '2008', '2018', ' 무악동', '인왕산아이파크', '1', '21~31', '157.289', '60', '11110', '11']
'''
if __name__ == '__main__':
conn = sqlite3.connect('jp_korea.db')
cursor = conn.cursor()
jp_sqlite3_init(conn, cursor)
realstate_trade(conn, cursor)
conn.close()
|
python
|
#
# This is a dummy class to illustrate the use of cvui in a project
# that contains multiple files.
#
# Copyright (c) 2018 Fernando Bevilacqua <[email protected]>
# Licensed under the MIT license.
#
import cvui
class Class1:
def __init__(self):
self.checked = [False]
def renderInfo(self, frame):
cvui.window(frame, 10, 50, 100, 120, 'Info')
cvui.checkbox(frame, 15, 80, 'Checked', self.checked)
|
python
|
# from subtlepatterns.com
bg = """\
iVBORw0KGgoAAAANSUhEUgAAABIAAAANCAMAAACTkM4rAAAAM1BMVEXy8vLz8/P5+fn19fXt7e329vb4+Pj09PTv7+/u7u739/fw8PD7+/vx8fHr6+v6+vrs7Oz2LjW2AAAAkUlEQVR42g3KyXHAQAwDQYAQj12ItvOP1qqZZwMMPVnd06XToQvz4L2HDQ2iRgkvA7yPPB+JD+OUPnfzZ0JNZh6kkQus5NUmR7g4Jpxv5XN6nYWNmtlq9o3zuK6w3XRsE1pQIEGPIsdtTP3m2cYwlPv6MbL8/QASsKppZefyDmJPbxvxa/NrX1TJ1yp20fhj9D+SiAWWLU8myQAAAABJRU5ErkJggg==
"""
|
python
|
#!/usr/bin/env python
# PNG2PDF - Graphic interface related class and methods.
from gi.repository import Gtk, Gio
from gi.repository.GdkPixbuf import Pixbuf, InterpType
from wand.image import Image
from wand.exceptions import BlobError
class GuiWindow(Gtk.Window):
"""
    PNG2PDF main window class, derives from the Gtk window class.
"""
def __init__(self):
"""
Creates main window.
"""
Gtk.Window.__init__(self, title='PNG2PDF')
self.set_border_width(10)
self.set_default_size(400, 200)
# self.set_icon_from_file("../misc/png2pdf.svg")
self.fileList = []
hb = Gtk.HeaderBar()
hb.set_show_close_button(True)
hb.props.title = 'PNG2PDF'
self.set_titlebar(hb)
addButton = Gtk.Button()
addButton.connect('clicked', self.addFile)
addIcon = Gio.ThemedIcon(name='list-add')
addImage = Gtk.Image.new_from_gicon(addIcon, Gtk.IconSize.BUTTON)
addButton.add(addImage)
hb.pack_start(addButton)
saveButton = Gtk.Button()
saveButton.connect('clicked', self.saveFile)
saveIcon = Gio.ThemedIcon(name='document-save')
saveImage = Gtk.Image.new_from_gicon(saveIcon, Gtk.IconSize.BUTTON)
saveButton.add(saveImage)
hb.pack_end(saveButton)
self.listStore = Gtk.ListStore(Pixbuf)
iconView = Gtk.IconView.new()
iconView.set_model(self.listStore)
iconView.set_pixbuf_column(0)
self.add(iconView)
def imgConvert(self, fileList=[], fileName=''):
"""
Convert images from fileList in pdf file named fileName.
"""
with Image() as img:
for file in fileList:
try:
img.read(filename=file)
except BlobError as e:
x = e.args[0]
raise IOError(x)
with img.convert('pdf') as converted:
try:
converted.save(filename=fileName)
except BlobError as e:
x = e.args[0]
raise IOError(x)
except IOError as e:
x = e.args[0]
raise IOError(x)
def addFile(self, widget):
"""
Add file (image) dialog.
"""
dialog = Gtk.FileChooserDialog("Please choose a file", self,
Gtk.FileChooserAction.OPEN,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
#self.add_filters(dialog)
response = dialog.run()
if response == Gtk.ResponseType.OK:
fileName = dialog.get_filename()
self.fileList.append(fileName)
pixBuf = Pixbuf.new_from_file_at_size(fileName, 120, 120)
self.listStore.append([pixBuf])
elif response == Gtk.ResponseType.CANCEL:
print("Cancel clicked")
dialog.destroy()
def saveFile(self, widget):
"""
Save file name (PDF) dialog.
"""
dialog = Gtk.FileChooserDialog("Please choose a folder", self,
Gtk.FileChooserAction.SAVE,
(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
"Select", Gtk.ResponseType.OK))
dialog.set_default_size(800, 400)
dialog.set_current_name('document.pdf')
response = dialog.run()
if response == Gtk.ResponseType.OK:
fileName = dialog.get_filename()
self.imgConvert(self.fileList, fileName)
elif response == Gtk.ResponseType.CANCEL:
print("Cancel clicked")
dialog.destroy()
class Converter:
def __init__(self):
self.win = GuiWindow()
self.win.connect('delete-event', Gtk.main_quit)
self.win.show_all()
Gtk.main()
|
python
|
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
METRIC_MAP = {
'consul_client_rpc': 'client.rpc',
'consul_client_rpc_failed': 'client.rpc.failed',
'consul_memberlist_degraded': 'memberlist.degraded',
'consul_memberlist_gossip': 'memberlist.gossip',
'consul_memberlist_health_score': 'memberlist.health.score',
'consul_memberlist_msg_alive': 'memberlist.msg.alive',
'consul_memberlist_msg_dead': 'memberlist.msg.dead',
'consul_memberlist_msg_suspect': 'memberlist.msg.suspect',
'consul_memberlist_probeNode': 'memberlist.probenode',
'consul_memberlist_pushPullNode': 'memberlist.pushpullnode',
'consul_memberlist_tcp_accept': 'memberlist.tcp.accept',
'consul_memberlist_tcp_connect': 'memberlist.tcp.connect',
'consul_memberlist_tcp_sent': 'memberlist.tcp.sent',
'consul_memberlist_udp_received': 'memberlist.udp.received',
'consul_memberlist_udp_sent': 'memberlist.udp.sent',
'consul_raft_state_leader': 'raft.state.leader',
'consul_raft_state_candidate': 'raft.state.candidate',
'consul_raft_state_apply': 'raft.apply',
'consul_raft_commitTime': 'raft.commitTime',
'consul_raft_leader_dispatchLog': 'raft.leader.dispatchLog',
'consul_raft_leader_lastContact': 'raft.leader.lastContact',
'consul_runtime_gc_pause_ns': 'runtime.gc_pause_ns',
'consul_serf_events': 'serf.events',
'consul_serf_coordinate_adjustment_ms': 'serf.coordinate.adjustment_ms',
'consul_serf_member_flap': 'serf.member.flap',
'consul_serf_member_join': 'serf.member.join',
'consul_serf_member_update': 'serf.member.update',
'consul_serf_member_left': 'serf.member.left',
'consul_serf_member_failed': 'serf.member.failed',
'consul_serf_msgs_received': 'serf.msgs.received',
'consul_serf_msgs_sent': 'serf.msgs.sent',
'consul_serf_queue_Event': 'serf.queue.event',
'consul_serf_queue_Intent': 'serf.queue.intent',
'consul_serf_queue_Query': 'serf.queue.query',
'consul_serf_snapshot_appendline': 'serf.snapshot.appendLine',
'consul_serf_snapshot_compact': 'serf.snapshot.compact',
# Available since 1.9.0
'consul_api_http': 'http.request',
'consul_raft_replication_installSnapshot': 'raft.replication.installSnapshot',
'consul_raft_replication_heartbeat': 'raft.replication.heartbeat',
'consul_raft_replication_appendEntries_rpc': 'raft.replication.appendEntries.rpc',
'consul_raft_replication_appendEntries_logs': 'raft.replication.appendEntries.logs',
}
|
python
|
import os as os
import numpy as np
import pandas as pd
import re
import botutils
zx=pd.read_csv('./recipies_full_v2.csv',index_col=0)
def Recuperador(ingredientes,df=zx,Criterio='Ingredientes'):
len_ing=[]
match=[]
qw=0
for i in range(len(df)):
len_ing.append(len(df.Ingredientes.iloc[i].split(',')))
df['Num_I']=len_ing
for j in range(len(df)):
for k in range(len(ingredientes)):
if ingredientes[k] in df[Criterio].iloc[j]:
qw+=1
match.append(qw)
qw=0
df['Match']=match
df['Dif']=df['Num_I']-df['Match']
df['Score']=df['Match']- 0.3*df['Dif']
df=df.sort_values('Score')
idxs=df.index[-3:].to_list()
idxs=[str(i) for i in idxs]
return idxs
def processTweet2(tweet,df=zx):
KWD=re.findall('#[\w]+',tweet)
KWD=[k[1:] for k in KWD]
KWD=botutils.lematize(KWD)
return Recuperador(KWD,df)
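# Illustrative note (hypothetical tweet, not part of the original script): a call like
# processTweet2('Hoy tengo #pollo y #arroz') pulls out the hashtags, lematizes them with
# botutils.lematize, and returns the indices (as strings) of the three best-scoring recipes.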
if __name__=="__main__":
pass
|
python
|
from flask import render_template, session, flash, request, redirect
import tags
import awstools
import contestmode
import language
from datetime import datetime, timedelta
def home():
if contestmode.contest():
return redirect(f'/contest/{contestmode.contestId()}')
userinfo = awstools.getCurrentUserInfo()
languages_inverse = language.get_languages_inverse()
if userinfo != None:
userSubmissionList = awstools.getSubmissionsList(1, None, userinfo['username'])
userSubmissionList = userSubmissionList[:8]
for i in userSubmissionList:
i['language'] = languages_inverse[i['language']]
else:
userSubmissionList = None
globalSubmissionList = sorted(awstools.getSubmissionsList(1, None, None),key=lambda x:x["subId"], reverse=True)
globalSubmissionList = globalSubmissionList[:8]
for i in globalSubmissionList:
i['language'] = languages_inverse[i['language']]
if userinfo != None:
username = userinfo["username"]
else:
username = ""
contestInfos = [i for i in awstools.getAllContests() if i["endTime"] != "Unlimited"]
if userinfo == None:
contestInfos = [i for i in contestInfos if i["public"]]
elif "admin" not in userinfo["role"]:
contestInfos = [i for i in contestInfos if (i["public"] or userinfo["username"] in i["users"])]
subsPerDay = awstools.getSubsPerDay()
credits_info = awstools.credits_page()
return render_template('home.html',
userinfo=userinfo,
globalSubmissionList=globalSubmissionList,
userSubmissionList=userSubmissionList,
contestInfos=contestInfos,
                           statistics=credits_info,
socket=contestmode.socket(),
subsPerDay=subsPerDay)
|
python
|
import socket
import sys
import traceback
import random
def upper_monitor():
examples = ["Normal#ru-RU#Здраствуйте, меня зовут Жулдз, я робот гид", "Happy#ru-RU#Я так рада что вы тут",
"Sad#ru-RU#Простите, я вас не поняла, можете, пожалуйста, перефразировать",
"Angry#ru-RU#Алё, тупое быдло, дай дорогу, королева идет", "Sexy#en-GB#Let`s do it, baby",
"Normal#None#None"]
message = examples[random.randint(0, 5)]
print(message)
return message
def lower_monitor():
examples = ["Kazkosmos", "KGS", "Matrix"]
    picture = examples[random.randint(0, 2)]  # randint is inclusive; an upper bound of 3 would raise IndexError
print(picture)
return picture
def client_thread(types_off, conn, ip, port, MAX_BUFFER_SIZE = 4096):
# the input is in bytes, so decode it
input_from_client_bytes = conn.recv(MAX_BUFFER_SIZE)
# MAX_BUFFER_SIZE is how big the message can be
# this is test if it's sufficiently big
siz = sys.getsizeof(input_from_client_bytes)
if siz >= MAX_BUFFER_SIZE:
print("The length of input is probably too long: {}".format(siz))
# decode input and strip the end of line
input_from_client = input_from_client_bytes.decode("utf8").rstrip()
print(input_from_client)
if types_off == 1:
res = upper_monitor()
else:
res = lower_monitor()
print("Result of processing {} is: {}".format(input_from_client, res))
vysl = res.encode("utf8") # encode the result string
conn.sendall(vysl) # send it to client
# conn.close() # close connection
# print('Connection ' + ip + ':' + port + " ended")
def start_server():
soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# this is for easy starting/killing the app
soc.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print('Socket 1 created')
soc2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# this is for easy starting/killing the app
soc2.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
print('Socket 2 created')
try:
soc.bind(("192.168.8.104", 6666))
soc2.bind(("192.168.8.104", 7777))
print('Socket bind complete')
except socket.error as msg:
print('Bind failed. Error : ' + str(sys.exc_info()))
sys.exit()
# Start listening on socket
soc.listen(1)
soc2.listen(1)
print('Socket now listening')
# this will make an infinite loop needed for
# not reseting server for every client
conn, addr = soc.accept()
ip, port = str(addr[0]), str(addr[1])
print('Accepting connection 1 from ' + ip + ':' + port)
conn2, addr2 = soc2.accept()
    ip2, port2 = str(addr2[0]), str(addr2[1])
print('Accepting connection 2 from ' + ip2 + ':' + port2)
while True:
try:
print("First one:")
client_thread(1, conn, ip, port)
except:
print("Terrible error!")
traceback.print_exc()
soc.close()
try:
print("Second one:")
client_thread(0, conn2, ip2, port2)
except:
print("Terrible error!")
traceback.print_exc()
            soc2.close()
start_server()
|
python
|
"""
https://tinkerpop.apache.org/docs/current/reference/
"""
from typing import Hashable, Generator, Iterable
import time
import pandas as pd
from gremlin_python.structure.graph import Graph
from gremlin_python.process.graph_traversal import __, GraphTraversalSource
from gremlin_python.driver.driver_remote_connection import DriverRemoteConnection
from .backend import Backend
ID = "__id"
EDGE_NAME = "__edge"
NODE_NAME = "__node"
def _node_to_metadata(n):
return {k if isinstance(k, str) else k.name: v for k, v in n.items()}
class GremlinBackend(Backend):
"""
A backend instance for Gremlin-compatible graph databases.
"""
def __init__(self, graph: GraphTraversalSource, directed: bool = True):
"""
Create a new Backend instance wrapping a Gremlin endpoint.
Arguments:
            graph (GraphTraversalSource): The Gremlin traversal source to wrap
            directed (bool: True): Whether to treat the backend graph as directed
Returns:
None
"""
self._g = graph
def is_directed(self) -> bool:
"""
Return True if the backend graph is directed.
The Gremlin-backed datastore is always directed.
Arguments:
None
Returns:
bool: True if the backend graph is directed.
"""
return True
def add_node(self, node_name: Hashable, metadata: dict):
"""
Add a new node to the graph.
Arguments:
node_name (Hashable): The ID of the node
metadata (dict: None): An optional dictionary of metadata
Returns:
Hashable: The ID of this node, as inserted
"""
if self.has_node(node_name):
# Retrieve the existing node; we will update the props.
v = self._g.V().has(ID, node_name)
else:
v = self._g.addV().property(ID, node_name)
for key, val in metadata.items():
v = v.property(key, val)
return v.toList()[0]
def get_node_by_id(self, node_name: Hashable):
"""
Return the data associated with a node.
Arguments:
node_name (Hashable): The node ID to look up
Returns:
dict: The metadata associated with this node
"""
try:
return _node_to_metadata(
self._g.V().has(ID, node_name).valueMap(True).toList()[0]
)
except IndexError as e:
raise KeyError() from e
def has_node(self, u: Hashable) -> bool:
"""
        Return True if the node exists in the graph.
        Arguments:
            u (Hashable): The node ID to look up
        Returns:
            bool: True if the node exists, False otherwise
"""
try:
self.get_node_by_id(u)
return True
except KeyError:
return False
def remove_node(self, node_name: Hashable):
"""
Remove a node.
Arguments:
            node_name (Hashable): The node ID to remove
        Returns:
            The result of the drop() traversal
"""
return self._g.V().has(ID, node_name).drop().toList()
def all_nodes_as_iterable(self, include_metadata: bool = False) -> Generator:
"""
Get a generator of all of the nodes in this graph.
Arguments:
include_metadata (bool: False): Whether to include node metadata in
the response
Returns:
Generator: A generator of all nodes (arbitrary sort)
"""
if include_metadata:
return iter(
[
{n[ID][0]: _node_to_metadata(n)}
for n in self._g.V().valueMap(True).toList()
]
)
else:
return iter([n[ID] for n in self._g.V().project(ID).by(ID).toList()])
def add_edge(self, u: Hashable, v: Hashable, metadata: dict):
"""
Add a new edge to the graph between two nodes.
If the graph is directed, this edge will start (source) at the `u` node
and end (target) at the `v` node.
Arguments:
u (Hashable): The source node ID
v (Hashable): The target node ID
metadata (dict): Optional metadata to associate with the edge
Returns:
Hashable: The edge ID, as inserted.
"""
try:
self.get_edge_by_id(u, v)
e = self._g.V().has(ID, u).outE().as_("e").inV().has(ID, v).select("e")
except IndexError:
if not self.has_node(u):
self.add_node(u, {})
if not self.has_node(v):
self.add_node(v, {})
e = (
self._g.V()
.has(ID, u)
.addE(EDGE_NAME)
.as_("e")
.to(__.V().has(ID, v))
.select("e")
)
for key, val in metadata.items():
e = e.property(key, val)
return e.toList()
def all_edges_as_iterable(self, include_metadata: bool = False) -> Generator:
"""
Get a list of all edges in this graph, arbitrary sort.
Arguments:
include_metadata (bool: False): Whether to include edge metadata
Returns:
Generator: A generator of all edges (arbitrary sort)
"""
if include_metadata:
return iter(
[
(e["source"], e["target"], _node_to_metadata(e["properties"]))
for e in (
self._g.V()
.outE()
.project("target", "source", "properties")
.by(__.inV().values(ID))
.by(__.outV().values(ID))
.by(__.valueMap(True))
.toList()
)
]
)
return iter(
[
(e["source"], e["target"])
for e in self._g.V()
.outE()
.project("target", "source")
.by(__.inV().values(ID))
.by(__.outV().values(ID))
.toList()
]
)
def get_edge_by_id(self, u: Hashable, v: Hashable):
"""
Get an edge by its source and target IDs.
Arguments:
u (Hashable): The source node ID
v (Hashable): The target node ID
Returns:
dict: Metadata associated with this edge
"""
return (
self._g.V()
.has(ID, u)
.outE()
.as_("e")
.inV()
.has(ID, v)
.select("e")
.properties()
.toList()
)[0]
def get_node_neighbors(
self, u: Hashable, include_metadata: bool = False
) -> Generator:
"""
Get a generator of all downstream nodes from this node.
Arguments:
u (Hashable): The source node ID
Returns:
Generator
"""
if include_metadata:
return {
e["target"]: _node_to_metadata(e["properties"])
for e in (
self._g.V()
.has(ID, u)
.outE()
.project("target", "source", "properties")
.by(__.inV().values(ID))
.by(__.outV().values(ID))
.by(__.valueMap(True))
.toList()
)
}
return self._g.V().has(ID, u).out().values(ID).toList()
def get_node_predecessors(
self, u: Hashable, include_metadata: bool = False
) -> Generator:
"""
        Get a generator of all upstream (predecessor) nodes of this node.
Arguments:
u (Hashable): The source node ID
Returns:
Generator
"""
if include_metadata:
return {
e["source"]: e
for e in (
self._g.V()
.has(ID, u)
.inE()
.project("target", "source", "properties")
.by(__.inV().values(ID))
.by(__.outV().values(ID))
.by(__.valueMap(True))
.toList()
)
}
        # Collect predecessor IDs by walking incoming edges from u.
        return self._g.V().has(ID, u).in_().values(ID).toList()
    def get_node_count(self) -> int:
"""
Get an integer count of the number of nodes in this graph.
Arguments:
None
Returns:
int: The count of nodes
"""
return self._g.V().count().toList()[0]
def teardown(self) -> None:
self._g.V().drop().toList()
|
python
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management import BaseCommand
class Command(BaseCommand):
"""Djanho commandf to pause executuoin until datbase is availane"""
def handle(self, *args, **options):
        self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
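        # Assuming this module lives at <app>/management/commands/wait_for_db.py, it is typically
        # invoked as `python manage.py wait_for_db` (the file/command name here is an assumption).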
|
python
|
import csv
import requests
def getArray():
# To use a local file, comment out this part, and uncomment the next part
r = requests.get('https://github.com/HeardLibrary/digital-scholarship/raw/master/code/pylesson/challenge4/cartoons.csv')
fileText = r.text.split('\n')
if fileText[len(fileText)-1] == '':
fileText = fileText[0:len(fileText)-1]
readerObject = csv.DictReader(fileText)
cartoon = []
for row in readerObject:
cartoon.append(row)
'''
fileObject = open('cartoons.csv', 'r', newline='', encoding='utf-8')
readerObject = csv.DictReader(fileObject)
cartoon = []
for row in readerObject:
cartoon.append(row)
fileObject.close()
'''
return cartoon
def getWikidata(characterId):
endpointUrl = 'https://query.wikidata.org/sparql'
query = '''select distinct ?property ?value
where {
<''' + characterId + '''> ?propertyUri ?valueUri.
?valueUri <http://www.w3.org/2000/01/rdf-schema#label> ?value.
?genProp <http://wikiba.se/ontology#directClaim> ?propertyUri.
?genProp <http://www.w3.org/2000/01/rdf-schema#label> ?property.
FILTER(substr(str(?propertyUri),1,36)="http://www.wikidata.org/prop/direct/")
FILTER(LANG(?property) = "en")
FILTER(LANG(?value) = "en")
}'''
# The endpoint defaults to returning XML, so the Accept: header is required
r = requests.get(endpointUrl, params={'query' : query}, headers={'Accept' : 'application/sparql-results+json'})
data = r.json()
statements = data['results']['bindings']
return statements
# Main routine
cartoon = getArray()
inputCharacterName = input("What's the name of the character? ")
found = False
for characterIndex in range(len(cartoon)):  # DictReader already consumed the header, so index 0 is the first data row
if inputCharacterName.lower() in cartoon[characterIndex]['name'].lower():
found = True
print('\n') # skip 2 lines
responseString = cartoon[characterIndex]['name'] + ' works for ' + cartoon[characterIndex]['company'] + '.'
if cartoon[characterIndex]['nemesis'] != '':
responseString += ' Its enemy is ' + cartoon[characterIndex]['nemesis']
print(responseString)
# Here's where we get the data from the WikiData API
print() # skip 1 line
print("Here's what WikiData knows about " + cartoon[characterIndex]['name'] + ':')
statements = getWikidata(cartoon[characterIndex]['wikidataId'])
for statement in statements:
print(statement['property']['value'] + ': ' + statement['value']['value'])
if not found:
print("Didn't find that character")
|
python
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import logging
import os
import pytest
# Bail on the test if ly_test_tools doesn't exist.
pytest.importorskip("ly_test_tools")
import editor_python_test_tools.hydra_test_utils as hydra
import ly_test_tools.environment.file_system as file_system
logger = logging.getLogger(__name__)
test_directory = os.path.join(os.path.dirname(__file__), "EditorScripts")
@pytest.mark.parametrize("project", ["AutomatedTesting"])
@pytest.mark.parametrize("level", ["tmp_level"])
@pytest.mark.usefixtures("automatic_process_killer")
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
class TestRotationModifier(object):
@pytest.fixture(autouse=True)
def setup_teardown(self, request, workspace, project, level):
def teardown():
# delete temp level
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
# Setup - add the teardown finalizer
request.addfinalizer(teardown)
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
@pytest.mark.test_case_id("C4896922")
@pytest.mark.SUITE_periodic
@pytest.mark.dynveg_modifier
def test_RotationModifier_InstancesRotateWithinRange(self, request, editor, level, launcher_platform) -> None:
"""
Launches editor and run test script to test that rotation modifier works for all axis.
Manual test case: C4896922
"""
expected_lines = [
"'Spawner Entity' created",
"'Surface Entity' created",
"'Gradient Entity' created",
"Entity has a Vegetation Asset List component",
"Entity has a Vegetation Layer Spawner component",
"Entity has a Vegetation Rotation Modifier component",
"Entity has a Box Shape component",
"Entity has a Constant Gradient component",
"RotationModifier_InstancesRotateWithinRange: result=SUCCESS"
]
hydra.launch_and_validate_results(
request,
test_directory,
editor,
"RotationModifier_InstancesRotateWithinRange.py",
expected_lines,
cfg_args=[level]
)
@pytest.mark.test_case_id("C4814460")
@pytest.mark.SUITE_periodic
@pytest.mark.dynveg_modifier
def test_RotationModifierOverrides_InstancesRotateWithinRange(self, request, editor, level, launcher_platform) -> None:
expected_lines = [
"'Spawner Entity' created",
"'Surface Entity' created",
"'Gradient Entity' created",
"Entity has a Vegetation Layer Spawner component",
"Entity has a Vegetation Asset List component",
"Spawner Entity Box Shape|Box Configuration|Dimensions: SUCCESS",
"Entity has a Vegetation Rotation Modifier component",
"Spawner Entity Configuration|Embedded Assets|[0]|Rotation Modifier|Override Enabled: SUCCESS",
"Spawner Entity Configuration|Allow Per-Item Overrides: SUCCESS",
"Entity has a Constant Gradient component",
"Entity has a Box Shape component",
"Spawner Entity Configuration|Rotation Z|Gradient|Gradient Entity Id: SUCCESS",
"RotationModifierOverrides_InstancesRotateWithinRange: result=SUCCESS"
]
hydra.launch_and_validate_results(
request,
test_directory,
editor,
"RotationModifierOverrides_InstancesRotateWithinRange.py",
expected_lines,
cfg_args=[level]
)
|
python
|
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
# Read metadata from version file
def get_version():
with open("dtcwt_gainlayer/__init__.py") as f:
for line in f:
if line.startswith("__version__"):
return line[15:-2]
raise Exception("Could not find version number")
setup(
name='dtcwt_gainlayer',
author="Fergal Cotter",
version=get_version(),
author_email="[email protected]",
description=("Wavelet based image classifier for cifar datasets"),
license="MIT",
keywords="wavelet, complex wavelet, DT-CWT, tensorflow, cifar, classifier",
url="https://github.com/fbcotter/dtcwt_gainlayer",
packages=find_packages(exclude=["tests.*", "tests"]),
long_description=read('README.rst'),
classifiers=[
"Development Status :: 3 - Alpha",
"License :: Free To Use But Restricted",
"Programming Language :: Python :: 3",
],
include_package_data=True
)
# vim:sw=4:sts=4
|
python
|
# library to do some animations
# assumes you want to do an x/y plot and an energy plot
# empty placeholders for different plots
def plot_animations(fig, ax, t, E, r):
nFrames = len(t) #number frames in our animation
n_part = r.shape[0] #number particles
# initialize our plots with empty data
trajs = []
for i in range(n_part):
tr, = ax[0].plot([],[])
trajs.append(tr)
energy, = ax[1].plot([],[])
# set bounds based on data
ax[0].set_xlim(r.min(), r.max())
ax[0].set_ylim(r.min(), r.max())
ax[1].set_xlim(t.min(),t.max())
ax[1].set_ylim(E.min(),E.max())
# names
ax[0].set_xlabel('x in AU')
ax[0].set_ylabel('y in AU')
# energy
ax[1].set_xlabel('Time in seconds')
ax[1].set_ylabel('Normalized Energy')
# below are functions animation.FuncAnimation needs
# we need to initialize stuff - just setting data
def init():
# multiple planets
for trajectory in trajs:
#print(trajectory)
trajectory.set_data([],[])
energy.set_data([], [])
# note: we have to do some special formatting to
# get the correct output form for animate function
outarr = trajs.copy()
outarr.append(energy)
return tuple(outarr)
# now, each time we step through
def animate(i):
for j,trajectory in enumerate(trajs):
trajectory.set_data(r[j,0,:i],
r[j,1,:i])
energy.set_data(t[:i], E[:i])
# note: we have to do some special formatting to
# get the correct output form for animate function
outarr = trajs.copy()
outarr.append(energy)
return tuple(outarr)
return init, animate, nFrames
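
# A minimal usage sketch (illustrative only; the orbit data below are fabricated placeholders,
# not output of a real integrator). It wires the returned callbacks into FuncAnimation.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.animation import FuncAnimation
    t = np.linspace(0, 3.15e7, 200)                        # roughly one year in seconds
    E = np.cos(t / t.max() * 2 * np.pi) * 1e-3 + 1.0       # placeholder "energy" curve
    theta = np.linspace(0, 2 * np.pi, 200)
    r = np.array([[np.cos(theta), np.sin(theta)]])         # one particle on a unit circle, shape (1, 2, 200)
    fig, ax = plt.subplots(1, 2)
    init, animate, nFrames = plot_animations(fig, ax, t, E, r)
    anim = FuncAnimation(fig, animate, init_func=init, frames=nFrames, blit=True)
    plt.show()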
|
python
|
import os
import pathlib
import enum
import warnings
import colorama
import requests
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import invoke
class MoleculeDriver(enum.Enum):
docker = 1
lxd = 2
vagrant = 3
class TestPlatform(enum.Enum):
linux = 1
ubuntu = 2
centos = 3
def print_header(header_text):
print(
colorama.Fore.CYAN + colorama.Style.BRIGHT +
f" {header_text} ".center(80, "=") +
colorama.Style.RESET_ALL
)
def print_sub_header(sub_header_text):
print(
colorama.Fore.CYAN + colorama.Style.BRIGHT + "--" +
f" {sub_header_text} ".ljust(78, "-") +
colorama.Style.RESET_ALL
)
def print_success_message(success_message_text):
print(
colorama.Fore.GREEN + colorama.Style.BRIGHT +
f" {success_message_text}: Success ".center(80, "=") +
colorama.Style.RESET_ALL
)
def run_command(context, *args, **kwargs):
try:
return context.run(*args, **kwargs)
except invoke.exceptions.Failure:
print(
colorama.Fore.RED + colorama.Style.BRIGHT +
"Failure: error executing '" + args[0] + "' command" +
colorama.Style.RESET_ALL
)
raise
def get_base_config_path(driver_code, platform_code):
base_config = "molecule/molecule_base_{driver}_{platform}.yml".format(
driver=driver_code.name, platform=platform_code.name
)
return str(pathlib.Path(__file__).resolve().parent / base_config)
def get_molecule_scenarios(context):
scenarios = []
for child_obj in (pathlib.Path.cwd() / "molecule").iterdir():
if child_obj.is_dir():
if (child_obj / "molecule.yml").exists():
scenarios.append(child_obj.name)
return sorted(scenarios)
def run_molecule(context, command, scenario, driver, platform="linux", env={}):
driver_code = MoleculeDriver[driver.lower()]
platform_code = TestPlatform[platform.lower()]
molecule_env = env.copy()
if driver_code == MoleculeDriver.lxd:
molecule_env.update({"MOLECULE_USER_NAME": "root"})
elif driver_code == MoleculeDriver.vagrant:
molecule_env.update({"MOLECULE_USER_NAME": "vagrant"})
molecule_command = (
f"molecule --base-config {get_base_config_path(driver_code, platform_code)} {command}"
)
if scenario is not None:
molecule_command += f" -s {scenario}"
run_command(context, molecule_command, env=molecule_env, echo=True)
def get_parameter_value(host, ansible_var_name, param_value, default_value):
if host.backend.HAS_RUN_ANSIBLE:
ansible_var_value = host.ansible.get_variables().get(ansible_var_name, None)
else:
ansible_var_value = None
return_value = ansible_var_value if param_value is None else param_value
if return_value is None:
return_value = default_value
return return_value
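
# Precedence note for get_parameter_value (sketch, hypothetical names): an explicit param_value wins,
# then the Ansible variable (when Ansible has run on the host), then default_value, e.g.
# get_parameter_value(host, "app_user", None, "root") returns the Ansible var if set, else "root".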
def get_github_release_info(release_url):
if "AO_GITHUB_OAUTH_TOKEN" in os.environ:
headers = {"Authorization": "token " + os.environ["AO_GITHUB_OAUTH_TOKEN"]}
else:
headers = None
return requests.get(
"https://api.github.com/repos/" + release_url, headers=headers
).json()
|
python
|
# Copyright (c) 2016 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.FileHandler.FileReader import FileReader
from typing import Optional
class WorkspaceReader(FileReader):
def __init__(self) -> None:
super().__init__()
self._workspace_name = None # type: Optional[str]
## Read an entire workspace
def read(self, file_name: str):
pass
def workspaceName(self) -> Optional[str]:
return self._workspace_name
def setWorkspaceName(self, workspace_name: str) -> None:
self._workspace_name = workspace_name
|
python
|
import jetson.inference
import jetson.utils
import time
import cv2
import numpy as np
timeStamp=time.time()
fpsFilt=0
#Importing custom dataset and using ssd-mobilenet-v2 model
net=jetson.inference.detectNet('ssd-mobilenet-v2',['--model=models/YOUR MODEL/ssd-mobilenet.onnx','--input-blob=input_0','--output-cvg=scores'
,'--output-bbox=boxes','--labels=models/YOUR LABEL.txt'], threshold=0.5)
dispW=640
dispH=480
font=cv2.FONT_HERSHEY_SIMPLEX
#Set camera for video feed, might vary for different cameras such as /dev/video1 or csi://0
cam=cv2.VideoCapture('/dev/video0')
cam.set(cv2.CAP_PROP_FRAME_WIDTH, dispW)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, dispH)
while True:
_,img = cam.read()
height=dispH
width=dispW
frame=cv2.cvtColor(img,cv2.COLOR_BGR2RGBA).astype(np.float32)
    frame=jetson.utils.cudaFromNumpy(frame)
detections=net.Detect(frame, width, height)
for detect in detections:
ID=detect.ClassID
top=detect.Top
left=detect.Left
bottom=detect.Bottom
right=detect.Right
item=net.GetClassDesc(ID)
#Displays the object detected
print(item)
dt=time.time()-timeStamp
timeStamp=time.time()
fps=1/dt
fpsFilt=0.9*fpsFilt + 0.1*fps
cv2.putText(img, str(round(fpsFilt,1))+' fps',(0,30),font,1,(0,0,255),2)
cv2.imshow('camdisplay',img)
cv2.moveWindow('camdisplay',0,0)
if cv2.waitKey(1)==ord('q'):
break
cam.release()
cv2.destroyAllWindows()
|
python
|
import os
from flask import Flask, jsonify
from flask_restful import Api
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
app = Flask(__name__)
if 'APP_CONFIG_FILE' in os.environ:
app.config.from_envvar('APP_CONFIG_FILE')
else:
app.config.from_pyfile('config/production.py')
db = SQLAlchemy(app)
@app.before_first_request
def create_tables():
db.create_all()
jwt = JWTManager(app)
import views, models
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return models.RevokedTokenModel.is_jti_blacklisted(jti)
api = Api(app)
api.add_resource(views.ImageProcessor, '/send/image')
api.add_resource(views.UserRegistration, '/registration')
api.add_resource(views.UserLogin, '/login')
api.add_resource(views.UserLogoutAccess, '/logout/access')
api.add_resource(views.UserLogoutRefresh, '/logout/refresh')
api.add_resource(views.TokenRefresh, '/token/refresh')
api.add_resource(views.AllUsers, '/users')
|
python
|
import os
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler()
@sched.scheduled_job('interval', hours=2)
def main():
print('[!] Starting crawling.')
os.system('python -m scrapy crawl imdnews')
print('[!] Ending crawling.')
print('[!] Sending messages.')
os.system('python bot.py')
    print('[!] Messages sent.')
sched.start()
|
python
|
# coding: utf-8
from setuptools import setup, find_packages
import os
# not so bad: http://joebergantine.com/blog/2015/jul/17/releasing-package-pypi/
version = __import__('filer_addons').__version__
def read(fname):
# read the contents of a text file
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="django-filer-addons",
version=version,
url='https://github.com/rouxcode/django-filer-addons',
license='MIT Licence',
platforms=['OS Independent'],
description="django filer addons",
long_description=read('PYPI.rst'),
author=u'Ben Stähli',
author_email='[email protected]',
packages=find_packages(),
install_requires=(
'django-filer>=1.2',
),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
test_suite='runtests.main',
tests_require=(
'argparse', # needed on python 2.6
),
)
|
python
|
from distutils.core import setup
setup(
name="pinax.checkers",
version="1.1",
author="Pinax",
author_email="[email protected]",
url="https://github.com/pinax/pinax-checkers",
description="Style checker for Pinax and Eldarion OSS",
license="BSD",
packages=[
"pinax",
"pinax.checkers",
],
install_requires=["pylint>=0.25.0"],
)
|
python
|
#!/usr/bin/python3
import copy
import random
with open('tiles.txt') as fh:
lines = fh.readlines()
tilestrs = ''.join(lines).split('\n\n')
tilestrs = {int(t.split('\n')[0][5:9]):'\n'.join(t.strip().split('\n')[1:]) for t in tilestrs}
tiles = {}
for tilekey,tilestr in tilestrs.items():
tile = []
for rowstr in tilestr.split('\n'):
tile.append(rowstr.strip())
tiles[tilekey] = tile
def ptile(tile):
print('\n'.join([' '.join(r) for r in tile]))
def vreflect(tile):
    return list(reversed(tile))
def hreflect(tile):
return [list(reversed(t)) for t in tile]
def rotate(tile, degree):
ttile = tile
res = ttile
while degree > 0:
res = [['' for c in range(len(ttile))] for r in range(len(ttile[0]))]
for row in range(len(ttile[0])):
for col in range(len(ttile)):
res[row-1][col] = ttile[col][-row]
ttile = res
degree -= 1
return res
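# For example, rotate(['ab', 'cd'], 1) rotates the tile 90 degrees counter-clockwise,
# yielding [['b', 'd'], ['a', 'c']]: the right-hand column becomes the top row.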
def transform(tile, vref, href, rot):
ttile = tile
if vref:
ttile = vreflect(ttile)
if href:
ttile = hreflect(ttile)
if rot:
ttile = rotate(ttile, rot)
return ttile
def memohash(vref, href, rot):
return (100 if vref else 0) + (10 if href else 0) + rot
memo = {}
def memoget(id, vref, href, rot):
if id not in memo:
return None
return memo[id].get(memohash(vref, href, rot), None)
def memoset(id, vref, href, rot, tile):
if id not in memo:
memo[id] = {}
memo[id][memohash(vref, href, rot)] = tile
def variants(id):
vars = []
for vref in [False,True]:
for href in [False,True]:
for rot in range(0,4):
v = memoget(id, vref, href, rot)
if not v:
v = transform(tiles[id], vref, href, rot)
memoset(id, vref, href, rot, v)
vars.append((id,vref,href,rot))
return vars
def fit(tile, othertile, pos):
# Pos = 0 -> other is to the right
# Pos = 1 -> other is above
# Pos = 2 -> other is to the left
# Pos = 3 -> other is below
if pos == 0:
edge = [r[-1] for r in tile]
otheredge = [r[0] for r in othertile]
if pos == 1:
edge = tile[0]
otheredge = othertile[-1]
if pos == 2:
edge = [r[0] for r in tile]
otheredge = [r[-1] for r in othertile]
if pos == 3:
edge = tile[-1]
otheredge = othertile[0]
for (e,o) in zip(edge,otheredge):
if e != o:
return False
return True
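# For example, with tile = ['ab', 'cd'] and othertile = ['bx', 'dy'],
# fit(tile, othertile, 0) is True because tile's right edge ['b', 'd'] matches
# othertile's left edge ['b', 'd'].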
def memofithash(memotile, othermemotile, pos):
return str(memotile) + str(othermemotile) + str(pos)
memofitd = {}
def memofit(memotile, othermemotile, pos):
mfh = memofithash(memotile, othermemotile, pos)
if mfh not in memofitd:
memofitd[mfh] = fit(memoget(*memotile),memoget(*othermemotile),pos)
return memofitd[mfh]
# I counted 144 tiles, so the puzzle is a 12x12 square. If we use one of the corners
# as the starting point, we need enough room for the whole puzzle to fill any one
# quadrant, i.e. a 23x23 grid. For algorithmic simplicity, add an extra border of
# empty slots around the edge, giving the 25x25 grid below.
grid = [[None for _ in range(25)] for _ in range(25)]
pool = list(tiles.keys())
random.shuffle(pool)
# Arbitrarily select tile 1669 as the starting point, with no transformations
grid[12][12] = (1669,0,0,0)
pool.remove(1669)
variants(1669)
def solve():
for row in range(len(grid)):
for col in range(len(grid)):
print(('[' + str(grid[row][col][0]) + ']' if grid[row][col] else '......'), end='')
print('')
print(pool)
for row in range(1, len(grid) - 1):
for col in range(1, len(grid[row]) - 1):
# If cell is already filled, we can't add a tile to it
if grid[row][col]:
continue
# If no neighbours are filled, don't waste time on this cell.
# This is the part that benefits from the extra border
right = grid[row][col+1]
above = grid[row-1][col]
left = grid[row][col-1]
below = grid[row+1][col]
if not right and not above and not left and not below:
continue
# Try all variants of all tiles from the pool
for id in pool:
for variant in variants(id):
if right and not memofit(variant, right, 0):
continue
if above and not memofit(variant, above, 1):
continue
if left and not memofit(variant, left, 2):
continue
if below and not memofit(variant, below, 3):
continue
# Found a variant that works. Remove from the pool, add to the
# grid, and recurse
idx = pool.index(id)
pool.remove(id)
grid[row][col] = variant
solve()
# If the pool is empty after recursing, we have a solution.
if not pool:
return
# Otherwise the solve failed and we are backtracking. Try
# the next variant.
grid[row][col] = None
pool.insert(idx,id)
solve()
for id,variants in memo.items():
for mh,variant in variants.items():
pruned = copy.deepcopy(variant)
pruned = pruned[1:-1]
pruned = [p[1:-1] for p in pruned]
memo[id][mh] = pruned
minrow = 0
for (idx,row) in enumerate(grid):
filled = 0
for cell in row:
if cell:
filled = 1
break
if filled:
minrow = idx
break
maxrow = 0
for (idx,row) in reversed(list(enumerate(grid))):
filled = 0
for cell in row:
if cell:
filled = 1
break
if filled:
maxrow = idx
break
mincol = 0
for (idx,cell) in enumerate(grid[minrow]):
if cell:
mincol = idx
break
maxcol = 0
for (idx,cell) in reversed(list(enumerate(grid[maxrow]))):
if cell:
maxcol = idx
break
trimmedgrid = grid[minrow:maxrow+1]
for idx,row in enumerate(trimmedgrid):
trimmedgrid[idx] = row[mincol:maxcol+1]
imagetiles = [[memoget(*c) for c in r] for r in trimmedgrid]
image = []
for tilerow in imagetiles:
for subrowidx in range(8):
subrow = []
for tile in tilerow:
subrow += tile[subrowidx]
image.append(subrow)
monsterimg = [list('                  # '),
              list('#    ##    ##    ###'),
              list(' #  #  #  #  #  #   ')]
monstervariants = []
for vref in [False,True]:
for href in [False,True]:
for rot in range(0,4):
monstervariants.append(transform(monsterimg, vref, href, rot))
for mvar in monstervariants:
for mrow in (mvar):
print(''.join(mrow))
print('')
inmonster = [[False for _ in r] for r in image]
def checkmonster(row, col, monster):
if row + len(monster) > len(image):
return False
if col + len(monster[0]) > len(image[row]):
return False
for mrow in range(len(monster)):
for mcol in range(len(monster[mrow])):
if monster[mrow][mcol] == '#' and image[row+mrow][col+mcol] != '#':
return False
return True
for row in range(len(image)):
for col in range(len(image[row])):
for mvar in monstervariants:
if checkmonster(row, col, mvar):
for mrow in range(len(mvar)):
for mcol in range(len(mvar[mrow])):
if mvar[mrow][mcol] == '#':
inmonster[row+mrow][col+mcol] = True
print('\n'.join([' '.join(r) for r in image]))
print('\n'.join(' '.join([{True:'#',False:' '}[c] for c in r]) for r in inmonster))
monstercount = 0
nonmonstercount = 0
for row in range(len(image)):
    for col in range(len(image[row])):
if image[row][col] != '#':
continue
if inmonster[row][col]:
monstercount += 1
else:
nonmonstercount += 1
print(nonmonstercount)
|
python
|
import model as mo
import view as vi
custlist=[]
page=-1
view = vi.viewer()
model = mo.model()
while True:
choice = view.hello()
if choice=="I":
custlist, page = model.inputI(custlist, page)
elif choice=="C":
custlist, page = view.inputC(custlist, page)
elif choice == 'P':
custlist, page = view.inputP(custlist, page)
elif choice == 'N':
custlist, page = view.inputN(custlist, page)
elif choice=='D':
custlist, page = model.inputD(custlist, page)
elif choice=="U":
custlist, page = model.inputU(custlist, page)
elif choice=="S":
view.search()
elif choice=="Q":
model.write(custlist)
elif choice=="FU":
model.fupdate(custlist)
else:
break
|
python
|
def add_time(start, duration, start_day=''):
days = 0
real_duration = 0
days_of_week = [
'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday'
]
hours_in_day = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24
]
start_hours = 0
start_minutes = 0
new_time = 0
end_day = ''
start_day = start_day.lower()
final_am_pm = None
n_days_later = '({} days later)'
final_hour = 0
final_time = '{}:{}'
final_minutes = 0
loop_count_hours = 0
def day_finder(day_counter):
nonlocal end_day
nonlocal days
day_counter = int(days)
start_day_index = days_of_week.index(start_day)
for g in days_of_week[start_day_index:]:
if day_counter > 0:
day_counter -= 1
elif day_counter == 0:
end_day = g
break
if end_day == '':
while day_counter >= 0:
for y in range(len(days_of_week)):
if day_counter > 0:
day_counter -= 1
elif day_counter == 0:
end_day = days_of_week[y]
day_counter -= 1
break
else:
break
def hour_finder(hour_counter):
nonlocal loop_count_hours
nonlocal final_am_pm
nonlocal final_hour
nonlocal hours_counter
nonlocal days
hour_counter = hours_counter
start_hour_index = hours_in_day.index(int(start_hours))
for j in hours_in_day[start_hour_index:]:
if hour_counter > 0:
hour_counter -= 1
if hour_counter == 0:
end_hour = hours_in_day[j]+1
final_hour = end_hour
if final_hour > 24:
final_am_pm = 'AM'
elif hour_counter == 0:
end_hour = hours_in_day[j]
if end_hour > 12:
final_am_pm = 'PM'
final_hour = end_hour
break
else:
final_hour = end_hour
final_am_pm = 'AM'
break
break
if j == hours_in_day[-1]:
loop_count_hours += 1
days += 1
if hour_counter > 0:
for z in range(len(hours_in_day)):
if hour_counter > 0:
hour_counter -= 1
elif hour_counter == 0:
end_hour = hours_in_day[z]+1
if end_hour > 12:
final_am_pm = 'PM'
final_hour = end_hour
break
else:
final_am_pm = 'AM'
final_hour = end_hour
break
else:
break
if z == hours_in_day[-1]:
loop_count_hours += 1
split_first = start.split(':')
start_hours = split_first[0]
split_second = split_first[1].split()
start_minutes = int(split_second[0])
if split_second[1] == 'PM':
start_hours = int(start_hours)+12
split_d = duration.split(':')
split_d_hours = int(split_d[0])
split_d_minutes = int(split_d[1])
if split_d_hours > len(range(24)):
days = int(split_d_hours // 24)
extra_hours = int(split_d_hours % 24)
real_duration = extra_hours
else:
real_duration = int(split_d_hours)
hours_counter = int(real_duration)
days_counter = int(days)
total_minutes = start_minutes + split_d_minutes
hour_finder(hours_counter)
if total_minutes > 60:
final_minutes = total_minutes - 60
int(final_hour)
final_hour += 1
if final_hour >= 12:
final_am_pm = 'PM'
if final_hour == 24:
final_am_pm = 'AM'
days += 1
if final_hour > 12:
final_hour -= 12
if final_hour > 12:
final_hour -= 12
else:
final_minutes = total_minutes
if final_hour == 0:
final_hour = 12
if final_hour > 12:
final_hour -= 12
if final_hour > 12:
final_hour -= 12
if final_minutes <= len(range(9)):
final_time = '{}:0{}'
if start_day != '':
day_finder(days_counter)
new_time = str(final_time.format(final_hour, final_minutes)) + ' ' + str(final_am_pm) + ',' + ' ' + end_day.capitalize() + ' ' + n_days_later.format(
days)
if days == 1 and loop_count_hours == 1:
new_time = str(final_time.format(final_hour, final_minutes)) + ' ' + \
str(final_am_pm) + ',' + ' ' + end_day.capitalize() + ' ' + '(next day)'
elif days == 0 and loop_count_hours == 0:
            new_time = str(final_time.format(final_hour, final_minutes)) + \
' ' + str(final_am_pm) + ',' + ' ' + end_day.capitalize()
elif start_day == '':
if days < 1:
new_time = str(final_time.format(final_hour, final_minutes)) + ' ' + str(final_am_pm)
elif days > 1:
new_time = str(final_time.format(final_hour, final_minutes)) + ' ' + str(final_am_pm) + ' ' + n_days_later.format(
days)
if days == 1:
new_time = str(final_time.format(final_hour, final_minutes)) + \
' ' + str(final_am_pm) + ' ' + '(next day)'
return new_time
|
python
|
import json
from flask import request, send_file
from flask_restplus import Namespace, Resource, marshal
from .utils import *
from polylogyx.utils import require_api_key
from polylogyx.dao import carves_dao as dao
from polylogyx.dao import nodes_dao as nodedao
from polylogyx.wrappers import parent_wrappers as parentwrapper
from polylogyx.wrappers import carve_wrappers as wrapper
from polylogyx.constants import PolyLogyxServerDefaults
from polylogyx.models import DistributedQueryTask,db,CarveSession
ns = Namespace('carves', description='Carves related operations')
@require_api_key
@ns.route('/', endpoint='node_carves_list')
@ns.doc(params={'host_identifier': 'Host identifier of the Node'})
class NodeCarvesList(Resource):
'''lists out the carves for a specific node when host_identifier given otherwise returns all carves'''
parser = requestparse(['host_identifier'],[str],["host identifier of the node"])
@ns.expect(parser)
def post(self):
carves = None
status = 'success'
host_identifier = self.parser.parse_args()['host_identifier']
if host_identifier:
node = nodedao.get_node_by_host_identifier(host_identifier)
if not node:
status = 'failure'
message = 'Node with this identifier does not exist'
else:
carves = dao.get_carves_by_node_id(node.id)
carves = marshal(carves, wrapper.carves_wrapper)
message = 'Successfully fetched the carves'
else:
carves = dao.get_carves_all()
carves = marshal(carves, wrapper.carves_wrapper)
message = 'Successfully fetched the carves'
if not carves: message = "carves data doesn't exists for the input given"
return marshal(respcls(message,status,carves),parentwrapper.common_response_wrapper)
@require_api_key
@ns.route('/download/<string:session_id>', endpoint='download_carves')
@ns.doc(params={'session_id': 'session id'})
class DownloadCarves(Resource):
'''download carves through session id'''
def get(self, session_id):
status = 'failure'
message = 'Data missing'
if not session_id:
message = 'Please provide a session id'
else:
carve_session = dao.get_carves_by_session_id(session_id)
if carve_session:
status = 'success'
message = 'Successfully fetched the carves'
print ('file is : '+PolyLogyxServerDefaults.BASE_URL + '/carves/' + carve_session.node.host_identifier + '/'+carve_session.archive)
data = send_file(PolyLogyxServerDefaults.BASE_URL + '/carves/' + carve_session.node.host_identifier + '/'+ carve_session.archive , as_attachment=True, attachment_filename='carve_session.archive')
return data
else:
message = 'This session id does not exist'
return marshal(respcls(message,status), parentwrapper.common_response_wrapper, skip_none=True)
@require_api_key
@ns.route('/query/<int:query_id>/<string:host_identifier>', endpoint='get_carves_by_query_id')
@ns.doc(params={'query_id': 'query id','host_identifier': 'host identifier'})
class CarveSessionByQueryId(Resource):
'''download carves through session id'''
def post(self, query_id,host_identifier):
status = 'failure'
message = 'Data missing'
if not query_id:
message = 'Please provide a query id'
else:
if host_identifier:
node = nodedao.get_node_by_host_identifier(host_identifier)
if not node:
status = 'failure'
message = 'Node with this identifier does not exist'
else:
dqt=db.session.query(DistributedQueryTask).filter(DistributedQueryTask.distributed_query_id==query_id).filter(DistributedQueryTask.node_id==node.id).first()
if dqt:
carve_session=db.session.query(CarveSession).filter(CarveSession.request_id==dqt.guid).first()
carve_session = marshal(carve_session, wrapper.carves_wrapper)
if carve_session:
status = "success"
message="Successfully fetched the carve"
return marshal(respcls(message, status, carve_session), parentwrapper.common_response_wrapper)
else:
message="carve not started"
else:
message="query id provided is invalid"
return marshal(respcls(message,status), parentwrapper.common_response_wrapper, skip_none=True)
|
python
|
#!/usr/bin/python3
import re
with open("sar.txt") as fp:
reading = False
for l in fp:
s = l.split()
if len(s) == 0:
continue
m = re.match("[\d]{3,5}", s[0])
if (s[0] != "TOTAL") and (s[1] != "DO") and (m is None):
continue
if m is not None:
if reading:
print("{0:4s}\t{1:3s}\t{2:20s}\t{3:4s}\t{4:2s}".format(c,t,n,f,sm))
reading = False
reading = True
c = s[0]
t = s[1]
n = s[2]
sm = s[-5]
if (sm == 'A') or (sm == 'S1') or (sm == 'S2'):
sm = s[-6]
i = 3
while s[i] != "T" and s[i] != "P" and s[i] != "T-P" and s[i] != "TP":
n += " " + s[i]
i += 1
if s[0] == "TOTAL" and s[1] == "DO":
f = s[6]
|
python
|
#Jackknife reduction templates for NIRC2 and OSIRIS pipelines.
#Author: Sean Terry
def jackknife():
"""
Do the Jackknife data reduction.
"""
##########
#
# NIRC2 Format
#
##########
##########
# Ks-band reduction
##########
# Nite 1
target = 'MB07192'
sci_files1 = list(range(173, 177+1))
sky_files1 = list(range(206, 215+1))
refSrc1 = [385., 440.] #This is the target nearest to center
sky.makesky(sky_files1, 'nite1', 'ks', instrument=nirc2)
data.clean(sci_files1, 'nite1', 'ks', refSrc1, refSrc1, instrument=nirc2)
# Nite 2
sci_files2 = list(range(195, 203+1))
sky_files2 = list(range(206, 215+1))
refSrc2 = [387., 443.] #This is the target nearest to center
sky.makesky(sky_files2, 'nite2', 'ks', instrument=nirc2)
data.clean(sci_files2, 'nite2', 'ks', refSrc2, refSrc2, instrument=nirc2)
#-----------------
sci_files = sci_files1 + sci_files2
for i in enumerate(sci_files, start=1):
jack_list = sci_files[:]
jack_list.remove(i[1])
data.calcStrehl(jack_list, 'ks', instrument=nirc2)
data.combine(jack_list, 'ks', '27maylgs', trim=1, weight='strehl',
instrument=nirc2, outSuffix='_' + str(i[0]))
os.chdir('reduce')
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
def jackknife():
"""
Do the Jackknife data reduction.
"""
##########
#
# OSIRIS Format
#
##########
##########
# Kp-band reduction
##########
target = 'OB06284'
sci_files = ['i200810_a004{0:03d}_flip'.format(ii) for ii in range(2, 26+1)]
sky_files = ['i200810_a007{0:03d}_flip'.format(ii) for ii in range(2, 6+1)]
refSrc = [1071., 854.] # This is the target
sky.makesky(sky_files, target, 'kp_tdOpen', instrument=osiris)
for i in enumerate(sci_files, start=1):
jack_list = sci_files[:]
jack_list.remove(i[1])
data.clean(jack_list, target, 'kp_tdOpen', refSrc, refSrc, field=target, instrument=osiris)
data.calcStrehl(jack_list, 'kp_tdOpen', field=target, instrument=osiris)
data.combine(jack_list, 'kp_tdOpen', epoch, field=target,
trim=0, weight='strehl', instrument=osiris, outSuffix=str(i[0]))
os.chdir('reduce')
|
python
|
"""
This module contains helper functions for handling manipulation
of atom geometries
"""
import numpy as np
def _correct_vec(vec):
''' correct vectors in fractional coordinates
(assuming vectors minimal connection between 2 points)
'''
vec[np.where(vec >= 0.5)] -= 1.0
vec[np.where(vec < -0.5)] += 1.0
return(vec)
def find_max_empty_space(atoms, edir=3):
"""
NOTE: copied from ase-espresso! Python3 compatibility & stand-alone
Assuming periodic boundary conditions, finds the largest
continuous segment of free, unoccupied space and returns
its midpoint in scaled coordinates (0 to 1) in the edir direction (default z).
"""
position_array = atoms.get_scaled_positions()[..., edir - 1] # 0-indexed direction
position_array.sort()
differences = np.diff(position_array)
differences = np.append(differences, position_array[0] + 1 - position_array[-1]) # through the PBC
max_diff_index = np.argmax(differences)
if max_diff_index == len(position_array) - 1:
return (position_array[0] + 1 + position_array[-1]) / 2. % 1 # should be < 1 in cell units
else:
return (position_array[max_diff_index] + position_array[max_diff_index + 1]) / 2.
def get_CN(atoms, rcut, type_a='*', type_b='*'):
rpos = atoms.get_scaled_positions(); cell = atoms.get_cell()
inds = []
for ty in [type_a,type_b]:
if ty == '*':
ty = list(range(len(atoms)))
else:
ty = np.array([np.where(atoms.get_atomic_numbers() == t)[0] \
for t in ty]).flatten()
inds.append(ty)
cns = []
for i in range(len(inds[0])):
cns.append(__get_immediate_CN(rpos[inds[1],:],rpos[i,:],cell,rcut).size - 1)
return(np.array(inds[0]), np.array(cns))
def __get_immediate_CN(pos_array,pos,cell,rcut):
''' function to calculate distance array (pos_array - pos) and determine
entries within distance rcut
input: pos_array = positions which to calculate distances from
pos = origin position
cell = transformation for distance vectors
rcut = cutoff for which to obtain points within distance
output: cord = entries of points in pos_array within distance rcut
'''
dvec = _correct_vec(pos_array-pos)
dvec = np.dot(dvec,cell)
dist = np.linalg.norm(dvec,axis=1)
cord = np.where(dist <= rcut)[0]
return(cord)
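# A minimal usage sketch (assuming `atoms` is an ase.Atoms object; when type_a/type_b
# are given as lists, the entries are atomic numbers, e.g. [8] for oxygen):
#
#   indices, cns = get_CN(atoms, rcut=3.0, type_a='*', type_b=[8])
#   # cns[k] counts the type_b atoms within rcut of atom indices[k]
#   # (one entry is subtracted to drop the atom's own zero self-distance)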
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets
from src.gui.configtool.connectionControl import ConnectionControlGroupBox
from src.gui.configtool.controlLoopConfig import ControlLoopGroupBox
from src.gui.configtool.deviceJoggingControl import DeviceJoggingControl
from src.gui.configtool.droDisplayWidget import DROGroupBox
from src.gui.configtool.generalControls import GeneralControls
from src.gui.configtool.generalSettingsWidget import GeneralSettingsGroupBox
from src.gui.configtool.graphicWidget import SimpleFOCGraphicWidget
from src.gui.configtool.pidConfiguration import PidGroupBox
from src.gui.configtool.torqueConfig import TorqueGroupBox
from src.gui.sharedcomnponets.commandLineInterface import CommandLineWidget
from src.gui.sharedcomnponets.sharedcomponets import (WorkAreaTabWidget,
GUIToolKit)
from src.simpleFOCConnector import SimpleFOCDevice
class DeviceConfigurationTool(WorkAreaTabWidget):
def __init__(self, parent=None):
super().__init__(parent)
self.device = SimpleFOCDevice.getInstance()
self.setObjectName('DeviceConfigurationTool')
self.verticalLayout = QtWidgets.QVBoxLayout(self)
self.verticalLayout.setObjectName('verticalLayout')
self.counterWidget = QtWidgets.QWidget(self)
self.counterWidget.setObjectName('counterWidget')
self.horizontalLayout = QtWidgets.QHBoxLayout(self.counterWidget)
self.horizontalLayout.setObjectName('horizontalLayout')
self.digitalReadOut = DROGroupBox(self.counterWidget)
self.horizontalLayout.addWidget(self.digitalReadOut)
self.controlLoop = ControlLoopGroupBox(self.counterWidget)
self.horizontalLayout.addWidget(self.controlLoop)
self.torqueConfig = TorqueGroupBox(self.counterWidget)
self.horizontalLayout.addWidget(self.torqueConfig)
self.connectionControl = ConnectionControlGroupBox(self.counterWidget)
self.horizontalLayout.addWidget(self.connectionControl)
self.verticalLayout.addWidget(self.counterWidget)
self.graphicWidget = SimpleFOCGraphicWidget()
self.verticalLayout.addWidget(self.graphicWidget)
self.bottomWidget = QtWidgets.QWidget(self)
self.bottomWidget.setObjectName('bottomWidget')
self.bottomHorizontalLayout = QtWidgets.QHBoxLayout(self.bottomWidget)
self.bottomHorizontalLayout.setObjectName('configureHorizontalLayout')
self.pidConfigurator = PidGroupBox(self.bottomWidget)
self.bottomHorizontalLayout.addWidget(self.pidConfigurator)
self.generalLayout = QtWidgets.QVBoxLayout()
self.generalDeviceSettings = GeneralSettingsGroupBox(self.bottomWidget)
self.generalControls = GeneralControls(self.bottomWidget)
self.generalLayout.addWidget(self.generalControls)
self.generalLayout.addWidget(self.generalDeviceSettings)
self.bottomHorizontalLayout.addLayout(self.generalLayout)
self.lasWidget = QtWidgets.QWidget(self)
self.lastVerticalLayout = QtWidgets.QVBoxLayout(self.lasWidget)
self.commandLine = CommandLineWidget(self)
self.lastVerticalLayout.addWidget(self.commandLine)
self.joggingControl = DeviceJoggingControl(self)
self.lastVerticalLayout.addWidget(self.joggingControl)
self.bottomHorizontalLayout.addWidget(self.lasWidget)
self.verticalLayout.addWidget(self.bottomWidget)
self.device.commProvider.commandDataReceived.connect(self.commandLine.publishCommandResponseData)
def getTabIcon(self):
return GUIToolKit.getIconByName('motor')
def getTabName(self):
return self.device.connectionID
def configureConnection(self, configvalues):
self.device.serialPortName = configvalues['serialPortName']
self.device.serialRate = configvalues['serialRate']
self.device.stopBits = configvalues['stopBits']
self.device.serialByteSize = configvalues['serialByteSize']
self.device.serialParity = configvalues['serialParity']
|
python
|
from protocols.video_protocols.UCSDProtocol import UCSDProtocol
from protocols.video_protocols.AvenueProtocol import AvenueProtocol
from protocols.video_protocols.ShanghaiTechProtocol import ShanghaiTechProtocol
from protocols.video_protocols.SubwayProtocol import SubwayProtocol
|
python
|
import logging
import re
from html import unescape
from urllib.parse import quote
from dvtag.utils import create_request_session
session = create_request_session()
class DoujinVoice():
def __init__(self, rjid: str) -> None:
self.rjid = rjid
self.dl_count = 0
self.url = ""
self.work_name = ""
self.work_image = ""
self.seiyus = []
self.circle = ""
self.sale_date = ""
self._init_metadata()
self._add_metadata()
self._get_cover()
def _add_metadata(self):
html = session.get(self.url).text
try:
pattern = r'<th>声優</th>[\s\S]*?<td>[\s\S]*?(<a[\s\S]*?>[\s\S]*?)</td>'
seiyu_list_html = re.search(pattern, html).group(1)
pattern = r'<a[\s\S]*?>(.*?)<'
for seiyu_html in re.finditer(pattern, seiyu_list_html):
self.seiyus.append(unescape(seiyu_html.group(1)))
except AttributeError as e:
logging.error("Cannot get artists from {}: {}".format(
self.rjid, e))
try:
pattern = r"<th>サークル名</th>[\s\S]*?<a[\s\S]*?>(.*?)<"
circle = re.search(pattern, html).group(1)
self.circle = unescape(circle)
except AttributeError as e:
logging.error("Cannot get circle from {}: {}".format(self.rjid, e))
# get sale date
pattern = r'www\.dlsite\.com/maniax/new/=/year/([0-9]{4})/mon/([0-9]{2})/day/([0-9]{2})/'
match = re.search(pattern, html)
if match:
self.sale_date = "{}-{}-{}".format(match.group(1), match.group(2),
match.group(3))
def _init_metadata(self):
rsp = session.get(
"https://www.dlsite.com/maniax/product/info/ajax?product_id=" +
self.rjid)
try:
json_data = rsp.json()[self.rjid]
self.dl_count = int(json_data["dl_count"])
self.url = json_data["down_url"].replace("download/split",
"work").replace(
"download", "work")
self.work_name = json_data["work_name"]
self.work_image = "https:" + json_data["work_image"]
except ValueError as e:
logging.error(
f"Cannot convert a response to json or convert dl_count to int with RJ-ID {self.rjid}: {e}",
)
except KeyError as e:
logging.error(e)
def _get_cover(self):
"""
Tries to fetch a better cover
"""
try:
search_url = "https://chobit.cc/s/?f_category=vo&q_keyword=" + quote(
self.work_name)
headers = {'cookie': 'showr18=1'}
search_result = session.get(search_url, headers=headers).text
href = re.search(r'work-work-name.*?<a.*href=\"(.*?)\"',
search_result).group(1)
detail_url = "https://chobit.cc" + href
detail = session.get(detail_url, headers=headers).text
self.work_image = re.search(r'albumart="(.*?)"', detail).group(1)
except Exception as e:
logging.warning(
f"Cannot fetch cover from chobit for {self.rjid}: {e}")
|
python
|
# vim: set tabstop=4 shiftwidth=4 expandtab
##############################################################################
# Written by: Ray Wang <[email protected]>
# Date: 01/13/2008
# Description: Application wrapper for datetimepicker_dropdown.py
# be called by ../datetimepicker_dropdown_ops.py
##############################################################################
"""Application wrapper for datetimepicker_dropdown.py"""
from strongwind import *
import time
class DateTimePickerDropDownFrame(accessibles.Frame):
"""the profile of the datetimepicker_dropdown sample"""
LABEL = 'The date you select is:'
LABEL_SPACE = ' '
LABEL_COMMA = ','
def __init__(self, accessible):
super(DateTimePickerDropDownFrame, self).__init__(accessible)
self.localtime = time.localtime()
self.panel = self.findPanel(None)
self.treetables = self.findAllTreeTables(None)
self.spinbuttons = self.findAllSpinButtons(None)
self.items = self.findAllTableCells(None, checkShowing=False)
self.weekdays = self.items[0:7]
self.months = self.items[7:]
self.spaces = self.findAllLabels(self.LABEL_SPACE)
self.commas = self.findAllLabels(self.LABEL_COMMA)
self.checkbox = self.findCheckBox(None)
self.weekday = self.treetables[0]
self.month = self.treetables[1]
self.day = self.spinbuttons[0]
self.year = self.spinbuttons[1]
self.dropdownbutton = self.findPushButton(None)
self.label = self.findLabel(self.LABEL)
def click(self, button):
procedurelogger.action("click %s" % button)
button.click()
def assertText(self, accessible, expected_text):
"""assert the accessible's text is equal to the expected text"""
procedurelogger.action('Check the text of: %s' % accessible)
actual_text = accessible.text
procedurelogger.expectedResult('Text is "%s"' % actual_text)
assert actual_text == expected_text, 'Text was "%s", expected "%s"' % \
(actual_text, expected_text)
def assertUneditableText(self, accessible, text):
'''
Ensure that the EditableText interface is not implemented for the
accessible
'''
procedurelogger.action('Attempt to set %s text to "%s"' % \
(accessible, text))
try:
# this uses the EditableText interface
accessible.text = text
except NotImplementedError:
return
assert False, "The Text interface should not be implemented for %s" % \
(accessible)
def assignValue(self, accessible, value):
procedurelogger.action('set "%s" value to "%s"' % (accessible, value))
accessible.value = value
def selectChild(self, accessible, index):
"""
Simply call strongwind's selectChild method but add some logging
information
"""
procedurelogger.action('Select index %s of "%s"' % (index, accessible))
accessible.selectChild(index)
def assertName(self, accessible, expected_name):
"""assert name is equal to the expected_name"""
        # this method is used to check whether the name of the spin button is
        # updated when the day or year number changes
procedurelogger.action('Assert the name of %s' % accessible)
        procedurelogger.expectedResult('%s expects its name to be "%s"' % \
(accessible, expected_name))
actual_name = accessible.name
assert actual_name == expected_name, \
'actual name is: %s, expected name is: %s' % \
(actual_name, expected_name)
def quit(self):
self.altF4()
|
python
|
from malcolm.yamlutil import check_yaml_names, make_block_creator
ADAndor3_driver_block = make_block_creator(__file__, "ADAndor3_driver_block.yaml")
ADAndor3_runnable_block = make_block_creator(__file__, "ADAndor3_runnable_block.yaml")
__all__ = check_yaml_names(globals())
|
python
|
import torch
import torch.nn as nn
import embeddings
from torch.autograd import Variable
import pdb
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
class RNNEncoder(nn.Module):
def __init__(self, ninp, nhid, nlayers, bsz):
super(RNNEncoder, self).__init__()
self.rnn = torch.nn.LSTM(ninp, nhid, nlayers)
self.nlayers = nlayers
self.nhid = nhid
def forward(self, emb, seq_len, hidden):
seq_len, perm_idx = seq_len.sort(0, descending=True)
emb = emb[:, perm_idx]
packed_input = pack_padded_sequence(emb, seq_len.int().cpu().numpy())
_, hidden = self.rnn(packed_input, hidden)
_, unperm_idx = perm_idx.sort(0)
return hidden[0][1][unperm_idx]
def init_hidden(self, batch_size):
weight = next(self.parameters())
return (weight.new_zeros(self.nlayers, batch_size, self.nhid),
weight.new_zeros(self.nlayers, batch_size, self.nhid))
class AttentionEncoder(nn.Module):
def __init__(self, ninp, nhid, nlayers, max_seq_length):
super(AttentionEncoder, self).__init__()
self.nhid = nhid
self.nlayers = nlayers
self.rnn = torch.nn.LSTM(ninp, nhid, nlayers)
self.attn = torch.nn.Linear(self.nhid, max_seq_length)
def forward(self, emb, seq_len, hidden):
seq_len, perm_idx = seq_len.sort(0, descending=True)
emb = emb[:, perm_idx]
packed_input = pack_padded_sequence(emb, seq_len.int().cpu().numpy())
output, hidden = self.rnn(packed_input, hidden)
output, lengths = pad_packed_sequence(output)
hidden = hidden[0][1]
attn_weights = F.softmax(self.attn(hidden), dim=1)
max_length = torch.max(lengths).item()
attn = torch.bmm(attn_weights[:, :max_length].unsqueeze(0).transpose(0, 1), output.transpose(0, 1))
attn = F.relu(attn)
_, unperm_idx = perm_idx.sort(0)
return attn[unperm_idx]
def init_hidden(self, batch_size):
weight = next(self.parameters())
return (weight.new_zeros(self.nlayers, batch_size, self.nhid),
weight.new_zeros(self.nlayers, batch_size, self.nhid))
class AttentionEncoderV2(nn.Module):
def __init__(self, ninp, nhid, nlayers, max_seq_length):
super(AttentionEncoderV2, self).__init__()
self.nhid = nhid
self.nlayers = nlayers
self.rnn = torch.nn.LSTM(ninp, nhid, nlayers)
self.attn = torch.nn.Linear(self.nhid, self.nhid)
def forward(self, emb, seq_len, hidden):
seq_len, perm_idx = seq_len.sort(0, descending=True)
emb = emb[:, perm_idx]
packed_input = pack_padded_sequence(emb, seq_len.int().cpu().numpy())
output, hidden = self.rnn(packed_input, hidden)
output, _ = pad_packed_sequence(output)
hidden = hidden[0][1]
pre_attn_weights = F.softmax(self.attn(hidden), dim=1)
attn_weights = torch.bmm(pre_attn_weights.unsqueeze(1), output.transpose(0, 1).transpose(1, 2))
attn = torch.bmm(attn_weights, output.transpose(0, 1)).squeeze(1)
attn = F.relu(attn)
_, unperm_idx = perm_idx.sort(0)
return attn[unperm_idx]
def init_hidden(self, batch_size):
weight = next(self.parameters())
return (weight.new_zeros(self.nlayers, batch_size, self.nhid),
weight.new_zeros(self.nlayers, batch_size, self.nhid))
class Decoder(nn.Module):
def __init__(self, nhid, ntoken):
super(Decoder, self).__init__()
self.decoder = nn.Linear(nhid, ntoken)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, encoded):
return self.decoder(encoded)
class SimpleContainer(nn.Module):
def __init__(self, embed, encoder, decoder):
super(SimpleContainer, self).__init__()
self.embed = embed
self.encoder = encoder
self.decoder = decoder
def forward(self, input, seq_len, hidden):
emb = (self.embed(input).detach())
encoded = self.encoder(emb, seq_len, hidden)
return self.decoder(encoded)
def init_hidden(self, batch_size):
return self.encoder.init_hidden(batch_size)
|
python
|
# Generated by Django 3.0.7 on 2020-08-22 19:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('staff', '0007_auto_20200820_2133'),
]
operations = [
migrations.AlterField(
model_name='staff',
name='mname',
field=models.CharField(blank=True, default='', max_length=20, verbose_name='Middle Name'),
),
migrations.AlterField(
model_name='staff',
name='status',
field=models.CharField(choices=[('Registred', 'Registered'), ('Pending', 'Pending'), ('Updated', 'Updated'), ('Approved', 'Approved'), ('Disapproved', 'Disapproved')], default='Registered', max_length=11, verbose_name='STATUS'),
),
]
|
python
|
from flask_sqlalchemy import SQLAlchemy
from models import ProductArea
# This file contains the functions that compose the Product Area API
"""
The getProductAreas function doesn't take any parameters.
ProductArea.query.all() returns a list of all the ProductAreas in the ProductArea table.
The serialize attribute turns an instance of the ProductArea object into a list with all its attributes;
each of these lists is appended to listOfProductAreas, and the list of lists is returned by the function.
"""
def getProductAreas():
listOfProductAreas = []
listOfProductAreas = [i.serialize for i in ProductArea.query.all()]
return listOfProductAreas
"""
The deleteProductAreas receives the id and a database session.
It's a simple function that queries the database to find which element has that id and then deletes it.
It returns False if there was an error and True if there wasn't.
"""
def deleteProductAreas(id,db_cursor):
cursor = db_cursor
try:
ProductArea.query.filter_by(id=id).delete()
cursor.commit()
except Exception as e:
print("[ERROR] Something went wrong.")
print(e)
return False
else:
return True
"""
The addProductAreas receives the name of the area as product_area and the database session.
The function creates a ProductArea object and adds it to the database.
It returns False if there was an error and True if there wasn't.
"""
def addProductAreas(product_area, db_cursor):
cursor = db_cursor
try:
productArea = ProductArea(product_area)
cursor.add(productArea)
cursor.commit()
except Exception as e:
print("[ERROR] Something went wrong.")
print(e)
return False
else:
return True
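# A minimal usage sketch (assuming `db` is the application's SQLAlchemy instance and
# that ProductArea(name) matches the model's constructor):
#
#   if addProductAreas("Billing", db.session):
#       areas = getProductAreas()                 # one serialized entry per ProductArea row
#       deleteProductAreas(some_id, db.session)   # some_id: a known ProductArea id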
|
python
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import time
import botocore.session
from botocore import exceptions
from tests import unittest
class TestApigateway(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
self.client = self.session.create_client('apigateway', 'us-east-1')
# Create a resource to use with this client.
self.api_name = 'mytestapi'
self.api_id = self.create_rest_api_or_skip()
def create_rest_api_or_skip(self):
try:
api_id = self.client.create_rest_api(name=self.api_name)['id']
except exceptions.ClientError as e:
if e.response['Error']['Code'] == 'TooManyRequestsException':
raise unittest.SkipTest(
"Hit API gateway throttle limit, skipping test.")
raise
return api_id
def delete_api(self):
retries = 0
while retries < 10:
try:
self.client.delete_rest_api(restApiId=self.api_id)
break
except exceptions.ClientError as e:
if e.response['Error']['Code'] == 'TooManyRequestsException':
retries += 1
time.sleep(5)
else:
raise
def tearDown(self):
self.delete_api()
def test_put_integration(self):
# The only resource on a brand new api is the path. So use that ID.
path_resource_id = self.client.get_resources(
restApiId=self.api_id)['items'][0]['id']
# Create a method for the resource.
self.client.put_method(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
authorizationType='None'
)
# Put an integration on the method.
response = self.client.put_integration(
restApiId=self.api_id,
resourceId=path_resource_id,
httpMethod='GET',
type='HTTP',
integrationHttpMethod='GET',
uri='https://api.endpoint.com'
)
# Assert the response was successful by checking the integration type
self.assertEqual(response['type'], 'HTTP')
|
python
|
#-*-coding:utf-8-*-
# date:2020-03-28
# Author: xiang li
import numpy as np  # load the NumPy library
import torch  # load the Torch library
'''
Think about how the usage shown in this section fits into a real project.
'''
if __name__ == "__main__":
x = torch.tensor(3.1415)
    print(x.floor())  # round the tensor down to the nearest integer
    print(x.ceil())   # round the tensor up to the nearest integer
    print(x.trunc())  # integer part of the tensor
    print(x.frac())   # fractional part of the tensor
y = torch.tensor(3.4)
z = torch.tensor(3.5)
    print(y.round(), z.round())  # round the tensors to the nearest integer
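    # With x = 3.1415 the four prints above give tensor(3.), tensor(4.), tensor(3.)
    # and roughly tensor(0.1415); round() gives tensor(3.) for 3.4 and tensor(4.) for 3.5.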
|
python
|
from __future__ import absolute_import
import numpy as np
from pydci import DCI
from ann_benchmarks.algorithms.base import BaseANN
class PYDCIKNN(BaseANN):
def __init__(self, dim, num_simple=10, num_composite=10):
        self.name = 'PYDCIKNN(d=%d, num_simple=%d, num_composite=%d)' % (
dim, num_simple, num_composite)
self._dim = dim
self._num_simple = num_simple
self._num_composite = num_composite
# query arguments
self._max_retrieve_const = None
self._max_composite_visit_const = None
# set up empty database
self._dci = DCI(dim, num_simple, num_composite)
self._fitted = False
def fit(self, X):
# Will reset if fit multiple times; use update to add points
if self._fitted:
self._dci = DCI(self._dim, self._num_simple, self._num_composite,
X)
else:
self._dci.add(X)
self._fitted = True
def update(self, X):
self._dci.add(X)
self._fitted = True
def set_query_arguments(self, max_retrieve_const,
max_composite_visit_const):
self._max_retrieve_const = max_retrieve_const
self._max_composite_visit_const = max_composite_visit_const
def query(self, v, n):
indices, _, _ = self._dci.query(
np.array([v]), k=n,
max_retrieve_const=self._max_retrieve_const,
max_composite_visit_const=self._max_composite_visit_const,
)
return indices
|
python
|
from .commands import (
MODULES, SYSTEMS, args
)
__all__ = ["args", "MODULES", 'SYSTEMS']
|
python
|
# -*- coding: utf8 -*-
from pathlib import Path
HOME = str(Path.home())
# BASE_URL = 'http://localhost'
BASE_URL_BACKEND = 'http://130.211.114.2'
BASE_API_SERVER = 'http://34.122.87.173'
PORT = 80
# PORT1 = 5000
# PORT2 = 5001
API_URL_BASE = "{}:{}".format(BASE_API_SERVER, PORT)
API_BACKEND = "{}:{}".format(BASE_URL_BACKEND, PORT)
API_SERVER = "{}:{}".format(BASE_API_SERVER, PORT)
USERDATA_PATH = HOME + "/.dataspine/userdata"
PUBLIC_KEY_PATH = HOME + "/.dataspine/public-key"
KUBE_CONFIG_PATH = HOME + '/.kube/config'
|
python
|
from mxnet.gluon import HybridBlock
import mxnet as mx
class SigmoidCrisp(HybridBlock):
def __init__(self, smooth=1.e-2,**kwards):
super().__init__(**kwards)
self.smooth = smooth
with self.name_scope():
self.gamma = self.params.get('gamma', shape=(1,), init=mx.init.One())
def hybrid_forward(self, F, input, gamma):
out = self.smooth + F.sigmoid(gamma)
out = F.reciprocal(out)
out = F.broadcast_mul(input,out)
out = F.sigmoid(out)
return out
|
python
|
from __future__ import unicode_literals
from django.db import models
from adventures.models import Adventure
class Picture(models.Model):
adventure = models.ForeignKey(Adventure)
img = models.ImageField(upload_to='pictures')
description = models.CharField(max_length=50)
funny_facts = models.TextField(max_length=256)
|
python
|
import subprocess
import os
class Pythuby:
def __init__(self, code = False, file = False):
if code:
self.code = code
self.pythuby()
if file:
self.file = file
def pythuby(self):
with open("Temp.rb", "w") as temp_rb_script:
temp_rb_script.write(self.code)
def runPythuby(self):
cmd = subprocess.Popen("ruby Temp.rb", shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
result = cmd.stdout.read().decode("UTF-8")
#os.remove("Temp.rb")
return (result)
def include(self, file):
cmd = subprocess.Popen("ruby {}".format(file), shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result = (cmd.stdout.read() + cmd.stderr.read()).decode("UTF-8")
return (result)
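if __name__ == "__main__":
    # Minimal usage sketch (assumes a `ruby` executable is available on PATH).
    demo = Pythuby(code='puts "hello from ruby"')
    print(demo.runPythuby())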
|
python
|
import time
from typing import Optional, List
import graphene as g
from graphql.execution.base import ResolveInfo
from graphql_jwt.decorators import login_required, superuser_required
from contak import models
from contak.graphql.object_types import Contact
LOAD_DELAY = 0.5
class Query:
contact = g.Field(Contact, id=g.ID(required=True))
all_contacts = g.NonNull(g.List(g.NonNull(Contact)))
my_contacts = g.NonNull(g.List(g.NonNull(Contact)))
@staticmethod
@login_required
def resolve_contact(
_parent: None, info: ResolveInfo, id: str
) -> Optional[models.Contact]:
time.sleep(LOAD_DELAY)
user = info.context.user
try:
return models.Contact.objects.get(id=id, user=user)
except models.Contact.DoesNotExist:
return None
@staticmethod
@superuser_required
def resolve_all_contacts(_parent: None, _info: ResolveInfo) -> List[models.Contact]:
time.sleep(LOAD_DELAY)
return models.Contact.objects.all()
@staticmethod
@login_required
def resolve_my_contacts(_parent: None, info: ResolveInfo) -> List[models.Contact]:
time.sleep(LOAD_DELAY)
user = info.context.user
return models.Contact.objects.filter(user=user)
|
python
|
# -*- coding: utf-8 -*-
"""The preg front-end."""
import logging
from dfvfs.helpers import file_system_searcher
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from dfvfs.resolver import resolver as path_spec_resolver
from plaso.dfwinreg import definitions as dfwinreg_definitions
from plaso.dfwinreg import registry as dfwinreg_registry
from plaso.engine import queue
from plaso.engine import single_process
from plaso.frontend import extraction_frontend
from plaso.lib import errors
from plaso.parsers import mediator as parsers_mediator
from plaso.parsers import manager as parsers_manager
from plaso.parsers import winreg_plugins # pylint: disable=unused-import
from plaso.preprocessors import manager as preprocess_manager
class PregItemQueueConsumer(queue.ItemQueueConsumer):
"""Class that implements a list event object queue consumer."""
def __init__(self, event_queue):
"""Initializes the list event object queue consumer.
Args:
event_queue: the event object queue (instance of Queue).
"""
super(PregItemQueueConsumer, self).__init__(event_queue)
self._event_objects = []
def _ConsumeItem(self, event_object, **unused_kwargs):
"""Consumes an item callback for ConsumeItems.
Args:
event_object: the event object (instance of EventObject).
"""
self._event_objects.append(event_object)
def GetItems(self):
"""Retrieves the consumed event objects.
Yields:
Event objects (instance of EventObject)
"""
if not self._event_objects:
raise StopIteration
event_object = self._event_objects.pop(0)
while event_object:
yield event_object
if not self._event_objects:
break
event_object = self._event_objects.pop(0)
class PregFrontend(extraction_frontend.ExtractionFrontend):
"""Class that implements the preg front-end.
Attributes:
knowledge_base_object: the knowledge base object (instance
of KnowledgeBase).
"""
def __init__(self):
"""Initializes the front-end object."""
super(PregFrontend, self).__init__()
self._mount_path_spec = None
self._parse_restore_points = False
self._preprocess_completed = False
self._registry_files = []
self._registry_plugin_list = (
parsers_manager.ParsersManager.GetWindowsRegistryPlugins())
self._searcher = None
self._single_file = False
self._source_path = None
self._source_path_specs = []
self.knowledge_base_object = None
@property
def registry_plugin_list(self):
"""The Windows Registry plugin list (instance of PluginList)."""
return self._registry_plugin_list
# TODO: clean up this function as part of dfvfs find integration.
def _FindRegistryPaths(self, searcher, pattern):
"""Return a list of Windows Registry file path specifications.
Args:
searcher: the file system searcher object (instance of
dfvfs.FileSystemSearcher).
pattern: the pattern to find.
Returns:
A list of path specification objects (instance of PathSpec).
"""
# TODO: optimize this in one find.
registry_file_paths = []
file_path, _, file_name = pattern.rpartition(u'/')
# The path is split in segments to make it path segment separator
# independent (and thus platform independent).
path_segments = file_path.split(u'/')
if not path_segments[0]:
path_segments = path_segments[1:]
find_spec = file_system_searcher.FindSpec(
location_regex=path_segments, case_sensitive=False)
path_specs = list(searcher.Find(find_specs=[find_spec]))
if not path_specs:
logging.debug(u'Directory: {0:s} not found'.format(file_path))
return registry_file_paths
for path_spec in path_specs:
directory_location = getattr(path_spec, u'location', None)
if not directory_location:
raise errors.PreProcessFail(
u'Missing directory location for: {0:s}'.format(file_path))
# The path is split in segments to make it path segment separator
# independent (and thus platform independent).
path_segments = searcher.SplitPath(directory_location)
path_segments.append(file_name)
# Remove mount part if OS mount path is set.
# TODO: Instead of using an absolute path spec, use a mount point one.
if self._mount_path_spec:
mount_point_location = getattr(self._mount_path_spec, u'location', u'')
mount_point_segments = mount_point_location.split(u'/')
if not mount_point_segments[0]:
mount_point_segments = mount_point_segments[1:]
remove_mount_point = True
for index in range(0, len(mount_point_segments)):
mount_point_segment = mount_point_segments[index]
if mount_point_segment != path_segments[index]:
remove_mount_point = False
if remove_mount_point:
path_segments = path_segments[len(mount_point_segments):]
find_spec = file_system_searcher.FindSpec(
location_regex=path_segments, case_sensitive=False)
fh_path_specs = list(searcher.Find(find_specs=[find_spec]))
if not fh_path_specs:
logging.debug(u'File: {0:s} not found in directory: {1:s}'.format(
file_name, directory_location))
continue
registry_file_paths.extend(fh_path_specs)
return registry_file_paths
def _GetRegistryHelperFromPath(self, path, codepage):
"""Return a Registry helper object from a path.
Given a path to a Registry file this function goes through
all the discovered source path specifications (instance of PathSpec)
and extracts Registry helper objects based on the supplied
path.
Args:
path: the path filter to a Registry file.
codepage: the codepage used for the Registry file. The default is cp1252.
Yields:
A Registry helper object (instance of PregRegistryHelper).
"""
for source_path_spec in self._source_path_specs:
type_indicator = source_path_spec.TYPE_INDICATOR
if type_indicator == dfvfs_definitions.TYPE_INDICATOR_OS:
file_entry = path_spec_resolver.Resolver.OpenFileEntry(source_path_spec)
if file_entry.IsFile():
yield PregRegistryHelper(
file_entry, u'OS', self.knowledge_base_object, codepage=codepage)
continue
# TODO: Change this into an actual mount point path spec.
self._mount_path_spec = source_path_spec
collector_name = type_indicator
parent_path_spec = getattr(source_path_spec, u'parent', None)
if parent_path_spec:
parent_type_indicator = parent_path_spec.TYPE_INDICATOR
if parent_type_indicator == dfvfs_definitions.TYPE_INDICATOR_VSHADOW:
vss_store = getattr(parent_path_spec, u'store_index', 0)
collector_name = u'VSS Store: {0:d}'.format(vss_store)
searcher = self._GetSearcher()
for registry_file_path in self._FindRegistryPaths(searcher, path):
file_entry = searcher.GetFileEntryByPathSpec(registry_file_path)
yield PregRegistryHelper(
file_entry, collector_name, self.knowledge_base_object,
codepage=codepage)
def _GetRegistryTypes(self, plugin_name):
"""Retrieves the Windows Registry types based on a filter string.
Args:
plugin_name: string containing the name of the plugin or an empty
string for all types.
Returns:
A list of Windows Registry types.
"""
types = set()
for plugin in self.GetRegistryPlugins(plugin_name):
for key_plugin_class in self._registry_plugin_list.GetAllKeyPlugins():
if plugin.NAME == key_plugin_class.NAME:
types.add(key_plugin_class.REG_TYPE)
break
return list(types)
def _GetRegistryTypesFromPlugins(self, plugin_names):
"""Return a list of Registry types extracted from a list of plugin names.
Args:
plugin_names: a list of plugin names.
Returns:
A list of Registry types extracted from the supplied plugins.
"""
if not plugin_names:
return []
plugins_list = self._registry_plugin_list
registry_file_types = set()
for plugin_name in plugin_names:
for plugin_class in plugins_list.GetAllKeyPlugins():
if plugin_name == plugin_class.NAME.lower():
# If a plugin is available for every Registry type
# we need to make sure all Registry files are included.
if plugin_class.REG_TYPE == u'any':
            registry_file_types.update(dfwinreg_definitions.REGISTRY_FILE_TYPES)
else:
registry_file_types.add(plugin_class.REG_TYPE)
return list(registry_file_types)
def _GetSearcher(self):
"""Retrieve a searcher for the first source path specification.
Returns:
A file system searcher object (instance of dfvfs.FileSystemSearcher)
for the first discovered source path specification, or None if there are
no discovered source path specifications.
"""
if not self._source_path_specs:
return
if self._searcher:
return self._searcher
file_system, mount_point = self._GetSourceFileSystem(
self._source_path_specs[0])
self._searcher = file_system_searcher.FileSystemSearcher(
file_system, mount_point)
# TODO: close file_system after usage.
return self._searcher
# TODO: refactor, this is a duplicate of the function in engine.
def _GetSourceFileSystem(self, source_path_spec, resolver_context=None):
"""Retrieves the file system of the source.
The mount point path specification refers to either a directory or
a volume on storage media device or image. It is needed by the dfVFS
file system searcher (instance of FileSystemSearcher) to indicate
the base location of the file system.
Args:
source_path_spec: The source path specification (instance of
dfvfs.PathSpec) of the file system.
resolver_context: Optional resolver context (instance of dfvfs.Context).
The default is None. Note that every thread or process
must have its own resolver context.
Returns:
A tuple of the file system (instance of dfvfs.FileSystem) and
the mount point path specification (instance of path.PathSpec).
Raises:
RuntimeError: if source path specification is not set.
"""
if not source_path_spec:
raise RuntimeError(u'Missing source.')
file_system = path_spec_resolver.Resolver.OpenFileSystem(
source_path_spec, resolver_context=resolver_context)
type_indicator = source_path_spec.type_indicator
if path_spec_factory.Factory.IsSystemLevelTypeIndicator(type_indicator):
mount_point = source_path_spec
else:
mount_point = source_path_spec.parent
return file_system, mount_point
def CreateParserMediator(self, event_queue=None):
"""Create a parser mediator object.
Args:
event_queue: an optional event queue object (instance of Queue).
The default is None.
Returns:
A parser mediator object (instance of parsers_mediator.ParserMediator).
"""
if event_queue is None:
event_queue = single_process.SingleProcessQueue()
event_queue_producer = queue.ItemQueueProducer(event_queue)
parse_error_queue = single_process.SingleProcessQueue()
parse_error_queue_producer = queue.ItemQueueProducer(parse_error_queue)
return parsers_mediator.ParserMediator(
event_queue_producer, parse_error_queue_producer,
self.knowledge_base_object)
def ExpandKeysRedirect(self, keys):
"""Expands a list of Registry key paths with their redirect equivalents.
Args:
keys: a list of Windows Registry key paths.
"""
for key in keys:
if key.startswith(u'\\Software') and u'Wow6432Node' not in key:
_, first, second = key.partition(u'\\Software')
keys.append(u'{0:s}\\Wow6432Node{1:s}'.format(first, second))
def GetRegistryFilePaths(self, plugin_name=None, registry_file_type=None):
"""Returns a list of Registry paths.
    If the Registry file type is not set this function attempts to determine
it based on the presence of specific Registry keys.
Args:
plugin_name: optional string containing the name of the plugin or an empty
string or None for all the types. The default is None.
registry_file_type: optional Windows Registry file type string.
The default is None, which represents auto-detect.
Returns:
A list of path names for Registry files.
"""
if self._parse_restore_points:
restore_path = u'/System Volume Information/_restor.+/RP[0-9]+/snapshot/'
else:
restore_path = u''
if registry_file_type:
types = [registry_file_type]
else:
types = self._GetRegistryTypes(plugin_name)
# Gather the Registry files to fetch.
paths = []
for reg_type in types:
if reg_type == dfwinreg_definitions.REGISTRY_FILE_TYPE_NTUSER:
paths.append(u'/Documents And Settings/.+/NTUSER.DAT')
paths.append(u'/Users/.+/NTUSER.DAT')
if restore_path:
paths.append(u'{0:s}/_REGISTRY_USER_NTUSER.+'.format(restore_path))
elif reg_type == dfwinreg_definitions.REGISTRY_FILE_TYPE_SAM:
paths.append(u'{sysregistry}/SAM')
if restore_path:
paths.append(u'{0:s}/_REGISTRY_MACHINE_SAM'.format(restore_path))
elif reg_type == dfwinreg_definitions.REGISTRY_FILE_TYPE_SECURITY:
paths.append(u'{sysregistry}/SECURITY')
if restore_path:
paths.append(u'{0:s}/_REGISTRY_MACHINE_SECURITY'.format(restore_path))
elif reg_type == dfwinreg_definitions.REGISTRY_FILE_TYPE_SOFTWARE:
paths.append(u'{sysregistry}/SOFTWARE')
if restore_path:
paths.append(u'{0:s}/_REGISTRY_MACHINE_SOFTWARE'.format(restore_path))
elif reg_type == dfwinreg_definitions.REGISTRY_FILE_TYPE_SYSTEM:
paths.append(u'{sysregistry}/SYSTEM')
if restore_path:
paths.append(u'{0:s}/_REGISTRY_MACHINE_SYSTEM'.format(restore_path))
elif reg_type == dfwinreg_definitions.REGISTRY_FILE_TYPE_USRCLASS:
paths.append(u'/Users/.+/AppData/Local/Microsoft/Windows/UsrClass.dat')
# Expand all the paths.
win_registry = dfwinreg_registry.WinRegistry()
# TODO: deprecate usage of pre_obj.
path_attributes = self.knowledge_base_object.pre_obj.__dict__
expanded_key_paths = []
for key_path in paths:
try:
expanded_key_path = win_registry.ExpandKeyPath(
key_path, path_attributes)
expanded_key_paths.append(expanded_key_path)
except KeyError as exception:
logging.error(
u'Unable to expand key path: {0:s} with error: {1:s}'.format(
key_path, exception))
return expanded_key_paths
# TODO: refactor this function. Current implementation is too complex.
def GetRegistryHelpers(
self, registry_file_types=None, plugin_names=None, codepage=u'cp1252'):
"""Returns a list of discovered Registry helpers.
Args:
registry_file_types: optional list of Windows Registry file types,
e.g.: NTUSER, SAM, etc that should be included.
The default is None.
plugin_names: optional list of strings containing the name of the
plugin(s) or an empty string for all the types. The default
is None.
codepage: the codepage used for the Registry file. The default is cp1252.
Returns:
A list of Registry helper objects (instance of PregRegistryHelper).
Raises:
ValueError: If neither registry_file_types nor plugin name is passed
as a parameter.
"""
if registry_file_types is None and plugin_names is None:
raise ValueError(
u'Missing registry_file_types or plugin_name value.')
if plugin_names is None:
plugin_names = []
else:
plugin_names = [plugin_name.lower() for plugin_name in plugin_names]
# TODO: use non-preprocess collector with filter to collect Registry files.
if not self._single_file and not self._preprocess_completed:
file_system, mount_point = self._GetSourceFileSystem(
self._source_path_specs[0])
preprocess_manager.PreprocessPluginsManager.RunPlugins(
u'Windows', file_system, mount_point, self.knowledge_base_object)
self._preprocess_completed = True
file_system.Close()
if registry_file_types is None:
registry_file_types = []
types_from_plugins = self._GetRegistryTypesFromPlugins(plugin_names)
registry_file_types.extend(types_from_plugins)
paths = []
if self._single_file:
paths = [self._source_path]
elif registry_file_types:
for registry_file_type in registry_file_types:
paths.extend(self.GetRegistryFilePaths(
registry_file_type=registry_file_type.upper()))
else:
for plugin_name in plugin_names:
paths.extend(self.GetRegistryFilePaths(plugin_name=plugin_name))
self.knowledge_base_object.SetDefaultCodepage(codepage)
registry_helpers = []
for path in paths:
registry_helpers.extend([
helper for helper in self._GetRegistryHelperFromPath(path, codepage)])
return registry_helpers
def GetRegistryPlugins(self, plugin_name):
"""Retrieves the Windows Registry plugins based on a filter string.
Args:
plugin_name: string containing the name of the plugin or an empty
string for all the plugins.
Returns:
A list of Windows Registry plugins (instance of RegistryPlugin).
"""
key_plugins = {}
for plugin in self._registry_plugin_list.GetAllKeyPlugins():
key_plugins[plugin.NAME] = plugin
if not plugin_name:
return key_plugins.values()
plugin_name = plugin_name.lower()
plugins_to_run = []
for key_plugin_name, key_plugin in iter(key_plugins.items()):
if plugin_name in key_plugin_name.lower():
plugins_to_run.append(key_plugin)
return plugins_to_run
def GetRegistryPluginsFromRegistryType(self, registry_file_type):
"""Retrieves the Windows Registry plugins based on a Registry type.
Args:
registry_file_type: the Windows Registry files type string.
Returns:
A list of Windows Registry plugins (instance of RegistryPlugin).
"""
key_plugins = {}
for plugin in self._registry_plugin_list.GetAllKeyPlugins():
key_plugins.setdefault(plugin.REG_TYPE.lower(), []).append(plugin)
if not registry_file_type:
return key_plugins.values()
registry_file_type = registry_file_type.lower()
plugins_to_run = []
for key_plugin_type, key_plugin_list in iter(key_plugins.items()):
if registry_file_type == key_plugin_type:
plugins_to_run.extend(key_plugin_list)
elif key_plugin_type == u'any':
plugins_to_run.extend(key_plugin_list)
return plugins_to_run
def ParseRegistryFile(
self, registry_helper, key_paths=None, use_plugins=None):
"""Extracts events from a Registry file.
This function takes a Registry helper object (instance of
PregRegistryHelper) and information about either Registry plugins or keys.
The function then opens up the Registry file and runs the plugins defined
(or all if no plugins are defined) against all the keys supplied to it.
Args:
registry_helper: Registry helper object (instance of PregRegistryHelper)
key_paths: optional list of Registry keys paths that are to be parsed.
The default is None, which results in no keys parsed.
use_plugins: optional list of plugins used to parse the key. The
default is None, in which case all plugins are used.
Returns:
A dict that contains the following structure:
key_path:
key: a Registry key (instance of WinRegKey)
subkeys: a list of Registry keys (instance of WinRegKey).
data:
plugin: a plugin object (instance of RegistryPlugin)
event_objects: List of event objects extracted.
key_path 2:
...
Or an empty dict on error.
"""
if not registry_helper:
return {}
try:
registry_helper.Open()
except IOError as exception:
logging.error(u'Unable to parse Registry file, with error: {0:s}'.format(
exception))
return {}
return_dict = {}
if key_paths is None:
key_paths = []
for key_path in key_paths:
key = registry_helper.GetKeyByPath(key_path)
return_dict[key_path] = {u'key': key}
if not key:
continue
return_dict[key_path][u'subkeys'] = list(key.GetSubkeys())
return_dict[key_path][u'data'] = self.ParseRegistryKey(
key, registry_helper, use_plugins=use_plugins)
return return_dict
def ParseRegistryKey(self, key, registry_helper, use_plugins=None):
"""Parse a single Registry key and return parsed information.
Parses the Registry key either using the supplied plugin or trying against
all available plugins.
Args:
key: the Registry key to parse (instance of WinRegKey or a string
containing key path).
registry_helper: the Registry helper object (instance of
PregRegistryHelper).
use_plugins: optional list of plugin names to use. The default is None
which uses all available plugins.
Returns:
A dictionary with plugin objects as keys and extracted event objects from
each plugin as values or an empty dict on error.
"""
return_dict = {}
if not registry_helper:
return return_dict
if isinstance(key, basestring):
key = registry_helper.GetKeyByPath(key)
if not key:
return return_dict
registry_file_type = registry_helper.file_type
plugins = {}
plugins_list = self._registry_plugin_list
# Compile a list of plugins we are about to use.
for weight in plugins_list.GetWeights():
plugin_list = plugins_list.GetPluginsByWeight(weight, registry_file_type)
plugins[weight] = []
for plugin in plugin_list:
plugin_object = plugin()
if use_plugins:
if plugin_object.NAME in use_plugins:
plugins[weight].append(plugin_object)
else:
plugins[weight].append(plugin_object)
event_queue = single_process.SingleProcessQueue()
event_queue_consumer = PregItemQueueConsumer(event_queue)
parser_mediator = self.CreateParserMediator(event_queue)
parser_mediator.SetFileEntry(registry_helper.file_entry)
for weight in plugins:
for plugin in plugins[weight]:
plugin.Process(parser_mediator, key=key)
event_queue_consumer.ConsumeItems()
event_objects = [
event_object for event_object in event_queue_consumer.GetItems()]
if event_objects:
return_dict[plugin] = event_objects
return return_dict
def SetSingleFile(self, single_file=False):
"""Sets the single file processing parameter.
Args:
single_file: boolean value, if set to True the tool treats the
source as a single file input, otherwise as a storage
media format. The default is False.
"""
self._single_file = single_file
def SetSourcePath(self, source_path):
"""Sets the source path.
Args:
source_path: the filesystem path to the disk image.
"""
self._source_path = source_path
  def SetSourcePathSpecs(self, source_path_specs):
    """Sets the source path specifications.
Args:
source_path_specs: list of source path specifications (instance
of PathSpec).
"""
self._source_path_specs = source_path_specs
def SetKnowledgeBase(self, knowledge_base_object):
"""Sets the knowledge base object for the front end.
Args:
knowledge_base_object: the knowledge base object (instance
of KnowledgeBase).
"""
self.knowledge_base_object = knowledge_base_object
class PregRegistryHelper(object):
  """Class that defines a few helper functions for Registry operations.
Attributes:
file_entry: file entry object (instance of dfvfs.FileEntry).
"""
def __init__(
self, file_entry, collector_name, knowledge_base_object,
codepage=u'cp1252'):
"""Initialize the Registry helper.
Args:
file_entry: file entry object (instance of dfvfs.FileEntry).
      collector_name: the name of the collector, e.g. TSK.
knowledge_base_object: A knowledge base object (instance of
KnowledgeBase), which contains information from
the source data needed for parsing.
codepage: optional codepage value used for the Registry file. The default
is cp1252.
"""
self._Reset()
self._codepage = codepage
self._collector_name = collector_name
self._knowledge_base_object = knowledge_base_object
self._win_registry = dfwinreg_registry.WinRegistry(
backend=dfwinreg_registry.WinRegistry.BACKEND_PYREGF)
self.file_entry = file_entry
def __enter__(self):
"""Make usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make usable with "with" statement."""
self.Close()
@property
def collector_name(self):
"""The name of the collector used to discover the Registry file."""
return self._collector_name
@property
def file_type(self):
"""The Registry file type."""
return self._registry_file_type
@property
def name(self):
"""The name of the Registry file."""
return getattr(self._registry_file, u'name', u'N/A')
@property
def path(self):
"""The file path of the Registry file."""
path_spec = getattr(self.file_entry, u'path_spec', None)
if not path_spec:
return u'N/A'
return getattr(path_spec, u'location', u'N/A')
@property
def root_key(self):
"""The root key of the Registry file."""
if self._registry_file:
return self._registry_file.GetKeyByPath(u'\\')
def _Reset(self):
"""Reset all attributes of the Registry helper."""
self._currently_loaded_registry_key = u''
self._registry_file = None
self._registry_file_type = dfwinreg_definitions.REGISTRY_FILE_TYPE_UNKNOWN
def Close(self):
"""Closes the helper."""
self._Reset()
def ExpandKeyPath(self, key_path, path_attributes):
"""Expand a Registry key path based on path attributes.
A Registry key path may contain path attributes. A path attribute is
defined as anything within a curly bracket, e.g.
"\\System\\{my_attribute}\\Path\\Keyname".
    If the path attribute my_attribute is defined, the placeholder is replaced
    with its value, e.g. "\\System\\MyValue\\Path\\Keyname".
If the Registry path needs to have curly brackets in the path then
they need to be escaped with another curly bracket, e.g.
"\\System\\{my_attribute}\\{{123-AF25-E523}}\\KeyName". In this
case the {{123-AF25-E523}} will be replaced with "{123-AF25-E523}".
Args:
key_path: the Registry key path before being expanded.
path_attributes: a dictionary containing the path attributes.
Returns:
A Registry key path that's expanded based on attribute values.
"""
return self._win_registry.ExpandKeyPath(key_path, path_attributes)
def GetCurrentRegistryKey(self):
"""Return the currently loaded Registry key."""
return self._currently_loaded_registry_key
def GetCurrentRegistryPath(self):
"""Return the loaded Registry key path or None if no key is loaded."""
key = self._currently_loaded_registry_key
if not key:
return
return key.path
def GetKeyByPath(self, key_path):
"""Retrieves a specific key defined by the Registry key path.
Args:
key_path: the Registry key path.
Returns:
The key (instance of WinRegKey) if available or None otherwise.
"""
if not key_path:
return
# TODO: deprecate usage of pre_obj.
path_attributes = self._knowledge_base_object.pre_obj.__dict__
try:
expanded_key_path = self._win_registry.ExpandKeyPath(
key_path, path_attributes)
except KeyError:
expanded_key_path = key_path
key = self._registry_file.GetKeyByPath(expanded_key_path)
if not key:
return
self._currently_loaded_registry_key = key
return key
def Open(self):
"""Open the Registry file."""
if self._registry_file:
raise IOError(u'Registry file already open.')
try:
self._registry_file = self._win_registry.OpenFileEntry(
self.file_entry, codepage=self._codepage)
except IOError:
logging.error(
u'Unable to open Registry file: {0:s} [{1:s}]'.format(
self.path, self._collector_name))
self.Close()
raise
self._registry_file_type = self._win_registry.GetRegistryFileType(
self._registry_file)
# Retrieve the Registry file root key because the Registry helper
# expects self._currently_loaded_registry_key to be set after
# the Registry file is opened.
self.GetKeyByPath(u'\\')
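
# Illustrative usage sketch (not part of the original module): driving the
# front-end methods defined above against a single Registry file. The
# instantiation of the front-end object, the knowledge base object and the
# source path specifications are assumed here, and the plugin name and key
# path are only examples.
#
#   front_end = ...  # an instance of the front-end class defined above
#   front_end.SetKnowledgeBase(knowledge_base_object)
#   front_end.SetSingleFile(True)
#   front_end.SetSourcePath(u'/evidence/NTUSER.DAT')
#   front_end.SetSourcePathSpecs(source_path_specs)
#
#   for helper in front_end.GetRegistryHelpers(plugin_names=[u'userassist']):
#     results = front_end.ParseRegistryFile(
#         helper,
#         key_paths=[u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Run'])
#     for key_path, key_data in results.items():
#       print(key_path, key_data.get(u'data', {}))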
|
python
|
from .tracker import Tracker
class Rifle:
def __init__(self , display_width , display_height , triggerHandler):
        # OpenCV tracker
self.tracker = Tracker('orange')
# variables to scale the cursor to desired screen width and height
self.display_width = display_width
self.display_height = display_height
# initial cursor location
self.x = display_width // 2
self.y = display_height // 2
self.triggerHandler = triggerHandler # function to be executed when trigger is pulled
self.point_radius = 5
self.color = (255 , 0 , 0) # pointer color
self.loaded = True # check if gun is ready to shoot next round
self.prev = False # to keep track of shooting in EXP function
def scalePointer(self , video_width = 640 , video_height = 480 ):
        # retrieve the current position of the pointer and scale it to the current game window size
sight , trigger = self.tracker.getPos()
if sight:
self.x , self.y = (sight[0] + sight[2] , sight[1] + sight[3] )
# scaling the pointer
# new_value = ( (old_value - old_min) / (old_max - old_min) ) * (new_max - new_min) + new_min
self.x = int( (self.display_width / video_width) * self.x )
self.y = int( (self.display_height / video_height) * self.y )
# OLD
# self.x = int((self.display_height * self.x) / video_height)
# self.y = int((self.display_width * self.y ) / video_width)
if trigger:
self.loaded = True
elif self.loaded:
self.triggerHandler(self.x , self.y)
self.loaded = False
return (self.x , self.y)
def scalePointerExp(self ,pos , shoot, video_width = 640 , video_height = 480 ):
        # similar to scalePointer but the x, y can be set explicitly (e.g. driven by the mouse)
self.x , self.y = pos
if shoot:
if not self.prev:
self.triggerHandler(self.x , self.y)
self.prev = shoot
scaled_x = int((self.display_height * self.x) / video_height)
scaled_y = int((self.display_width * self.y ) / video_width)
return (scaled_x , scaled_y)
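
# Minimal usage sketch (illustrative, not part of the original file). It
# assumes a game loop and a drawing helper exist; the handler below and the
# 800x600 display size are made up for the example.
#
#   def on_shot(x, y):
#       print('shot fired at', x, y)
#
#   rifle = Rifle(800, 600, on_shot)
#   while running:                     # game loop, assumed
#       x, y = rifle.scalePointer()    # webcam-driven crosshair position
#       draw_crosshair(x, y)           # drawing helper, assumed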
|
python
|
# -*- coding: utf-8 -*-
"""
* This is a script file to associate images and IMU data.
* Copyright 2018 Nanjing University of Science and Technology
* Author: Zhixing Hou <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
fileImage = "D:/Dataset/myKinect/dataIMU1/filenames.txt"
fileIMU = "D:/Dataset/myKinect/dataIMU1/imuData.txt"
fileIMU2 = "D:/Dataset/myKinect/dataIMU1/imuDataRemErr.txt"
fileAss = "D:/Dataset/myKinect/dataIMU1/association.txt"
timeStampImage = []
timeStampIMU = []
timeImage = []
timeIMU = []
fl = open(fileImage,"r")
lines = fl.readlines()
for line3 in lines:
# for li in line3:
timeStampImage.append(line3.split()[0])
timeImage.append(float(line3.split()[0][6:]))
fl.close()
fl = open(fileIMU,"r")
lineIMU = fl.readlines()
fl.close()
firstIMU = True
anglePrev = []
angleCurr = []
fl = open(fileIMU2,"w")
for line3 in lineIMU:
if firstIMU:
timeStampIMU.append(line3.split()[0])
timeIMU.append(float(line3.split()[0][6:]))
anglePrev = line3.split()[7:10]
# print([anglePrev,"\n"])
fl.writelines([" ".join(line3.split()), "\n"])
firstIMU = False
else:
angleCurr = line3.split()[7:10]
if abs(float(anglePrev[2]))> 178.5 or abs(float(angleCurr[2]))> 178.5:
if abs(float(angleCurr[0]) - float(anglePrev[0])) < 3.0 and abs(float(angleCurr[1]) - float(anglePrev[1])) < 3.0 and abs(abs(float(angleCurr[2])) - abs(float(anglePrev[2]))) < 3.0 :
timeStampIMU.append(line3.split()[0])
timeIMU.append(float(line3.split()[0][6:]))
fl.writelines([" ".join(line3.split()), "\n"])
anglePrev = angleCurr
# print([line3.split()[0], " ", angleCurr,"\n"])
# else:
# print([line3.split()[0], " ", angleCurr," ", anglePrev, "\n"])
else:
if abs(float(angleCurr[0]) - float(anglePrev[0])) < 3.0 and abs(float(angleCurr[1]) - float(anglePrev[1])) < 3.0 and abs(float(angleCurr[2]) - float(anglePrev[2])) < 3.0 :
timeStampIMU.append(line3.split()[0])
timeIMU.append(float(line3.split()[0][6:]))
fl.writelines([" ".join(line3.split()), "\n"])
anglePrev = angleCurr
# print([line3.split()[0], " ", angleCurr,"\n"])
# else:
# print([line3.split()[0], " ", angleCurr," ", anglePrev, "\n"])
fl.close()
fl = open(fileAss,'w')
indImage = 0
indIMU = 0
minDiffTime = 10
for tImage in timeImage:
for tIMU in timeIMU:
diffTime = abs(tImage - tIMU)
if diffTime < minDiffTime:
minDiffTime = diffTime
indIMU = timeIMU.index(tIMU)
fl.writelines([timeStampImage[indImage]," ",timeStampIMU[indIMU],"\n"])
print([timeStampImage[indImage]," ",timeStampIMU[indIMU],"\n"])
indImage = indImage + 1
minDiffTime = 10
fl.close()
|
python
|
# Given a sorted array of integers nums and integer values a, b and c. Apply a function of the form f(x) = ax^2 + bx + c to each element x in the array.
# The returned array must be in sorted order.
# Expected time complexity: O(n)
# Example:
# nums = [-4, -2, 2, 4], a = 1, b = 3, c = 5,
# Result: [3, 9, 15, 33]
# nums = [-4, -2, 2, 4], a = -1, b = 3, c = 5
# Result: [-23, -5, 1, 7]
# V0
# V1
# https://www.jiuzhang.com/solution/sort-transformed-array/#tag-highlight-lang-python
class Solution:
"""
@param nums: a sorted array
@param a:
@param b:
@param c:
@return: a sorted array
"""
def sortTransformedArray(self, nums, a, b, c):
# Write your code here
res = [0 for i in range(len(nums))]
start = 0;
end = len(nums) - 1
cnt = 0;
if a >= 0:
cnt = end
while start <= end:
startNum = a * nums[start] * nums[start] + b * nums[start] + c
endNum = a * nums[end] * nums[end] + b * nums[end] + c
if a >= 0:
if startNum >= endNum:
res[cnt] = startNum
cnt -= 1
start += 1
else:
res[cnt] = endNum
cnt -= 1
end -= 1
else: # a < 0
if startNum <= endNum:
res[cnt] = startNum
cnt += 1
start += 1
else:
res[cnt] = endNum
cnt += 1
end -= 1
return res
# V1'
# https://blog.csdn.net/qq508618087/article/details/51700774
# C++
# class Solution {
# public:
# vector<int> sortTransformedArray(vector<int>& nums, int a, int b, int c) {
# if(nums.size() ==0) return {};
# vector<int> result;
# int left = 0, right = nums.size()-1;
# auto func = [=](int x) { return a*x*x + b*x + c; };
# while(left <= right)
# {
# int val1 = func(nums[left]), val2 = func(nums[right]);
# if(a > 0) result.push_back(val1>=val2?val1:val2);
# if(a > 0) val1>val2?left++:right--;
# if(a <= 0) result.push_back(val1>=val2?val2:val1);
# if(a <= 0) val1>val2?right--:left++;
# }
# if(a > 0) reverse(result.begin(), result.end());
# return result;
# }
# };
# V2
# Time: O(n)
# Space: O(1)
class Solution(object):
def sortTransformedArray(self, nums, a, b, c):
"""
:type nums: List[int]
:type a: int
:type b: int
:type c: int
:rtype: List[int]
"""
f = lambda x, a, b, c : a * x * x + b * x + c
result = []
if not nums:
return result
left, right = 0, len(nums) - 1
d = -1 if a > 0 else 1
while left <= right:
if d * f(nums[left], a, b, c) < d * f(nums[right], a, b, c):
result.append(f(nums[left], a, b, c))
left += 1
else:
result.append(f(nums[right], a, b, c))
right -= 1
return result[::d]
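# Quick check of the two-pointer Solution (V2) above against the examples in
# the problem statement (illustrative):
#
#   s = Solution()
#   print(s.sortTransformedArray([-4, -2, 2, 4], 1, 3, 5))   # [3, 9, 15, 33]
#   print(s.sortTransformedArray([-4, -2, 2, 4], -1, 3, 5))  # [-23, -5, 1, 7]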
|
python
|
import numpy as np
import warnings
def into_patches(image, patch_shape, patch_n):
"""
Process a 2D image into evenly spaced-out 2D patches.
Arguments:
image: image to process into patches as a 2D numpy array.
        patch_shape: target shape of patches: (height, width).
patch_n: number of rows and columns of patches: (rows, columns).
Returns:
A stack of patches as a numpy array of shape (patch_n[0]*patch_n[1], y, x).
"""
y_stride = (image.shape[0] - patch_shape[0]) / (patch_n[0] - 1) if patch_n[0] > 1 else 0
x_stride = (image.shape[1] - patch_shape[1]) / (patch_n[1] - 1) if patch_n[1] > 1 else 0
out = np.stack([
image[
int(row*y_stride):int(row*y_stride)+patch_shape[0],
int(col*x_stride):int(col*x_stride)+patch_shape[1]
] for col in range(patch_n[1]) for row in range(patch_n[0])
])
return out
def from_patches(patches, patch_n, target_shape, pad=0):
"""
Assemble a 2D image stack from evenly spaced-out 2D patches.
Overlapping areas will be averaged.
Arguments:
patches: stack of patches as a numpy array of shape (patch_n[0]*patch_n[1], y, x).
patch_n: number of rows and columns of patches: (rows, columns).
target_shape: target shape in which the patches shall be assembled into.
pad: cropping to apply to patches on all sides.
Returns:
        A 2D assembly of the patches as a numpy array in the target shape.
"""
y_stride = (target_shape[0] - patches.shape[1]) / (patch_n[0] - 1) if patch_n[0] > 1 else 0
x_stride = (target_shape[1] - patches.shape[2]) / (patch_n[1] - 1) if patch_n[1] > 1 else 0
canvas_shape = list(target_shape)+[2]
if pad:
patches = patches[:, pad:-pad, pad:-pad]
canvas_shape[0] -= 2*pad
canvas_shape[1] -= 2*pad
coords = [
(
slice(int(y*y_stride), int(y*y_stride) + patches.shape[1]),
slice(int(x*x_stride), int(x*x_stride) + patches.shape[2])
) for x in range(patch_n[1]) for y in range(patch_n[0])
]
canvas = np.zeros(canvas_shape)
for patch, coord in zip(patches, coords):
canvas[coord] += np.stack([patch, np.ones(patch.shape)], -1)
if np.any(canvas[...,-1] == 0):
warnings.warn("zero-coverage regions detected")
    # Average overlapping contributions: summed values divided by coverage count.
    return canvas[..., 0] / canvas[..., 1]
def into_patches_3d(image, patch_shape, patch_n):
"""
Process a 3D image stack into evenly spaced-out 2D patches.
Arguments:
image: image stack to process into patches as a 3D numpy array.
        patch_shape: target shape of patches: (height, width).
patch_n: number of rows and columns of patches: (rows, columns).
Returns:
A stack of patches as a numpy array of shape (patch_n[0]*patch_n[1]*z, y, x).
"""
    assert len(patch_shape) == len(patch_n), "Rank of patch_shape must match rank of patch_n"
y_stride = (image.shape[1] - patch_shape[0]) / (patch_n[0] - 1) if patch_n[0] > 1 else 0
x_stride = (image.shape[2] - patch_shape[1]) / (patch_n[1] - 1) if patch_n[1] > 1 else 0
out = np.vstack([
image[
:,
int(row*y_stride):int(row*y_stride)+patch_shape[0],
int(col*x_stride):int(col*x_stride)+patch_shape[1]
] for col in range(patch_n[1]) for row in range(patch_n[0])
])
return out
def from_patches_3d(patches, patch_n, target_shape, pad=0):
"""
Assemble a 3D image stack from evenly spaced-out 2D patches.
Patches need to be grouped along first array axis by patch position, not by Z-slice;
this can be ensured by using PatchUtil.into_patches_3d to create patches.
Overlapping areas will be averaged.
Arguments:
patches: stack of patches as a numpy array of shape (patch_n[0]*patch_n[1]*z, y, x).
patch_n: number of rows and columns of patches: (rows, columns).
target_shape: target shape in which the patches shall be assembled into.
pad: cropping to apply to patches on all sides.
Returns:
A 3D assembly of the patches as a numpy array in the target shape.
"""
# TODO: check whether optimizing this function is viable, counter channel could also just be 2D.
y_stride = (target_shape[1] - patches.shape[1]) / (patch_n[0] - 1) if patch_n[0] > 1 else 0
x_stride = (target_shape[2] - patches.shape[2]) / (patch_n[1] - 1) if patch_n[1] > 1 else 0
canvas_shape = list(target_shape)+[2]
if pad:
patches = patches[:, pad:-pad, pad:-pad]
canvas_shape[1] -= 2*pad
canvas_shape[2] -= 2*pad
unstacked = np.split(patches, patch_n[0]*patch_n[1])
coords = [
(
slice(0, canvas_shape[0]),
slice(int(y*y_stride), int(y*y_stride) + patches.shape[1]),
slice(int(x*x_stride), int(x*x_stride) + patches.shape[2])
) for x in range(patch_n[1]) for y in range(patch_n[0])
]
canvas = np.zeros(canvas_shape)
for patch, coord in zip(unstacked, coords):
canvas[coord] += np.stack([patch, np.ones(patch.shape)], -1)
if np.any(canvas[...,-1] == 0):
warnings.warn("zero-coverage regions detected")
    # Average overlapping contributions: summed values divided by coverage count.
    return canvas[..., 0] / canvas[..., 1]
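
# Round-trip sketch for the 2D helpers above (illustrative, synthetic data):
# an image is split into overlapping patches and reassembled; overlapping
# regions are averaged, so the reconstruction matches the input here.
#
#   img = np.arange(100 * 120, dtype=float).reshape(100, 120)
#   patches = into_patches(img, (64, 64), (2, 3))     # 6 patches of 64x64
#   restored = from_patches(patches, (2, 3), img.shape)
#   assert np.allclose(restored, img)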
|
python
|
# -*- coding:utf-8 -*-
# Copyright 2019 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from openstack import proxy2
from openstack.bssintl.v1 import bill as _bill
from openstack.bssintl.v1 import customer_credit as _customer_credit
from openstack.bssintl.v1 import customer_management as _customer_management
from openstack.bssintl.v1 import enquiry as _enquiry
from openstack.bssintl.v1 import pay_per_use_resource as _pay_per_use_resources
from openstack.bssintl.v1 import period_order as _period_order
from openstack.bssintl.v1 import period_resourse as _period_resourse
from openstack.bssintl.v1 import realname_auth as _realname_auth
from openstack.bssintl.v1 import utilities as _utilities
class Proxy(proxy2.BaseProxy):
def query_customer_resource(self, domain_id, **kwargs):
'''
A customer can query its pay-per-use resources on the partner sales platform.
The on-demand resource data has a latency, and the latency for each cloud service data varies.
The data obtained using this API is for reference only.
This API can be invoked using the partner AK/SK or token only.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_pay_per_use_resources.QueryCustomerResource, domain_id=domain_id, **kwargs)
def query_partner_monthly_bills(self, domain_id, **kwargs):
'''
This API is used to query monthly bills.
This API can be invoked only by the partner account AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._list_once(_bill.QueryPartnerMonthlyBills, domain_id=domain_id, requires_id=False, **kwargs)
def enable_auto_renew(self, domain_id, resource_id, action_id, **kwargs):
'''
A customer can use this API to enable automatic subscription renewal for its long-term yearly/monthly resources to prevent the resources from being deleted when they are expired.
This API can be invoked using the customer token only.
:param action_id:
:param domain_id:
:param resource_id:
:param kwargs:
:return:
'''
return self._create(_period_resourse.AutoRenew, domain_id=domain_id, resource_id=resource_id, action_id=action_id, **kwargs)
def disable_auto_renew(self, domain_id, resource_id, action_id, **kwargs):
'''
A customer can disable automatic subscription renewal when needed. After disabling this function, the customer needs to manually renew the subscription to the resources before they expire.
This API can be invoked using the customer token only.
:param domain_id:
:param resource_id:
:param action_id:
:return:
'''
return self._delete(_period_resourse.AutoRenew, domain_id=domain_id, resource_id=resource_id, action_id=action_id, **kwargs)
def renew_subscription_by_resourceId(self, domain_id, **kwargs):
'''
When subscription to yearly/monthly resources of a customer is about to expire, the customer can renew the subscription to the resources.
This API can be invoked using the customer token only.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_period_resourse.RenewSubscriptionByResourceId, domain_id=domain_id, **kwargs)
def unsubscribe_by_resourceId(self, domain_id, **kwargs):
'''
If a customer has subscribed to a yearly/monthly resource, the customer can use this API to unsubscribe from the resource, including the renewed part and currently used part.
The customer cannot use the resources after unsubscription.
This API can be invoked using the customer token only.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_period_resourse.UnsubscribeByResourceId, domain_id=domain_id, **kwargs)
def pay_period_order(self, domain_id, **kwargs):
'''
A customer can invoke this API to pay yearly-monthly product orders in the pending payment status.
This API can be invoked using the customer token only.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_period_order.PayPeriodOrder, domain_id=domain_id, **kwargs)
def unsubscribe_period_order(self, domain_id, **kwargs):
'''
        A customer can invoke this API to unsubscribe from yearly/monthly product orders in the subscribed, changing, or failed to be provisioned status.
This API can be invoked using the customer token only.
:param domain_id:
:param kwargs:
:return:
'''
return self._delete(_period_order.UnsubscribePeriodOrder, domain_id=domain_id, requires_id=False, **kwargs)
def cancel_order(self, domain_id, **kwargs):
'''
A customer can invoke this API to cancel orders in the pending payment status.
This API can be invoked using the customer token only.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_period_order.CancelOrder, domain_id=domain_id, **kwargs)
def query_customer_period_resources_list(self, domain_id, **kwargs):
'''
A customer can query one or all yearly/monthly resources on the customer platform.
This API can be invoked only by the customer AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._list_once(_period_resourse.QueryCustomerPeriodResourcesList, domain_id=domain_id, requires_id=False, **kwargs)
def query_order_detail(self, domain_id, **kwargs):
'''
A customer can query resource details and provisioning status of an order on the partner sales platform.
This API can be invoked using the customer token only.
:param domain_id:
:param kwargs:
:return:
'''
return self._list_once(_period_order.QueryOrderDetail, domain_id=domain_id, requires_id=False, **kwargs)
def query_order_list(self, domain_id, **kwargs):
'''
After a customer purchases yearly/monthly resources, it can query the orders in different statuses,
such as in the pending approval, processing, canceled, completed, and pending payment statuses.
This API can be invoked using the customer AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._list_once(_period_order.QueryOrderList, domain_id=domain_id, requires_id=False, **kwargs)
def query_credit(self, domain_id, **kwargs):
'''
* This API can be used to query the budget of a customer for the partner to determine whether to adjust the budget.
* This API can be invoked only by the partner account AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._list_once(_customer_credit.QueryCredit, domain_id=domain_id, requires_id=False, **kwargs)
def set_credit(self, domain_id, **kwargs):
'''
* This API is used to set or adjust a customer's budget.
        * This API can be invoked only by the partner account AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_customer_credit.SetCredit, domain_id=domain_id, **kwargs)
def query_rating(self, domain_id, **kwargs):
'''
The partner sales platform obtains the product prices on the HUAWEI CLOUD official website based on the product catalog.
This API can be invoked using the customer token, or the partner's AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_enquiry.QueryRating, domain_id=domain_id, **kwargs)
def create_customer(self, domain_id, **kwargs):
'''
This API is used to create a HUAWEI CLOUD account for a customer when the customer creates an account on your sales platform,
and bind the customer account on the partner sales platform to the HUAWEI CLOUD account.
In addition, the HUAWEI CLOUD account is bound to the partner account.
This API can be invoked only by the partner AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_customer_management.CreateCustomer, domain_id=domain_id, **kwargs)
def check_customer_register_info(self, domain_id, **kwargs):
'''
This API is used to check whether the account name, and mobile number or email address entered by the customer can be used for registration.
This API can be invoked only by the partner AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_customer_management.CheckCustomerRegisterInfo, domain_id=domain_id, **kwargs)
def query_customer_list(self, domain_id, **kwargs):
'''
This API is used to query your customers.
This API can be invoked only by the partner account AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_customer_management.QueryCustomerList, domain_id=domain_id, **kwargs)
def send_verification_code(self, domain_id, **kwargs):
'''
If customers enter email addresses for registration, this API is used to send a registration verification code to the email addresses to verify the registration information.
This API can be invoked only by the partner AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_utilities.SendVerificationcode, domain_id=domain_id, **kwargs)
def individual_realname_auth(self, domain_id, **kwargs):
'''
This API can be used to submit an individual real-name authentication application.
This API can be invoked only by the partner account AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_realname_auth.IndividualRealnameAuth, domain_id=domain_id, **kwargs)
def enterprise_realname_auth(self, domain_id, **kwargs):
'''
This API can be used to submit an enterprise real-name authentication application.
This API can be invoked only by the partner account AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_realname_auth.EnterpriseRealnameAuth, domain_id=domain_id, **kwargs)
def change_enterprise_realname_auth(self, domain_id, **kwargs):
'''
* This API can be used to submit a real-name authentication change application.
* This API can be invoked only by the partner account AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_realname_auth.ChangeEnterpriseRealnameAuth, domain_id=domain_id, **kwargs)
def query_realname_auth(self, domain_id, **kwargs):
'''
If the response to a real-name authentication application or real-name authentication change application indicates that manual review is required,
this API can be used to query the review result.
This API can be invoked only by the partner account AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._list_once(_realname_auth.QueryRealnameAuth, domain_id=domain_id, **kwargs)
def query_resource_status_by_orderId(self, domain_id, order_id, **kwargs):
'''
A customer can query resource details and provisioning status of an order on the partner sales platform.
This API can be invoked using the customer token only.
:param domain_id:
:param order_id:
:param kwargs:
:return:
'''
return self._list_once(_period_order.QueryResourceStatusByOrderId, domain_id=domain_id, order_id=order_id, **kwargs)
def query_refund_order_amount(self, domain_id, order_id, **kwargs):
'''
* A customer can query the resources and original orders of the unsubscription amount for an unsubscription order or degrade order.
* This API can be invoked using the AK/SK or token of the partner or the token of the partner's customer.
:param domain_id:
:param order_id:
:param kwargs:
:return:
'''
return self._list_once(_period_order.QueryRefundOrderAmount, domain_id=domain_id, order_id=order_id, **kwargs)
def query_monthly_expenditure_summary(self, domain_id, **kwargs):
'''
* This API can be used to query the expenditure summary bills of a customer on the customer platform. The bills summarize the summary data by month. The data of the previous day is updated once a day.
* This API can be invoked using the customer AK/SK or token only.
:param domain_id:
:param kwargs:
:return:
'''
return self._list_once(_bill.QueryMonthlyExpenditureSummary, domain_id=domain_id, **kwargs)
def query_resource_usage_details(self, domain_id, **kwargs):
'''
        This API can be used to query usage details of each resource for a customer on the customer platform. The resource details have a latency (a maximum of 24 hours).
This API can be invoked using the customer AK/SK or token only.
:param domain_id:
:param kwargs:
:return:
'''
return self._list_once(_bill.QueryResourceUsageDetails, domain_id=domain_id, **kwargs)
def query_resource_usage_record(self, domain_id, **kwargs):
'''
This API can be used to query the usage details of each resource for a customer on the customer platform.
This API can be invoked using the customer AK/SK or token only.
:param domain_id:
:param kwargs:
:return:
'''
return self._list_once(_bill.QueryResourceUsageRecord, domain_id=domain_id, **kwargs)
def freeze_customer(self, domain_id, **kwargs):
'''
A partner can freeze an account of a customer associated with the partner by reseller model.
This API can be invoked only by the partner account AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_customer_management.FreezeCustomer, domain_id=domain_id, **kwargs)
def unfreeze_customer(self, domain_id, **kwargs):
'''
A partner can unfreeze an account of a customer associated with the partner by reseller model.
This API can be invoked only by the partner account AK/SK or token.
:param domain_id:
:param kwargs:
:return:
'''
return self._create(_customer_management.UnfreezeCustomer, domain_id=domain_id, **kwargs)
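
# Illustrative sketch only: this proxy is normally reached through an SDK
# connection object. The connection setup is omitted, and both the attribute
# name used to expose the service ("bssintl" below) and the keyword arguments
# are assumptions, not taken from this file.
#
#   conn = ...  # an SDK Connection configured with the partner credentials
#   credit = conn.bssintl.query_credit(domain_id, customer_id=customer_id)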
|
python
|
from typing import List
from datetime import datetime
from pydantic import BaseModel
from model.Schema.word import Word
class init_module(BaseModel):
module_name: str
publisher: str
word: List[Word]
class Module(init_module):
created_at: datetime
module_id: int
class Response_module(BaseModel):
module: Module
word: List[Word]
class Config:
orm_mode = True
class Modules(Module):
created_at: datetime
module_id: int
class Config:
orm_mode = True
class ModuleList(BaseModel):
module: List[Modules]
|
python
|
from ...Colors import *
from ..BaseControlClass import GradientTypesEnum
from copy import copy
from json import dump, load
class StyleHintsEnum(object):
Flat = 'Flat'
Raised = 'Raised'
Sunken = 'Sunken'
Hover = 'Hover'
Image = 'Image'
# Custom = 'Custom'
class DefaultStyle(object):
def __init__(self, baseColor=None):
if baseColor is None:
baseColor = RGBA255(30, 30, 30, 255)
self._baseColor = vec4(0)
self.activeColor = RGB1(.8, .4, 0)
self.name = 'Default'
self.raisedGradientColor0 = WHITE
self.raisedGradientColor1 = BLACK
self.sunkenGradientColor0 = BLACK
self.sunkenGradientColor1 = WHITE
self.pressedGradientColor0 = BLACK
self.pressedGradientColor1 = WHITE
self.hoverGradientColor0 = WHITE
self.hoverGradientColor1 = BLACK
self.autoRaiseGradientColor0 = WHITE
self.autoRaiseGradientColor1 = BLACK
self.baseColor = baseColor
def _buildGradients(self):
baseColor = self._baseColor
color0 = (baseColor + WHITE / 2.0) / 2.0
color0.w = baseColor.w
color1 = baseColor / 4.0
color1.w = baseColor.w
color2 = (baseColor + WHITE / 3.0) / 2.0
color2.w = baseColor.w
color3 = baseColor / 6.0
color3.w = baseColor.w
color4 = (baseColor + WHITE / 4.0) / 2.0
color4.w = baseColor.w
color5 = baseColor / 8.0
color5.w = baseColor.w
color6 = (baseColor + WHITE / 1.8) / 2.0
color6.w = baseColor.w
color7 = baseColor / 1.4
color7.w = baseColor.w
self.raisedGradientColor0 = color2
self.raisedGradientColor1 = color3
self.sunkenGradientColor0 = color3
self.sunkenGradientColor1 = color2
self.pressedGradientColor0 = color4
self.pressedGradientColor1 = color5
self.hoverGradientColor0 = color0
self.hoverGradientColor1 = color1
self.autoRaiseGradientColor0 = color6
self.autoRaiseGradientColor1 = color7
def __repr__(self):
return str(self.name)
def saveToFile(self, path):
vals = {}
with open(path, 'w') as file:
attribs = dir(self)
for att in attribs:
if not att.startswith('_'):
vals[att] = getattr(self, att)
dump(vals, file, indent=4)
@staticmethod
def readFromFile(path):
style = DefaultStyle()
with open(path) as file:
vals = load(file)
            for att in vals.keys():
setattr(style, att, vals[att])
return style
@property
def baseColor(self):
return self._baseColor
@baseColor.setter
def baseColor(self, value):
baseColor = vec4(value)
        self._baseColor = baseColor  # keep the vec4-normalized color for gradient math
self.backgroundColor = vec4(baseColor)
self.fontColor = WHITE
self.fontOutlineColor = BLUE
self.fontSize = 10
self.borderSize = 1
self.borderColor = fromRGB1_A(baseColor / 4.0, 1)
self.focusBorderColor = ORANGE
self.hoverBorderColor = GREEN
self.gradientType = GradientTypesEnum.noGradient
self.hoverColor = fromRGB1_A((baseColor + (WHITE / 10.0)), baseColor.w)
self.pressedColor = fromRGB1_A(baseColor / 1.5, baseColor.w)
self.buttonStyleHint = StyleHintsEnum.Raised
self.controlStyleHint = StyleHintsEnum.Raised
self._buildGradients()
def _copy(self):
return copy(self)
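
# Small usage sketch (illustrative): build a style around a custom base color.
# RGBA255 comes from the Colors module imported above; the derived gradient
# colors are filled in by _buildGradients via the baseColor setter.
#
#   style = DefaultStyle(baseColor=RGBA255(40, 60, 90, 255))
#   style.fontSize = 12
#   top_color = style.raisedGradientColor0
#   bottom_color = style.raisedGradientColor1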
|
python
|
# Checks whether the parentheses in an expression are balanced.
# Counting '(' and ')' alone is not enough (e.g. ')(' has matching counts but
# is invalid), so a running balance is kept and must never go negative.
exp = input('Type the expression: ')
balance = 0
for c in exp:
    if c == '(':
        balance += 1
    elif c == ')':
        balance -= 1
        if balance < 0:
            break
if balance == 0:
    print('Your expression is correct!')
else:
    print('Your expression is wrong!')
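# Examples of the balance check above (illustrative):
#   '(a+b)*(c-d)'  -> correct
#   ')('           -> wrong (the balance goes negative)
#   '((a+b)'       -> wrong (an '(' is never closed)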
|
python
|
import argparse
import logging
import sys
from . import config
from . import gerrit
from . import jenkins
from . import output
def run():
parser = argparse.ArgumentParser(
description='A command line tool for working with Ovirt CI')
parser.add_argument(
'--debug',
help="Show noisy debug logs",
action="store_true")
subparsers = parser.add_subparsers(title="commands")
build_artifacts_parser = subparsers.add_parser(
"build-artifacts",
help="build artifacts for a change")
build_artifacts_parser.set_defaults(command=build_artifacts)
build_artifacts_parser.add_argument(
'change',
help='Gerrit change number')
args = parser.parse_args()
logging.basicConfig(
level=logging.DEBUG if args.debug else logging.WARNING,
format="%(asctime)s %(levelname)-7s [%(name)s] %(message)s")
args.command(args)
def build_artifacts(args):
cfg = config.load()
ga = gerrit.API(host=cfg.gerrit.host)
ja = jenkins.API(
host=cfg.jenkins.host,
user_id=cfg.jenkins.user_id,
api_token=cfg.jenkins.api_token)
out = output.TextOutput(steps=5)
out.step("Getting build info for change %s", args.change)
info = ga.build_info(args.change)
out.step("Starting build-artifacts job")
out.info(("project", info["project"]),
("branch", info["branch"]),
("patchset", info["patchset"]))
queue_url = ja.run(
url=info["url"], ref=info["ref"], stage="build-artifacts")
out.step("Waiting until job is executed")
out.info(("queue", queue_url))
job_url = ja.wait_for_queue(queue_url)
out.step("Waiting until job is completed")
out.info(("job", job_url))
result = ja.wait_for_job(job_url)
if result != "SUCCESS":
        out.failure("Build artifacts failed with %s", result)
sys.exit(1)
out.success("Job completed successfully, congratulations!")
|
python
|
# exceptions.py -- custom exception classes for this module
class PayloadException(Exception):
'''
Something went wrong with the payload from the GitHub API.
'''
pass
class WorkerException(Exception):
'''
Something went wrong in the worker process.
'''
pass
class QueueException(Exception):
'''
Something went wrong in the queue process.
'''
pass
|
python
|
import os
from http import HTTPStatus
from pathlib import Path
from typing import Union
from restit._response import Response
from restit.internal.suffix_media_type_mapping import SUFFIX_MEDIA_TYPE_MAPPING
class StaticFileResponse(Response):
def __init__(
self, file_path: Union[str, Path],
status_code: Union[int, HTTPStatus] = HTTPStatus.OK,
headers: dict = None,
suffix: str = None
):
headers = headers or {}
suffix = suffix or StaticFileResponse._get_suffix_from_file_path(file_path)
        # Fall back to a generic binary type for unknown suffixes (assumed default).
        content_type = SUFFIX_MEDIA_TYPE_MAPPING.get(suffix, "application/octet-stream")
headers.setdefault("Content-Type", content_type)
with open(file_path, "rb") as fp:
file_content = fp.read()
super().__init__(file_content, status_code, headers)
@staticmethod
def _get_suffix_from_file_path(file_path: str) -> str:
_, suffix = os.path.splitext(file_path)
return suffix
|
python
|
#
# PySNMP MIB module SNMP-REPEATER-MIB (http://pysnmp.sf.net)
# ASN.1 source http://mibs.snmplabs.com:80/asn1/SNMP-REPEATER-MIB
# Produced by pysmi-0.0.7 at Sun Feb 14 00:28:55 2016
# On host bldfarm platform Linux version 4.1.13-100.fc21.x86_64 by user goose
# Using Python version 3.5.0 (default, Jan 5 2016, 17:11:52)
#
( ObjectIdentifier, Integer, OctetString, ) = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
( NamedValues, ) = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
( ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ) = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion")
( OwnerString, ) = mibBuilder.importSymbols("IF-MIB", "OwnerString")
( NotificationGroup, ModuleCompliance, ObjectGroup, ) = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
( ModuleIdentity, IpAddress, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, NotificationType, iso, Counter32, Gauge32, ObjectIdentity, TimeTicks, Bits, Unsigned32, Integer32, mib_2, ) = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "IpAddress", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "NotificationType", "iso", "Counter32", "Gauge32", "ObjectIdentity", "TimeTicks", "Bits", "Unsigned32", "Integer32", "mib-2")
( MacAddress, RowStatus, TextualConvention, TimeStamp, DisplayString, TestAndIncr, ) = mibBuilder.importSymbols("SNMPv2-TC", "MacAddress", "RowStatus", "TextualConvention", "TimeStamp", "DisplayString", "TestAndIncr")
snmpRptrMod = ModuleIdentity((1, 3, 6, 1, 2, 1, 22, 5)).setRevisions(("1993-09-01 00:00", "1992-10-01 00:00",))
if mibBuilder.loadTexts: snmpRptrMod.setLastUpdated('9609140000Z')
if mibBuilder.loadTexts: snmpRptrMod.setOrganization('IETF HUB MIB Working Group')
if mibBuilder.loadTexts: snmpRptrMod.setContactInfo('WG E-mail: [email protected]\n\n Chair: Dan Romascanu\n Postal: Madge Networks (Israel) Ltd.\n Atidim Technology Park, Bldg. 3\n Tel Aviv 61131, Israel\n Tel: 972-3-6458414, 6458458\n Fax: 972-3-6487146\n E-mail: [email protected]\n\n Editor: Kathryn de Graaf\n Postal: 3Com Corporation\n 118 Turnpike Rd.\n Southborough, MA 01772 USA\n Tel: (508)229-1627\n Fax: (508)490-5882\n E-mail: [email protected]')
if mibBuilder.loadTexts: snmpRptrMod.setDescription("Management information for 802.3 repeaters.\n\n The following references are used throughout\n this MIB module:\n\n [IEEE 802.3 Std]\n refers to IEEE 802.3/ISO 8802-3 Information\n processing systems - Local area networks -\n Part 3: Carrier sense multiple access with\n collision detection (CSMA/CD) access method\n and physical layer specifications (1993).\n\n [IEEE 802.3 Mgt]\n refers to IEEE 802.3u-1995, '10 Mb/s &\n 100 Mb/s Management, Section 30,'\n Supplement to ANSI/IEEE 802.3.\n\n The following terms are used throughout this\n MIB module. For complete formal definitions,\n the IEEE 802.3 standards should be consulted\n wherever possible:\n\n System - A managed entity compliant with this\n MIB, and incorporating at least one managed\n 802.3 repeater.\n\n Chassis - An enclosure for one managed repeater,\n part of a managed repeater, or several managed\n repeaters. It typically contains an integral\n power supply and a variable number of available\n module slots.\n\n Repeater-unit - The portion of the repeater set\n that is inboard of the physical media interfaces.\n The physical media interfaces (MAUs, AUIs) may be\n physically separated from the repeater-unit, or\n they may be integrated into the same physical\n package.\n\n Trivial repeater-unit - An isolated port that can\n gather statistics.\n\n Group - A recommended, but optional, entity\n defined by the IEEE 802.3 management standard,\n in order to support a modular numbering scheme.\n The classical example allows an implementor to\n represent field-replaceable units as groups of\n ports, with the port numbering matching the\n modular hardware implementation.\n\n System interconnect segment - An internal\n segment allowing interconnection of ports\n belonging to different physical entities\n into the same logical manageable repeater.\n Examples of implementation might be\n backplane busses in modular hubs, or\n chaining cables in stacks of hubs.\n Stack - A scalable system that may include\n managed repeaters, in which modularity is\n achieved by interconnecting a number of\n different chassis.\n\n Module - A building block in a modular\n chassis. It typically maps into one 'slot';\n however, the range of configurations may be\n very large, with several modules entering\n one slot, or one module covering several\n slots.\n ")
snmpDot3RptrMgt = MibIdentifier((1, 3, 6, 1, 2, 1, 22))
class OptMacAddr(OctetString, TextualConvention):
displayHint = '1x:'
subtypeSpec = OctetString.subtypeSpec+ConstraintsUnion(ValueSizeConstraint(0,0),ValueSizeConstraint(6,6),)
rptrBasicPackage = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 1))
rptrRptrInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 1, 1))
rptrGroupInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 1, 2))
rptrPortInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 1, 3))
rptrAllRptrInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 1, 4))
rptrMonitorPackage = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 2))
rptrMonitorRptrInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 2, 1))
rptrMonitorGroupInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 2, 2))
rptrMonitorPortInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 2, 3))
rptrMonitorAllRptrInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 2, 4))
rptrAddrTrackPackage = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 3))
rptrAddrTrackRptrInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 3, 1))
rptrAddrTrackGroupInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 3, 2))
rptrAddrTrackPortInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 3, 3))
rptrTopNPackage = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 4))
rptrTopNRptrInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 4, 1))
rptrTopNGroupInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 4, 2))
rptrTopNPortInfo = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 4, 3))
rptrGroupCapacity = MibScalar((1, 3, 6, 1, 2, 1, 22, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrGroupCapacity.setDescription('********* THIS OBJECT IS DEPRECATED **********\n\n The rptrGroupCapacity is the number of groups\n that can be contained within the repeater. Within\n each managed repeater, the groups are uniquely\n numbered in the range from 1 to rptrGroupCapacity.\n\n Some groups may not be present in the repeater, in\n which case the actual number of groups present\n will be less than rptrGroupCapacity. The number\n of groups present will never be greater than\n rptrGroupCapacity.\n\n Note: In practice, this will generally be the\n number of field-replaceable units (i.e., modules,\n cards, or boards) that can fit in the physical\n repeater enclosure, and the group numbers will\n correspond to numbers marked on the physical\n enclosure.')
rptrOperStatus = MibScalar((1, 3, 6, 1, 2, 1, 22, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6,))).clone(namedValues=NamedValues(("other", 1), ("ok", 2), ("rptrFailure", 3), ("groupFailure", 4), ("portFailure", 5), ("generalFailure", 6),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrOperStatus.setDescription("********* THIS OBJECT IS DEPRECATED **********\n\n The rptrOperStatus object indicates the\n operational state of the repeater. The\n rptrHealthText object may be consulted for more\n specific information about the state of the\n repeater's health.\n\n In the case of multiple kinds of failures (e.g.,\n repeater failure and port failure), the value of\n this attribute shall reflect the highest priority\n failure in the following order, listed highest\n priority first:\n\n rptrFailure(3)\n groupFailure(4)\n portFailure(5)\n generalFailure(6).")
rptrHealthText = MibScalar((1, 3, 6, 1, 2, 1, 22, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0,255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrHealthText.setDescription('********* THIS OBJECT IS DEPRECATED **********\n\n The health text object is a text string that\n provides information relevant to the operational\n state of the repeater. Agents may use this string\n to provide detailed information on current\n failures, including how they were detected, and/or\n instructions for problem resolution. The contents\n are agent-specific.')
rptrReset = MibScalar((1, 3, 6, 1, 2, 1, 22, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("noReset", 1), ("reset", 2),))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rptrReset.setDescription('********* THIS OBJECT IS DEPRECATED **********\n\n Setting this object to reset(2) causes a\n transition to the START state of Fig 9-2 in\n section 9 [IEEE 802.3 Std] for a 10Mb/s repeater,\n and the START state of Fig 27-2 in section 27\n of that standard for a 100Mb/s repeater.\n\n Setting this object to noReset(1) has no effect.\n The agent will always return the value noReset(1)\n when this object is read.\n\n After receiving a request to set this variable to\n reset(2), the agent is allowed to delay the reset\n for a short period. For example, the implementor\n may choose to delay the reset long enough to allow\n the SNMP response to be transmitted. In any\n event, the SNMP response must be transmitted.\n\n This action does not reset the management counters\n defined in this document nor does it affect the\n portAdminStatus parameters. Included in this\n action is the execution of a disruptive Self-Test\n with the following characteristics: a) The nature\n of the tests is not specified. b) The test resets\n the repeater but without affecting management\n information about the repeater. c) The test does\n not inject packets onto any segment. d) Packets\n received during the test may or may not be\n transferred. e) The test does not interfere with\n management functions.\n\n After performing this self-test, the agent will\n update the repeater health information (including\n rptrOperStatus and rptrHealthText), and send a\n rptrHealth trap.')
rptrNonDisruptTest = MibScalar((1, 3, 6, 1, 2, 1, 22, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("noSelfTest", 1), ("selfTest", 2),))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rptrNonDisruptTest.setDescription("********* THIS OBJECT IS DEPRECATED **********\n\n            Setting this object to selfTest(2) causes the\n            repeater to perform an agent-specific, non-\n            disruptive self-test that has the following\n            characteristics: a) The nature of the tests is\n            not specified. b) The test does not change the\n            state of the repeater or management information\n            about the repeater. c) The test does not inject\n            packets onto any segment. d) The test does not\n            prevent the relay of any packets. e) The test\n            does not interfere with management functions.\n\n            After performing this test, the agent will update\n            the repeater health information (including\n            rptrOperStatus and rptrHealthText) and send a\n            rptrHealth trap.\n\n            Note that this definition allows returning an\n            'okay' result after doing a trivial test.\n\n            Setting this object to noSelfTest(1) has no\n            effect. The agent will always return the value\n            noSelfTest(1) when this object is read.")
rptrTotalPartitionedPorts = MibScalar((1, 3, 6, 1, 2, 1, 22, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrTotalPartitionedPorts.setDescription('********* THIS OBJECT IS DEPRECATED **********\n\n This object returns the total number of ports in\n the repeater whose current state meets all three\n of the following criteria: rptrPortOperStatus\n does not have the value notPresent(3),\n rptrPortAdminStatus is enabled(1), and\n rptrPortAutoPartitionState is autoPartitioned(2).')
rptrGroupTable = MibTable((1, 3, 6, 1, 2, 1, 22, 1, 2, 1), )
if mibBuilder.loadTexts: rptrGroupTable.setDescription('Table of descriptive and status information about\n the groups of ports.')
rptrGroupEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 1, 2, 1, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrGroupIndex"))
if mibBuilder.loadTexts: rptrGroupEntry.setDescription('An entry in the table, containing information\n about a single group of ports.')
rptrGroupIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrGroupIndex.setDescription('This object identifies the group within the\n system for which this entry contains\n information.')
rptrGroupDescr = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0,255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrGroupDescr.setDescription("********* THIS OBJECT IS DEPRECATED **********\n\n            A textual description of the group. This value\n            should include the full name and version\n            identification of the group's hardware type and\n            indicate how the group is differentiated from\n            other types of groups in the repeater. 'Wilma\n            Flintstone 6-Port FOIRL Plug-in Module, Rev A' or\n            'Barney Rubble 10BASE-T 4-port SIMM socket Version\n            2.1' are examples of valid group descriptions.\n\n            It is mandatory that this only contain printable\n            ASCII characters.")
rptrGroupObjectID = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 2, 1, 1, 3), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrGroupObjectID.setDescription("The vendor's authoritative identification of the\n group. This value may be allocated within the SMI\n enterprises subtree (1.3.6.1.4.1) and provides a\n straight-forward and unambiguous means for\n determining what kind of group is being managed.\n\n For example, this object could take the value\n 1.3.6.1.4.1.4242.1.2.14 if vendor 'Flintstones,\n Inc.' was assigned the subtree 1.3.6.1.4.1.4242,\n and had assigned the identifier\n 1.3.6.1.4.1.4242.1.2.14 to its 'Wilma Flintstone\n 6-Port FOIRL Plug-in Module.'")
rptrGroupOperStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6,))).clone(namedValues=NamedValues(("other", 1), ("operational", 2), ("malfunctioning", 3), ("notPresent", 4), ("underTest", 5), ("resetInProgress", 6),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrGroupOperStatus.setDescription('An object that indicates the operational status\n of the group.\n\n A status of notPresent(4) indicates that the group\n is temporarily or permanently physically and/or\n logically not a part of the repeater. It is an\n implementation-specific matter as to whether the\n agent effectively removes notPresent entries from\n the table.\n\n A status of operational(2) indicates that the\n group is functioning, and a status of\n malfunctioning(3) indicates that the group is\n malfunctioning in some way.')
rptrGroupLastOperStatusChange = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 2, 1, 1, 5), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrGroupLastOperStatusChange.setDescription("********* THIS OBJECT IS DEPRECATED **********\n\n An object that contains the value of sysUpTime at\n the time when the last of the following occurred:\n 1) the agent cold- or warm-started;\n 2) the row for the group was created (such\n as when the group was added to the system); or\n 3) the value of rptrGroupOperStatus for the\n group changed.\n\n A value of zero indicates that the group's\n operational status has not changed since the agent\n last restarted.")
rptrGroupPortCapacity = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrGroupPortCapacity.setDescription('The rptrGroupPortCapacity is the number of ports\n that can be contained within the group. Valid\n range is 1-2147483647. Within each group, the\n ports are uniquely numbered in the range from 1 to\n rptrGroupPortCapacity.\n\n Some ports may not be present in the system, in\n which case the actual number of ports present\n will be less than the value of rptrGroupPortCapacity.\n The number of ports present in the group will never\n be greater than the value of rptrGroupPortCapacity.\n\n Note: In practice, this will generally be the\n number of ports on a module, card, or board, and\n the port numbers will correspond to numbers marked\n on the physical embodiment.')
rptrPortTable = MibTable((1, 3, 6, 1, 2, 1, 22, 1, 3, 1), )
if mibBuilder.loadTexts: rptrPortTable.setDescription('Table of descriptive and status information about\n the repeater ports in the system. The number of\n entries is independent of the number of repeaters\n in the managed system.')
rptrPortEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 1, 3, 1, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrPortGroupIndex"), (0, "SNMP-REPEATER-MIB", "rptrPortIndex"))
if mibBuilder.loadTexts: rptrPortEntry.setDescription('An entry in the table, containing information\n about a single port.')
rptrPortGroupIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrPortGroupIndex.setDescription('This object identifies the group containing the\n port for which this entry contains information.')
rptrPortIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrPortIndex.setDescription('This object identifies the port within the group\n for which this entry contains information. This\n identifies the port independently from the repeater\n it may be attached to. The numbering scheme for\n ports is implementation specific; however, this\n value can never be greater than\n rptrGroupPortCapacity for the associated group.')
rptrPortAdminStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2),))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rptrPortAdminStatus.setDescription("Setting this object to disabled(2) disables the\n port. A disabled port neither transmits nor\n receives. Once disabled, a port must be\n explicitly enabled to restore operation. A port\n which is disabled when power is lost or when a\n reset is exerted shall remain disabled when normal\n operation resumes.\n\n The admin status takes precedence over auto-\n partition and functionally operates between the\n auto-partition mechanism and the AUI/PMA.\n\n Setting this object to enabled(1) enables the port\n and exerts a BEGIN on the port's auto-partition\n state machine.\n\n (In effect, when a port is disabled, the value of\n rptrPortAutoPartitionState for that port is frozen\n until the port is next enabled. When the port\n becomes enabled, the rptrPortAutoPartitionState\n becomes notAutoPartitioned(1), regardless of its\n pre-disabling state.)")
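# --- Illustrative usage (not part of the MIB definitions above) -------------
# rptrPortAdminStatus is a read-write column indexed by (rptrPortGroupIndex,
# rptrPortIndex), so a manager names a specific port with two instance
# sub-identifiers.  A minimal sketch of disabling port 3 in group 1 with
# pysnmp's high-level API follows; the host name, community string and
# indices are hypothetical placeholders, and the code is left commented out
# so that importing this generated module has no side effects.
#
#     from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                               ContextData, ObjectType, ObjectIdentity, setCmd)
#
#     errorIndication, errorStatus, errorIndex, varBinds = next(setCmd(
#         SnmpEngine(),
#         CommunityData('private'),
#         UdpTransportTarget(('repeater.example.org', 161)),
#         ContextData(),
#         # rptrPortAdminStatus.<group>.<port> = disabled(2)
#         ObjectType(ObjectIdentity('SNMP-REPEATER-MIB', 'rptrPortAdminStatus', 1, 3), 2)))
# -----------------------------------------------------------------------------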
rptrPortAutoPartitionState = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("notAutoPartitioned", 1), ("autoPartitioned", 2),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrPortAutoPartitionState.setDescription("The autoPartitionState flag indicates whether the\n            port is currently partitioned by the repeater's\n            auto-partition protection.\n\n            The conditions that cause port partitioning are\n            specified in the partition state machine in Sections\n            9 and 27 of [IEEE 802.3 Std]. They are not\n            differentiated here.")
rptrPortOperStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("operational", 1), ("notOperational", 2), ("notPresent", 3),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrPortOperStatus.setDescription("This object indicates the port's operational\n status. The notPresent(3) status indicates the\n port is physically removed (note this may or may\n not be possible depending on the type of port.)\n The operational(1) status indicates that the port\n is enabled (see rptrPortAdminStatus) and working,\n even though it might be auto-partitioned (see\n rptrPortAutoPartitionState).\n\n If this object has the value operational(1) and\n rptrPortAdminStatus is set to disabled(2), it is\n expected that this object's value will soon change\n to notOperational(2).")
rptrPortRptrId = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrPortRptrId.setDescription('This object identifies the repeater to\n which this port belongs. The repeater\n identified by a particular value of this object\n is the same as that identified by the same\n value of rptrInfoId. A value of zero\n indicates that this port currently is not\n a member of any repeater.')
rptrInfoTable = MibTable((1, 3, 6, 1, 2, 1, 22, 1, 4, 1), )
if mibBuilder.loadTexts: rptrInfoTable.setDescription('A table of information about each\n non-trivial repeater. The number of entries\n depends on the physical configuration of the\n managed system.')
rptrInfoEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 1, 4, 1, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrInfoId"))
if mibBuilder.loadTexts: rptrInfoEntry.setDescription('An entry in the table, containing information\n about a single non-trivial repeater.')
rptrInfoId = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrInfoId.setDescription('This object identifies the repeater for which\n this entry contains information.')
rptrInfoRptrType = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4,))).clone(namedValues=NamedValues(("other", 1), ("tenMb", 2), ("onehundredMbClassI", 3), ("onehundredMbClassII", 4),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrInfoRptrType.setDescription('The rptrInfoRptrType returns a value that identifies\n the CSMA/CD repeater type.')
rptrInfoOperStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("other", 1), ("ok", 2), ("failure", 3),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrInfoOperStatus.setDescription('The rptrInfoOperStatus object indicates the\n operational state of the repeater.')
rptrInfoReset = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("noReset", 1), ("reset", 2),))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rptrInfoReset.setDescription('Setting this object to reset(2) causes a\n transition to the START state of Fig 9-2 in\n section 9 [IEEE 802.3 Std] for a 10Mb/s repeater,\n and to the START state of Fig 27-2 in section 27\n of that standard for a 100Mb/s repeater.\n\n Setting this object to noReset(1) has no effect.\n The agent will always return the value noReset(1)\n when this object is read.\n\n After receiving a request to set this variable to\n reset(2), the agent is allowed to delay the reset\n for a short period. For example, the implementor\n may choose to delay the reset long enough to allow\n the SNMP response to be transmitted. In any\n event, the SNMP response must be transmitted.\n\n This action does not reset the management counters\n defined in this document nor does it affect the\n portAdminStatus parameters. Included in this\n action is the execution of a disruptive Self-Test\n with the following characteristics: a) The nature\n of the tests is not specified. b) The test resets\n the repeater but without affecting management\n information about the repeater. c) The test does\n not inject packets onto any segment. d) Packets\n received during the test may or may not be\n transferred. e) The test does not interfere with\n management functions.\n\n After performing this self-test, the agent will\n update the repeater health information (including\n rptrInfoOperStatus), and send a rptrInfoResetEvent\n notification.')
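# --- Illustrative usage (not part of the MIB definitions above) -------------
# The rptrInfoReset description explains that a manager triggers a repeater
# reset by writing reset(2) to the object and that reads always return
# noReset(1).  The commented sketch below issues that SET with pysnmp's
# high-level API; the host name, community string and rptrInfoId index (1)
# are hypothetical placeholders, and the code stays commented out so that
# importing this generated module has no side effects.
#
#     from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                               ContextData, ObjectType, ObjectIdentity, setCmd)
#
#     errorIndication, errorStatus, errorIndex, varBinds = next(setCmd(
#         SnmpEngine(),
#         CommunityData('private'),
#         UdpTransportTarget(('repeater.example.org', 161)),
#         ContextData(),
#         # rptrInfoReset.1 = reset(2); the agent may delay the reset briefly,
#         # but it must still send the SNMP response.
#         ObjectType(ObjectIdentity('SNMP-REPEATER-MIB', 'rptrInfoReset', 1), 2)))
#
#     if errorIndication or errorStatus:
#         raise RuntimeError(errorIndication or errorStatus.prettyPrint())
# -----------------------------------------------------------------------------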
rptrInfoPartitionedPorts = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 4, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrInfoPartitionedPorts.setDescription('This object returns the total number of ports in\n the repeater whose current state meets all three\n of the following criteria: rptrPortOperStatus\n does not have the value notPresent(3),\n rptrPortAdminStatus is enabled(1), and\n rptrPortAutoPartitionState is autoPartitioned(2).')
rptrInfoLastChange = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 1, 4, 1, 1, 6), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrInfoLastChange.setDescription('The value of sysUpTime when any of the following\n conditions occurred:\n 1) agent cold- or warm-started;\n 2) this instance of repeater was created\n (such as when a device or module was\n added to the system);\n 3) a change in the value of rptrInfoOperStatus;\n 4) ports were added or removed as members of\n the repeater; or\n 5) any of the counters associated with this\n repeater had a discontinuity.')
rptrMonitorTransmitCollisions = MibScalar((1, 3, 6, 1, 2, 1, 22, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorTransmitCollisions.setDescription('********* THIS OBJECT IS DEPRECATED **********\n\n For a clause 9 (10Mb/s) repeater, this counter\n is incremented every time the repeater state\n machine enters the TRANSMIT COLLISION state\n from any state other than ONE PORT LEFT\n (Ref: Fig 9-2 [IEEE 802.3 Std]).\n\n For a clause 27 repeater, this counter is\n incremented every time the repeater core state\n diagram enters the Jam state as a result of\n Activity(ALL) > 1 (fig 27-2 [IEEE 802.3 Std]).\n The approximate minimum time for rollover of this\n counter is 16 hours in a 10Mb/s repeater and 1.6\n hours in a 100Mb/s repeater.')
rptrMonitorGroupTable = MibTable((1, 3, 6, 1, 2, 1, 22, 2, 2, 1), )
if mibBuilder.loadTexts: rptrMonitorGroupTable.setDescription('********* THIS OBJECT IS DEPRECATED **********\n\n Table of performance and error statistics for the\n groups within the repeater. The number of entries\n is the same as that in the rptrGroupTable.')
rptrMonitorGroupEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 2, 2, 1, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrMonitorGroupIndex"))
if mibBuilder.loadTexts: rptrMonitorGroupEntry.setDescription("********* THIS OBJECT IS DEPRECATED **********\n\n An entry in the table, containing total\n performance and error statistics for a single\n group. Regular retrieval of the information in\n this table provides a means of tracking the\n performance and health of the networked devices\n attached to this group's ports.\n\n The counters in this table are redundant in the\n sense that they are the summations of information\n already available through other objects. However,\n these sums provide a considerable optimization of\n network management traffic over the otherwise\n necessary retrieval of the individual counters\n included in each sum.\n\n Note: Group-level counters are\n deprecated in this MIB. It is recommended\n that management applications instead use\n the repeater-level counters contained in\n the rptrMonTable.")
rptrMonitorGroupIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorGroupIndex.setDescription('********* THIS OBJECT IS DEPRECATED **********\n\n This object identifies the group within the\n repeater for which this entry contains\n information.')
rptrMonitorGroupTotalFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 2, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorGroupTotalFrames.setDescription('********* THIS OBJECT IS DEPRECATED **********\n\n The total number of frames of valid frame length\n that have been received on the ports in this group\n and for which the FCSError and CollisionEvent\n signals were not asserted. This counter is the\n summation of the values of the\n rptrMonitorPortReadableFrames counters for all of\n the ports in the group.\n\n This statistic provides one of the parameters\n necessary for obtaining the packet error rate.\n The approximate minimum time for rollover of this\n counter is 80 hours in a 10Mb/s repeater.')
rptrMonitorGroupTotalOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 2, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorGroupTotalOctets.setDescription('********* THIS OBJECT IS DEPRECATED **********\n\n The total number of octets contained in the valid\n frames that have been received on the ports in\n this group. This counter is the summation of the\n values of the rptrMonitorPortReadableOctets\n counters for all of the ports in the group.\n\n This statistic provides an indicator of the total\n data transferred. The approximate minimum time\n for rollover of this counter is 58 minutes in a\n 10Mb/s repeater.')
rptrMonitorGroupTotalErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 2, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorGroupTotalErrors.setDescription('********* THIS OBJECT IS DEPRECATED **********\n\n The total number of errors which have occurred on\n all of the ports in this group. This counter is\n the summation of the values of the\n rptrMonitorPortTotalErrors counters for all of the\n ports in the group.')
rptrMonitorPortTable = MibTable((1, 3, 6, 1, 2, 1, 22, 2, 3, 1), )
if mibBuilder.loadTexts: rptrMonitorPortTable.setDescription('Table of performance and error statistics for the\n ports. The number of entries is the same as that\n in the rptrPortTable.\n\n The columnar object rptrMonitorPortLastChange\n is used to indicate possible discontinuities\n of counter type columnar objects in the table.')
rptrMonitorPortEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrMonitorPortGroupIndex"), (0, "SNMP-REPEATER-MIB", "rptrMonitorPortIndex"))
if mibBuilder.loadTexts: rptrMonitorPortEntry.setDescription('An entry in the table, containing performance and\n error statistics for a single port.')
rptrMonitorPortGroupIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortGroupIndex.setDescription('This object identifies the group containing the\n port for which this entry contains information.')
rptrMonitorPortIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortIndex.setDescription('This object identifies the port within the group\n for which this entry contains information.')
rptrMonitorPortReadableFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortReadableFrames.setDescription('This object is the number of frames of valid\n frame length that have been received on this port.\n This counter is incremented by one for each frame\n received on this port whose OctetCount is greater\n than or equal to minFrameSize and less than or\n equal to maxFrameSize (Ref: IEEE 802.3 Std,\n 4.4.2.1) and for which the FCSError and\n CollisionEvent signals are not asserted.\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.\n\n This statistic provides one of the parameters\n necessary for obtaining the packet error rate.\n The approximate minimum time for rollover of this\n counter is 80 hours at 10Mb/s.')
rptrMonitorPortReadableOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortReadableOctets.setDescription("This object is the number of octets contained in\n valid frames that have been received on this port.\n This counter is incremented by OctetCount for each\n frame received on this port which has been\n determined to be a readable frame (i.e., including\n FCS octets but excluding framing bits and dribble\n bits).\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.\n\n This statistic provides an indicator of the total\n data transferred. The approximate minimum time\n for rollover of this counter in a 10Mb/s repeater\n is 58 minutes.\n\n For ports receiving traffic at a maximum rate in\n a 100Mb/s repeater, this counter can roll over\n in less than 6 minutes. Since that amount of time\n could be less than a management station's poll cycle\n time, in order to avoid a loss of information a\n management station is advised to also poll the\n rptrMonitorPortUpper32Octets object, or to use the\n 64-bit counter defined by\n rptrMonitorPortHCReadableOctets instead of the\n two 32-bit counters.")
rptrMonitorPortFCSErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortFCSErrors.setDescription('This counter is incremented by one for each frame\n received on this port with the FCSError signal\n asserted and the FramingError and CollisionEvent\n signals deasserted and whose OctetCount is greater\n than or equal to minFrameSize and less than or\n equal to maxFrameSize (Ref: 4.4.2.1, IEEE 802.3\n Std).\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.\n\n The approximate minimum time for rollover of this\n counter is 80 hours at 10Mb/s.')
rptrMonitorPortAlignmentErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortAlignmentErrors.setDescription('This counter is incremented by one for each frame\n received on this port with the FCSError and\n FramingError signals asserted and CollisionEvent\n signal deasserted and whose OctetCount is greater\n than or equal to minFrameSize and less than or\n equal to maxFrameSize (Ref: IEEE 802.3 Std,\n 4.4.2.1). If rptrMonitorPortAlignmentErrors is\n incremented then the rptrMonitorPortFCSErrors\n Counter shall not be incremented for the same\n frame.\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.\n\n The approximate minimum time for rollover of this\n counter is 80 hours at 10Mb/s.')
rptrMonitorPortFrameTooLongs = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortFrameTooLongs.setDescription('This counter is incremented by one for each frame\n received on this port whose OctetCount is greater\n than maxFrameSize (Ref: 4.4.2.1, IEEE 802.3 Std).\n If rptrMonitorPortFrameTooLongs is incremented\n then neither the rptrMonitorPortAlignmentErrors\n nor the rptrMonitorPortFCSErrors counter shall be\n incremented for the frame.\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.\n\n The approximate minimum time for rollover of this\n counter is 61 days in a 10Mb/s repeater.')
rptrMonitorPortShortEvents = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortShortEvents.setDescription('This counter is incremented by one for each\n CarrierEvent on this port with ActivityDuration\n less than ShortEventMaxTime. ShortEventMaxTime is\n greater than 74 bit times and less than 82 bit\n times. ShortEventMaxTime has tolerances included\n to provide for circuit losses between a\n conformance test point at the AUI and the\n measurement point within the state machine.\n\n Notes:\n\n ShortEvents may indicate externally\n generated noise hits which will cause the repeater\n to transmit Runts to its other ports, or propagate\n a collision (which may be late) back to the\n transmitting DTE and damaged frames to the rest of\n the network.\n\n Implementors may wish to consider selecting the\n ShortEventMaxTime towards the lower end of the\n allowed tolerance range to accommodate bit losses\n suffered through physical channel devices not\n budgeted for within this standard.\n\n The significance of this attribute is different\n in 10 and 100 Mb/s collision domains. Clause 9\n repeaters perform fragment extension of short\n events which would be counted as runts on the\n interconnect ports of other repeaters. Clause\n 27 repeaters do not perform fragment extension.\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.\n\n The approximate minimum time for rollover of this\n counter is 16 hours in a 10Mb/s repeater.')
rptrMonitorPortRunts = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortRunts.setDescription('This counter is incremented by one for each\n CarrierEvent on this port that meets one of the\n following two conditions. Only one test need be\n made. a) The ActivityDuration is greater than\n ShortEventMaxTime and less than ValidPacketMinTime\n and the CollisionEvent signal is deasserted. b)\n The OctetCount is less than 64, the\n ActivityDuration is greater than ShortEventMaxTime\n and the CollisionEvent signal is deasserted.\n ValidPacketMinTime is greater than or equal to 552\n bit times and less than 565 bit times.\n\n An event whose length is greater than 74 bit times\n but less than 82 bit times shall increment either\n the shortEvents counter or the runts counter but\n not both. A CarrierEvent greater than or equal to\n 552 bit times but less than 565 bit times may or\n may not be counted as a runt.\n\n ValidPacketMinTime has tolerances included to\n provide for circuit losses between a conformance\n test point at the AUI and the measurement point\n within the state machine.\n\n Runts usually indicate collision fragments, a\n normal network event. In certain situations\n associated with large diameter networks a\n percentage of collision fragments may exceed\n ValidPacketMinTime.\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.\n\n The approximate minimum time for rollover of this\n counter is 16 hours in a 10Mb/s repeater.')
rptrMonitorPortCollisions = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortCollisions.setDescription('For a clause 9 repeater, this counter is\n incremented by one for any CarrierEvent signal\n on any port for which the CollisionEvent signal\n on this port is asserted. For a clause 27\n repeater port the counter increments on entering\n the Collision Count Increment state of the\n partition state diagram (fig 27-8 of\n [IEEE 802.3 Std]).\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.\n\n The approximate minimum time for rollover of this\n counter is 16 hours in a 10Mb/s repeater.')
rptrMonitorPortLateEvents = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortLateEvents.setDescription('For a clause 9 repeater port, this counter is\n incremented by one for each CarrierEvent\n on this port in which the CollIn(X)\n variable transitions to the value SQE (Ref:\n 9.6.6.2, IEEE 802.3 Std) while the\n ActivityDuration is greater than the\n LateEventThreshold. For a clause 27 repeater\n port, this counter is incremented by one on\n entering the Collision Count Increment state\n of the partition state diagram (fig 27-8)\n while the ActivityDuration is greater than\n the LateEvent- Threshold. Such a CarrierEvent\n is counted twice, as both a collision and as a\n lateEvent.\n\n The LateEventThreshold is greater than 480 bit\n times and less than 565 bit times.\n LateEventThreshold has tolerances included to\n permit an implementation to build a single\n threshold to serve as both the LateEventThreshold\n and ValidPacketMinTime threshold.\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.\n\n The approximate minimum time for rollover of this\n counter is 81 hours in a 10Mb/s repeater.')
rptrMonitorPortVeryLongEvents = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortVeryLongEvents.setDescription('For a clause 9 repeater port, this counter\n is incremented by one for each CarrierEvent\n whose ActivityDuration is greater than the\n MAU Jabber Lockup Protection timer TW3\n (Ref: 9.6.1 & 9.6.5, IEEE 802.3 Std).\n\n For a clause 27 repeater port, this counter\n is incremented by one on entry to the\n Rx Jabber state of the receiver timer state\n diagram (fig 27-7). Other counters may\n be incremented as appropriate.\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.')
rptrMonitorPortDataRateMismatches = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortDataRateMismatches.setDescription("This counter is incremented by one for each\n frame received by this port that meets all\n of the conditions required by only one of the\n following two measurement methods:\n\n Measurement method A: 1) The CollisionEvent\n signal is not asserted (10Mb/s operation) or\n the Collision Count Increment state of the\n partition state diagram (fig 27-8 of\n [IEEE 802.3 Std]) has not been entered\n (100Mb/s operation). 2) The ActivityDuration\n is greater than ValidPacketMinTime. 3) The\n frequency (data rate) is detectably mismatched\n from the local transmit frequency.\n\n Measurement method B: 1) The CollisionEvent\n signal is not asserted (10Mb/s operation)\n or the Collision Count Increment state of the\n partition state diagram (fig 27-8 of\n [IEEE 802.3 Std]) has not been entered\n (100Mb/s operation). 2) The OctetCount is\n greater than 63. 3) The frequency (data\n rate) is detectably mismatched from the local\n transmit frequency. The exact degree of\n mismatch is vendor specific and is to be\n defined by the vendor for conformance testing.\n\n When this event occurs, other counters whose\n increment conditions were satisfied may or may not\n also be incremented, at the implementor's\n discretion. Whether or not the repeater was able\n to maintain data integrity is beyond the scope of\n this standard.\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.")
rptrMonitorPortAutoPartitions = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortAutoPartitions.setDescription('This counter is incremented by one for\n each time the repeater has automatically\n partitioned this port.\n\n The conditions that cause a clause 9\n repeater port to partition are specified in\n the partition state diagram in clause 9 of\n [IEEE 802.3 Std]. They are not differentiated\n here. A clause 27 repeater port partitions\n on entry to the Partition Wait state of the\n partition state diagram (fig 27-8 in\n [IEEE 802.3 Std]).\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.')
rptrMonitorPortTotalErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortTotalErrors.setDescription('The total number of errors which have occurred on\n this port. This counter is the summation of the\n values of other error counters (for the same\n port), namely:\n\n rptrMonitorPortFCSErrors,\n rptrMonitorPortAlignmentErrors,\n rptrMonitorPortFrameTooLongs,\n rptrMonitorPortShortEvents,\n rptrMonitorPortLateEvents,\n rptrMonitorPortVeryLongEvents,\n rptrMonitorPortDataRateMismatches, and\n rptrMonitorPortSymbolErrors.\n\n This counter is redundant in the sense that it is\n the summation of information already available\n through other objects. However, it is included\n specifically because the regular retrieval of this\n object as a means of tracking the health of a port\n provides a considerable optimization of network\n management traffic over the otherwise necessary\n retrieval of the summed counters.\n\n Note that rptrMonitorPortRunts is not included\n in this total; this is because runts usually\n indicate collision fragments, a normal network\n event.\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.')
rptrMonitorPortLastChange = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 1, 1, 16), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortLastChange.setDescription('The value of sysUpTime when the last of\n the following occurred:\n 1) the agent cold- or warm-started;\n 2) the row for the port was created\n (such as when a device or module was added\n to the system); or\n 3) any condition that would cause one of\n the counters for the row to experience\n a discontinuity.')
rptrMonitor100PortTable = MibTable((1, 3, 6, 1, 2, 1, 22, 2, 3, 2), )
if mibBuilder.loadTexts: rptrMonitor100PortTable.setDescription('Table of additional performance and error\n statistics for 100Mb/s ports, above and\n beyond those parameters that apply to both\n 10 and 100Mbps ports. Entries exist only for\n ports attached to 100Mbps repeaters.\n\n The columnar object rptrMonitorPortLastChange\n is used to indicate possible discontinuities\n of counter type columnar objects in this table.')
rptrMonitor100PortEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 2, 3, 2, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrMonitorPortGroupIndex"), (0, "SNMP-REPEATER-MIB", "rptrMonitorPortIndex"))
if mibBuilder.loadTexts: rptrMonitor100PortEntry.setDescription('An entry in the table, containing performance\n and error statistics for a single 100Mb/s port.')
rptrMonitorPortIsolates = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortIsolates.setDescription('This counter is incremented by one each time that\n the repeater port automatically isolates as a\n consequence of false carrier events. The conditions\n which cause a port to automatically isolate are\n defined by the transition from the False Carrier\n state to the Link Unstable state of the carrier\n integrity state diagram (figure 27-9)\n [IEEE 802.3 Standard].\n\n Note: Isolates do not affect the value of\n the PortOperStatus object.\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.')
rptrMonitorPortSymbolErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 2, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortSymbolErrors.setDescription('This counter is incremented by one each time a\n            valid-length packet was received at the port and\n            there was at least one occurrence of an invalid\n            data symbol. This can increment only once per valid\n            carrier event. A collision presence at any port of\n            the repeater containing port N will not cause this\n            attribute to increment.\n\n            A discontinuity may occur in the value\n            when the value of object\n            rptrMonitorPortLastChange changes.\n\n            The approximate minimum time for rollover of this\n            counter is 7.4 hours at 100Mb/s.')
rptrMonitorPortUpper32Octets = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 2, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortUpper32Octets.setDescription('This object is the number of octets contained in\n valid frames that have been received on this port,\n modulo 2**32. That is, it contains the upper 32\n bits of a 64-bit octets counter, of which the\n lower 32 bits are contained in the\n rptrMonitorPortReadableOctets object.\n\n This two-counter mechanism is provided for those\n network management protocols that do not support\n 64-bit counters (e.g. SNMP V1) and are used to\n manage a repeater type of 100Mb/s.\n\n Conformance clauses for this MIB are defined such\n that implementation of this object is not required\n in a system which does not support 100Mb/s.\n However, systems with mixed 10 and 100Mb/s ports\n may implement this object across all ports,\n including 10Mb/s. If this object is implemented,\n it must be according to the definition in the first\n paragraph of this description; that is, the value\n of this object MUST be a valid count.\n\n A discontinuity may occur in the value\n when the value of object\n rptrMonitorPortLastChange changes.')
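# --- Illustrative usage (not part of the MIB definitions above) -------------
# rptrMonitorPortUpper32Octets carries the upper 32 bits of a 64-bit octet
# count whose lower 32 bits are rptrMonitorPortReadableOctets.  A manager
# limited to 32-bit counters (e.g. SNMPv1) rebuilds the full value as in the
# sketch below; upper32/lower32 stand for values already fetched from the
# agent in the same poll cycle.
#
#     def combine_octet_counters(upper32, lower32):
#         """Rebuild the 64-bit octet count from the two 32-bit objects."""
#         return (upper32 << 32) | lower32
#
#     # e.g. combine_octet_counters(3, 123456) == 3 * 2**32 + 123456
# -----------------------------------------------------------------------------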
rptrMonitorPortHCReadableOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 3, 2, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonitorPortHCReadableOctets.setDescription('This object is the number of octets contained in\n            valid frames that have been received on this port.\n            This counter is incremented by OctetCount for each\n            frame received on this port which has been\n            determined to be a readable frame (i.e., including\n            FCS octets but excluding framing bits and dribble\n            bits).\n\n            This statistic provides an indicator of the total\n            data transferred.\n\n            This counter is a 64-bit version of\n            rptrMonitorPortReadableOctets. It should be used by\n            network management protocols which support 64-bit\n            counters (e.g. SNMPv2).\n\n            Conformance clauses for this MIB are defined such\n            that implementation of this object is not required\n            in a system which does not support 100Mb/s.\n            However, systems with mixed 10 and 100Mb/s ports\n            may implement this object across all ports,\n            including 10Mb/s. If this object is implemented,\n            it must be according to the definition in the first\n            paragraph of this description; that is, the value\n            of this object MUST be a valid count.\n\n            A discontinuity may occur in the value\n            when the value of object\n            rptrMonitorPortLastChange changes.')
rptrMonTable = MibTable((1, 3, 6, 1, 2, 1, 22, 2, 4, 1), )
if mibBuilder.loadTexts: rptrMonTable.setDescription('A table of information about each\n non-trivial repeater. The number of entries\n in this table is the same as the number of\n entries in the rptrInfoTable.\n\n The columnar object rptrInfoLastChange is\n used to indicate possible discontinuities of\n counter type columnar objects in this table.')
rptrMonEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 2, 4, 1, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrInfoId"))
if mibBuilder.loadTexts: rptrMonEntry.setDescription('An entry in the table, containing information\n about a single non-trivial repeater.')
rptrMonTxCollisions = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 4, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonTxCollisions.setDescription('For a clause 9 (10Mb/s) repeater, this counter\n is incremented every time the repeater state\n machine enters the TRANSMIT COLLISION state\n from any state other than ONE PORT LEFT\n (Ref: Fig 9-2 [IEEE 802.3 Std]).\n\n For a clause 27 repeater, this counter is\n incremented every time the repeater core state\n diagram enters the Jam state as a result of\n Activity(ALL) > 1 (fig 27-2 [IEEE 802.3 Std]).\n\n The approximate minimum time for rollover of this\n counter is 16 hours in a 10Mb/s repeater and 1.6\n hours in a 100Mb/s repeater.')
rptrMonTotalFrames = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 4, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonTotalFrames.setDescription('The number of frames of valid frame length\n that have been received on the ports in this repeater\n and for which the FCSError and CollisionEvent\n signals were not asserted. If an implementation\n can not obtain a count of frames as seen by\n the repeater itself, this counter may be\n implemented as the summation of the values of the\n rptrMonitorPortReadableFrames counters for all of\n the ports in the repeater.\n\n This statistic provides one of the parameters\n necessary for obtaining the packet error rate.\n The approximate minimum time for rollover of this\n counter is 80 hours in a 10Mb/s repeater.')
rptrMonTotalErrors = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 4, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonTotalErrors.setDescription('The total number of errors which have occurred on\n all of the ports in this repeater. The errors\n included in this count are the same as those listed\n for the rptrMonitorPortTotalErrors counter. If an\n implementation can not obtain a count of these\n errors as seen by the repeater itself, this counter\n may be implemented as the summation of the values of\n the rptrMonitorPortTotalErrors counters for all of\n the ports in the repeater.')
rptrMonTotalOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 4, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonTotalOctets.setDescription("The total number of octets contained in the valid\n frames that have been received on the ports in\n this group. If an implementation can not obtain\n a count of octets as seen by the repeater itself,\n this counter may be the summation of the\n values of the rptrMonitorPortReadableOctets\n counters for all of the ports in the group.\n\n This statistic provides an indicator of the total\n data transferred. The approximate minimum time\n for rollover of this counter in a 10Mb/s repeater\n is 58 minutes divided by the number of ports in\n the repeater.\n\n For 100Mb/s repeaters processing traffic at a\n maximum rate, this counter can roll over in less\n than 6 minutes divided by the number of ports in\n the repeater. Since that amount of time could\n be less than a management station's poll cycle\n time, in order to avoid a loss of information a\n management station is advised to also poll the\n rptrMonUpper32TotalOctets object, or to use the\n 64-bit counter defined by rptrMonHCTotalOctets\n instead of the two 32-bit counters.")
rptrMon100Table = MibTable((1, 3, 6, 1, 2, 1, 22, 2, 4, 2), )
if mibBuilder.loadTexts: rptrMon100Table.setDescription('A table of additional information about each\n 100Mb/s repeater, augmenting the entries in\n the rptrMonTable. Entries exist in this table\n only for 100Mb/s repeaters.\n\n The columnar object rptrInfoLastChange is\n used to indicate possible discontinuities of\n counter type columnar objects in this table.')
rptrMon100Entry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 2, 4, 2, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrInfoId"))
if mibBuilder.loadTexts: rptrMon100Entry.setDescription('An entry in the table, containing information\n about a single 100Mbps repeater.')
rptrMonUpper32TotalOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 4, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonUpper32TotalOctets.setDescription('The total number of octets contained in the valid\n frames that have been received on the ports in\n this repeater, modulo 2**32. That is, it contains\n the upper 32 bits of a 64-bit counter, of which\n the lower 32 bits are contained in the\n rptrMonTotalOctets object. If an implementation\n can not obtain a count of octets as seen\n by the repeater itself, the 64-bit value\n may be the summation of the values of the\n rptrMonitorPortReadableOctets counters combined\n with the corresponding rptrMonitorPortUpper32Octets\n counters for all of the ports in the repeater.\n\n This statistic provides an indicator of the total\n data transferred within the repeater.\n\n This two-counter mechanism is provided for those\n network management protocols that do not support\n 64-bit counters (e.g. SNMP V1) and are used to\n manage a repeater type of 100Mb/s.\n\n Conformance clauses for this MIB are defined such\n that implementation of this object is not required\n in a system which does not support 100Mb/s.\n However, systems with mixed 10 and 100Mb/s ports\n may implement this object across all ports,\n including 10Mb/s. If this object is implemented,\n it must be according to the definition in the first\n paragraph of this description; that is, the value\n of this object MUST be a valid count.')
rptrMonHCTotalOctets = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 2, 4, 2, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrMonHCTotalOctets.setDescription('The total number of octets contained in the valid\n            frames that have been received on the ports in\n            this group. If an implementation can not obtain\n            a count of octets as seen by the repeater itself,\n            this counter may be the summation of the\n            values of the rptrMonitorPortReadableOctets\n            counters for all of the ports in the group.\n\n            This statistic provides an indicator of the total\n            data transferred.\n\n            This counter is a 64-bit (high-capacity) version\n            of rptrMonUpper32TotalOctets and rptrMonTotalOctets.\n            It should be used by network management protocols\n            which support 64-bit counters (e.g. SNMPv2).\n\n            Conformance clauses for this MIB are defined such\n            that implementation of this object is not required\n            in a system which does not support 100Mb/s.\n            However, systems with mixed 10 and 100Mb/s ports\n            may implement this object across all ports,\n            including 10Mb/s. If this object is implemented,\n            it must be according to the definition in the first\n            paragraph of this description; that is, the value\n            of this object MUST be a valid count.')
rptrAddrSearchTable = MibTable((1, 3, 6, 1, 2, 1, 22, 3, 1, 1), )
if mibBuilder.loadTexts: rptrAddrSearchTable.setDescription("This table contains one entry per repeater in the\n system. It defines objects which allow a network\n management application to instruct an agent to watch\n for a given MAC address and report which port it\n was seen on. Only one address search can be in\n progress on each repeater at any one time. Before\n starting an address search, a management application\n should obtain 'ownership' of the entry in\n rptrAddrSearchTable for the repeater that is to\n perform the search. This is accomplished with the\n rptrAddrSearchLock and rptrAddrSearchStatus as\n follows:\n\n try_again:\n get(rptrAddrSearchLock, rptrAddrSearchStatus)\n while (rptrAddrSearchStatus != notInUse)\n {\n /* Loop waiting for objects to be available*/\n short delay\n get(rptrAddrSearchLock, rptrAddrSearchStatus)\n }\n\n /* Try to claim map objects */\n lock_value = rptrAddrSearchLock\n if ( set(rptrAddrSearchLock = lock_value,\n rptrAddrSearchStatus = inUse,\n rptrAddrSearchOwner = 'my-IP-address)\n == FAILURE)\n /* Another manager got the lock */\n goto try_again\n\n /* I have the lock */\n set (rptrAddrSearchAddress = <search target>)\n\n wait for rptrAddrSearchState to change from none\n\n if (rptrAddrSearchState == single)\n get (rptrAddrSearchGroup, rptrAddrSearchPort)\n\n /* release the lock, making sure not to overwrite\n anyone else's lock */\n set (rptrAddrSearchLock = lock_value+1,\n rptrAddrSearchStatus = notInUse,\n rptrAddrSearchOwner = '')\n\n A management station first retrieves the values of\n the appropriate instances of the rptrAddrSearchLock\n and rptrAddrSearchStatus objects, periodically\n repeating the retrieval if necessary, until the value\n of rptrAddrSearchStatus is 'notInUse'. The\n management station then tries to set the same\n instance of the rptrAddrSearchLock object to the\n value it just retrieved, the same instance of the\n rptrAddrSearchStatus object to 'inUse', and the\n corresponding instance of rptrAddrSearchOwner to a\n value indicating itself. If the set operation\n succeeds, then the management station has obtained\n ownership of the rptrAddrSearchEntry, and the value\n of rptrAddrSearchLock is incremented by the agent (as\n per the semantics of TestAndIncr). Failure of the\n set operation indicates that some other manager has\n obtained ownership of the rptrAddrSearchEntry.\n\n Once ownership is obtained, the management station\n can proceed with the search operation. Note that the\n agent will reset rptrAddrSearchStatus to 'notInUse'\n if it has been in the 'inUse' state for an abnormally\n long period of time, to prevent a misbehaving manager\n from permanently locking the entry. It is suggested\n that this timeout period be between one and five\n minutes.\n\n When the management station has completed its search\n operation, it should free the entry by setting\n the instance of the rptrAddrSearchLock object to the\n previous value + 1, the instance of the\n rptrAddrSearchStatus to 'notInUse', and the instance\n of rptrAddrSearchOwner to a zero length string. This\n is done to prevent overwriting another station's\n lock.")
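# --- Illustrative usage (not part of the MIB definitions above) -------------
# The rptrAddrSearchTable description spells out an advisory-lock protocol
# (TestAndIncr semantics on rptrAddrSearchLock) for claiming the per-repeater
# search entry before setting rptrAddrSearchAddress.  The commented sketch
# below mirrors that pseudocode with pysnmp's high-level API.  It is a sketch
# under assumed names (host, community, repeater index 1, owner address), not
# a definitive implementation, and it stays commented out so that importing
# this generated module has no side effects.
#
#     import time
#     from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
#                               ContextData, ObjectType, ObjectIdentity,
#                               OctetString, getCmd, setCmd)
#
#     engine, auth = SnmpEngine(), CommunityData('private')
#     target, ctx = UdpTransportTarget(('repeater.example.org', 161)), ContextData()
#
#     def read(*symbols):
#         eI, eS, _, vbs = next(getCmd(engine, auth, target, ctx,
#             *[ObjectType(ObjectIdentity('SNMP-REPEATER-MIB', s, 1)) for s in symbols]))
#         return [vb[1] for vb in vbs]
#
#     # 1. Wait until rptrAddrSearchStatus is notInUse(1), remembering the lock.
#     lock, status = read('rptrAddrSearchLock', 'rptrAddrSearchStatus')
#     while int(status) != 1:
#         time.sleep(1)
#         lock, status = read('rptrAddrSearchLock', 'rptrAddrSearchStatus')
#
#     # 2. Claim the entry; the SET fails if another manager won the race.
#     eI, eS, _, _ = next(setCmd(engine, auth, target, ctx,
#         ObjectType(ObjectIdentity('SNMP-REPEATER-MIB', 'rptrAddrSearchLock', 1), lock),
#         ObjectType(ObjectIdentity('SNMP-REPEATER-MIB', 'rptrAddrSearchStatus', 1), 2),
#         ObjectType(ObjectIdentity('SNMP-REPEATER-MIB', 'rptrAddrSearchOwner', 1),
#                    OctetString('192.0.2.1'))))
#
#     # 3. On success: set rptrAddrSearchAddress, poll rptrAddrSearchState until
#     #    it leaves none(1), read rptrAddrSearchGroup/Port when it is single(2),
#     #    then release the entry with lock+1, notInUse(1) and an empty owner.
# -----------------------------------------------------------------------------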
rptrAddrSearchEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 3, 1, 1, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrInfoId"))
if mibBuilder.loadTexts: rptrAddrSearchEntry.setDescription('An entry containing objects for invoking an address\n search on a repeater.')
rptrAddrSearchLock = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 1, 1, 1, 1), TestAndIncr()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rptrAddrSearchLock.setDescription('This object is used by a management station as an\n advisory lock for this rptrAddrSearchEntry.')
rptrAddrSearchStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2,))).clone(namedValues=NamedValues(("notInUse", 1), ("inUse", 2),))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rptrAddrSearchStatus.setDescription("This object is used to indicate that some management\n station is currently using this rptrAddrSearchEntry.\n Cooperating managers should set this object to\n 'notInUse' when they are finished using this entry.\n The agent will automatically set the value of this\n object to 'notInUse' if it has been set to 'inUse'\n for an unusually long period of time.")
rptrAddrSearchAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 1, 1, 1, 3), MacAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rptrAddrSearchAddress.setDescription("This object is used to search for a specified MAC\n address. When this object is set, an address search\n begins. This automatically sets the corresponding\n instance of the rptrAddrSearchState object to 'none'\n and the corresponding instances of the\n rptrAddrSearchGroup and rptrAddrSearchPort objects to\n 0.\n\n When a valid frame is received by this repeater with\n a source MAC address which matches the current value\n of rptrAddrSearchAddress, the agent will update the\n corresponding instances of rptrAddrSearchState,\n rptrAddrSearchGroup and rptrAddrSearchPort to reflect\n the current status of the search, and the group and\n port on which the frame was seen.")
rptrAddrSearchState = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3,))).clone(namedValues=NamedValues(("none", 1), ("single", 2), ("multiple", 3),))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrAddrSearchState.setDescription("The current state of the MAC address search on this\n repeater. This object is initialized to 'none' when\n the corresponding instance of rptrAddrSearchAddress\n is set. If the agent detects the address on exactly\n one port, it will set this object to 'single', and\n set the corresponding instances of\n rptrAddrSearchGroup and rptrAddrSearchPort to reflect\n the group and port on which the address was heard.\n If the agent detects the address on more than one\n port, it will set this object to 'multiple'.")
rptrAddrSearchGroup = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrAddrSearchGroup.setDescription("The group from which an error-free frame whose\n source address is equal to the corresponding instance\n of rptrAddrSearchAddress has been received. The\n value of this object is undefined when the\n corresponding instance of rptrAddrSearchState is\n equal to 'none' or 'multiple'.")
rptrAddrSearchPort = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrAddrSearchPort.setDescription("The port from which an error-free frame whose\n            source address is equal to the corresponding instance\n            of rptrAddrSearchAddress has been received. The\n            value of this object is undefined when the\n            corresponding instance of rptrAddrSearchState is\n            equal to 'none' or 'multiple'.")
rptrAddrSearchOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 1, 1, 1, 7), OwnerString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rptrAddrSearchOwner.setDescription("The entity which currently has 'ownership' of this\n rptrAddrSearchEntry.")
rptrAddrTrackTable = MibTable((1, 3, 6, 1, 2, 1, 22, 3, 3, 1), )
if mibBuilder.loadTexts: rptrAddrTrackTable.setDescription('Table of address mapping information about the\n ports.')
rptrAddrTrackEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 3, 3, 1, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrAddrTrackGroupIndex"), (0, "SNMP-REPEATER-MIB", "rptrAddrTrackPortIndex"))
if mibBuilder.loadTexts: rptrAddrTrackEntry.setDescription('An entry in the table, containing address mapping\n information about a single port.')
rptrAddrTrackGroupIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrAddrTrackGroupIndex.setDescription('This object identifies the group containing the\n port for which this entry contains information.')
rptrAddrTrackPortIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrAddrTrackPortIndex.setDescription('This object identifies the port within the group\n for which this entry contains information.')
rptrAddrTrackLastSourceAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 3, 1, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrAddrTrackLastSourceAddress.setDescription('********* THIS OBJECT IS DEPRECATED **********\n This object is the SourceAddress of the last\n readable frame (i.e., counted by\n rptrMonitorPortReadableFrames) received by this\n port.\n\n This object has been deprecated because its value\n is undefined when no frames have been observed on\n this port. The replacement object is\n rptrAddrTrackNewLastSrcAddress.')
rptrAddrTrackSourceAddrChanges = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 3, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrAddrTrackSourceAddrChanges.setDescription('This counter is incremented by one for each time\n that the rptrAddrTrackLastSourceAddress attribute\n for this port has changed.\n\n This may indicate whether a link is connected to a\n single DTE or another multi-user segment.\n\n A discontinuity may occur in the value when the\n value of object rptrMonitorPortLastChange changes.\n\n The approximate minimum time for rollover of this\n counter is 81 hours in a 10Mb/s repeater.')
rptrAddrTrackNewLastSrcAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 3, 1, 1, 5), OptMacAddr()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrAddrTrackNewLastSrcAddress.setDescription('This object is the SourceAddress of the last\n readable frame (i.e., counted by\n rptrMonitorPortReadableFrames) received by this\n port. If no frames have been received by this\n port since the agent began monitoring the port\n activity, the agent shall return a string of\n length zero.')
rptrAddrTrackCapacity = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 3, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrAddrTrackCapacity.setDescription('The maximum number of addresses that can be\n detected on this port. This value indicates\n the maximum number of entries in the\n rptrExtAddrTrackTable relative to this port.\n\n If this object has the value of 1, the agent\n implements only the LastSourceAddress mechanism\n described by RFC 1368 or RFC 1516.')
rptrExtAddrTrackTable = MibTable((1, 3, 6, 1, 2, 1, 22, 3, 3, 2), )
if mibBuilder.loadTexts: rptrExtAddrTrackTable.setDescription('A table to extend the address tracking table (i.e.,\n rptrAddrTrackTable) with a list of source MAC\n addresses that were recently received on each port.\n The number of ports is the same as the number\n of entries in table rptrPortTable. The number of\n entries in this table depends on the agent/repeater\n implementation and the number of different\n addresses received on each port.\n\n The first entry for each port contains\n the same MAC address that is given by the\n rptrAddrTrackNewLastSrcAddress for that port.\n\n Entries in this table for a particular port are\n retained when that port is switched from one\n repeater to another.\n\n The ordering of MAC addresses listed for a\n particular port is implementation dependent.')
rptrExtAddrTrackEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 3, 3, 2, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrAddrTrackGroupIndex"), (0, "SNMP-REPEATER-MIB", "rptrAddrTrackPortIndex"), (0, "SNMP-REPEATER-MIB", "rptrExtAddrTrackMacIndex"))
if mibBuilder.loadTexts: rptrExtAddrTrackEntry.setDescription('A row in the table of extended address tracking\n information for ports. Entries can not be directly\n created or deleted via SNMP operations.')
rptrExtAddrTrackMacIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrExtAddrTrackMacIndex.setDescription('The index of a source MAC address seen on\n the port.\n\n The ordering of MAC addresses listed for a\n particular port is implementation dependent.\n\n There is no implied relationship between a\n particular index and a particular MAC\n address. The index for a particular MAC\n address may change without notice.')
rptrExtAddrTrackSourceAddress = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 3, 3, 2, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrExtAddrTrackSourceAddress.setDescription('The source MAC address from a readable frame\n (i.e., counted by rptrMonitorPortReadableFrames)\n recently received by the port.')
rptrTopNPortControlTable = MibTable((1, 3, 6, 1, 2, 1, 22, 4, 3, 1), )
if mibBuilder.loadTexts: rptrTopNPortControlTable.setDescription("A table of control records for reports on the top `N'\n ports for the rate of a selected counter. The number\n of entries depends on the configuration of the agent.\n The maximum number of entries is implementation\n dependent.")
rptrTopNPortControlEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrTopNPortControlIndex"))
if mibBuilder.loadTexts: rptrTopNPortControlEntry.setDescription('A set of parameters that control the creation of a\n report of the top N ports according to several metrics.')
rptrTopNPortControlIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrTopNPortControlIndex.setDescription('An index that uniquely identifies an entry in the\n rptrTopNPortControl table. Each such entry defines\n one top N report prepared for a repeater or system.')
rptrTopNPortRepeaterId = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rptrTopNPortRepeaterId.setDescription("Identifies the repeater for which a top N report will\n be prepared (see rptrInfoId). If the value of this\n object is positive, only ports assigned to this repeater\n will be used to form the list in which to order the\n Top N table. If this value is zero, all ports will be\n eligible for inclusion on the list.\n\n The value of this object may not be modified if the\n associated rptrTopNPortRowStatus object is equal to\n active(1).\n If, for a particular row in this table, the repeater\n specified by the value of this object goes away (is\n removed from the rptrInfoTable) while the associated\n rptrTopNPortRowStatus object is equal to active(1),\n the row in this table is preserved by the agent but\n the value of rptrTopNPortRowStatus is changed to\n notInService(2), and the agent may time out the row\n if appropriate. If the specified repeater comes\n back (reappears in the rptrInfoTable) before the row\n has been timed out, the management station must set\n the value of the rptrTopNPortRowStatus object back\n to active(1) if desired (the agent doesn't do this\n automatically).")
rptrTopNPortRateBase = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,))).clone(namedValues=NamedValues(("readableFrames", 1), ("readableOctets", 2), ("fcsErrors", 3), ("alignmentErrors", 4), ("frameTooLongs", 5), ("shortEvents", 6), ("runts", 7), ("collisions", 8), ("lateEvents", 9), ("veryLongEvents", 10), ("dataRateMismatches", 11), ("autoPartitions", 12), ("totalErrors", 13), ("isolates", 14), ("symbolErrors", 15),))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rptrTopNPortRateBase.setDescription('The monitored variable, which the rptrTopNPortRate\n variable is based upon.\n\n The value of this object may not be modified if\n the associated rptrTopNPortRowStatus object has\n a value of active(1).')
rptrTopNPortTimeRemaining = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rptrTopNPortTimeRemaining.setDescription('The number of seconds left in the report\n currently being collected. When this object\n is modified by the management station, a new\n collection is started, possibly aborting a\n currently running report. The new value is\n used as the requested duration of this report,\n which is loaded into the associated\n rptrTopNPortDuration object.\n\n When this object is set to a non-zero value,\n any associated rptrTopNPortEntries shall be\n made inaccessible by the agent. While the value\n of this object is non-zero, it decrements by one\n per second until it reaches zero. During this\n time, all associated rptrTopNPortEntries shall\n remain inaccessible. At the time that this object\n decrements to zero, the report is made accessible\n in the rptrTopNPortTable. Thus, the rptrTopNPort\n table needs to be created only at the end of the\n collection interval.\n\n If the value of this object is set to zero\n while the associated report is running, the\n running report is aborted and no associated\n rptrTopNPortEntries are created.')
rptrTopNPortDuration = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrTopNPortDuration.setDescription('The number of seconds that this report has\n collected during the last sampling interval,\n or if this report is currently being collected,\n the number of seconds that this report is being\n collected during this sampling interval.\n\n When the associated rptrTopNPortTimeRemaining\n object is set, this object shall be set by the\n agent to the same value and shall not be modified\n until the next time the rptrTopNPortTimeRemaining\n is set.\n\n This value shall be zero if no reports have been\n requested for this rptrTopNPortControlEntry.')
rptrTopNPortRequestedSize = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1, 6), Integer32().clone(10)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rptrTopNPortRequestedSize.setDescription('The maximum number of repeater ports requested\n for the Top N Table.\n\n When this object is created or modified, the\n agent should set rptrTopNPortGrantedSize as close\n to this object as is possible for the particular\n implementation and available resources.')
rptrTopNPortGrantedSize = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0,65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrTopNPortGrantedSize.setDescription('The maximum number of repeater ports in the\n top N table.\n\n When the associated rptrTopNPortRequestedSize object is\n created or modified, the agent should set this object as\n closely to the requested value as is possible for the\n particular implementation and available resources. The\n agent must not lower this value except as a result of a\n set to the associated rptrTopNPortRequestedSize object.')
rptrTopNPortStartTime = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1, 8), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrTopNPortStartTime.setDescription('The value of sysUpTime when this top N report was\n last started. In other words, this is the time that\n the associated rptrTopNPortTimeRemaining object was\n modified to start the requested report.\n\n If the report has not yet been started, the value\n of this object is zero.')
rptrTopNPortOwner = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1, 9), OwnerString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rptrTopNPortOwner.setDescription('The entity that configured this entry and is\n using the resources assigned to it.')
rptrTopNPortRowStatus = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 1, 1, 10), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: rptrTopNPortRowStatus.setDescription('The status of this row.\n\n If the value of this object is not equal to\n active(1), all associated entries in the\n rptrTopNPortTable shall be deleted by the\n agent.')
rptrTopNPortTable = MibTable((1, 3, 6, 1, 2, 1, 22, 4, 3, 2), )
if mibBuilder.loadTexts: rptrTopNPortTable.setDescription("A table of reports for the top `N' ports based on\n setting of associated control table entries. The\n maximum number of entries depends on the number\n of entries in table rptrTopNPortControlTable and\n the value of object rptrTopNPortGrantedSize for\n each entry.\n\n For each entry in the rptrTopNPortControlTable,\n repeater ports with the highest value of\n rptrTopNPortRate shall be placed in this table\n in decreasing order of that rate until there is\n no more room or until there are no more ports.")
rptrTopNPortEntry = MibTableRow((1, 3, 6, 1, 2, 1, 22, 4, 3, 2, 1), ).setIndexNames((0, "SNMP-REPEATER-MIB", "rptrTopNPortControlIndex"), (0, "SNMP-REPEATER-MIB", "rptrTopNPortIndex"))
if mibBuilder.loadTexts: rptrTopNPortEntry.setDescription('A set of statistics for a repeater port that is\n part of a top N report.')
rptrTopNPortIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrTopNPortIndex.setDescription('An index that uniquely identifies an entry in\n the rptrTopNPort table among those in the same\n report. This index is between 1 and N, where N\n is the number of entries in this report. Increasing\n values of rptrTopNPortIndex shall be assigned to\n entries with decreasing values of rptrTopNPortRate\n until index N is assigned to the entry with the\n lowest value of rptrTopNPortRate or there are no\n more rptrTopNPortEntries.\n\n No ports are included in a report where their\n value of rptrTopNPortRate would be zero.')
rptrTopNPortGroupIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrTopNPortGroupIndex.setDescription('This object identifies the group containing\n the port for this entry. (See also object\n type rptrGroupIndex.)')
rptrTopNPortPortIndex = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1,2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrTopNPortPortIndex.setDescription('The index of the repeater port.\n (See object type rptrPortIndex.)')
rptrTopNPortRate = MibTableColumn((1, 3, 6, 1, 2, 1, 22, 4, 3, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rptrTopNPortRate.setDescription("The amount of change in the selected variable\n during this sampling interval for the identified\n port. The selected variable is that port's\n instance of the object selected by\n rptrTopNPortRateBase.")
rptrHealth = NotificationType((1, 3, 6, 1, 2, 1, 22, 0, 1)).setObjects(*(("SNMP-REPEATER-MIB", "rptrOperStatus"),))
if mibBuilder.loadTexts: rptrHealth.setDescription("********* THIS OBJECT IS DEPRECATED **********\n\n In a system containing a single managed repeater,\n the rptrHealth notification conveys information\n related to the operational status of the repeater.\n It is sent either when the value of\n rptrOperStatus changes, or upon completion of a\n non-disruptive test.\n\n The rptrHealth notification must contain the\n rptrOperStatus object. The agent may optionally\n include the rptrHealthText object in the varBind\n list. See the rptrOperStatus and rptrHealthText\n objects for descriptions of the information that\n is sent.\n\n The agent must throttle the generation of\n consecutive rptrHealth traps so that there is at\n least a five-second gap between traps of this\n type. When traps are throttled, they are dropped,\n not queued for sending at a future time. (Note\n that 'generating' a trap means sending to all\n configured recipients.)")
rptrGroupChange = NotificationType((1, 3, 6, 1, 2, 1, 22, 0, 2)).setObjects(*(("SNMP-REPEATER-MIB", "rptrGroupIndex"),))
if mibBuilder.loadTexts: rptrGroupChange.setDescription("********* THIS OBJECT IS DEPRECATED **********\n\n In a system containing a single managed repeater,\n this notification is sent when a change occurs in the\n group structure of the repeater. This occurs only\n when a group is logically or physically removed\n from or added to a repeater. The varBind list\n contains the identifier of the group that was\n removed or added.\n\n The agent must throttle the generation of\n consecutive rptrGroupChange traps for the same\n group so that there is at least a five-second gap\n between traps of this type. When traps are\n throttled, they are dropped, not queued for\n sending at a future time. (Note that 'generating'\n a trap means sending to all configured\n recipients.)")
rptrResetEvent = NotificationType((1, 3, 6, 1, 2, 1, 22, 0, 3)).setObjects(*(("SNMP-REPEATER-MIB", "rptrOperStatus"),))
if mibBuilder.loadTexts: rptrResetEvent.setDescription("********* THIS OBJECT IS DEPRECATED **********\n\n In a system containing a single managed repeater-unit,\n the rptrResetEvent notification conveys information\n related to the operational status of the repeater.\n This trap is sent on completion of a repeater\n reset action. A repeater reset action is defined\n as a transition to the START state of Fig 9-2\n in section 9 [IEEE 802.3 Std], when triggered by a\n management command (e.g., an SNMP Set on the\n rptrReset object).\n\n The agent must throttle the generation of\n consecutive rptrResetEvent traps so that there is\n at least a five-second gap between traps of this\n type. When traps are throttled, they are dropped,\n not queued for sending at a future time. (Note\n that 'generating' a trap means sending to all\n configured recipients.)\n\n The rptrResetEvent trap is not sent when the agent\n restarts and sends an SNMP coldStart or warmStart\n trap. However, it is recommended that a repeater\n agent send the rptrOperStatus object as an\n optional object with its coldStart and warmStart\n trap PDUs.\n\n The rptrOperStatus object must be included in the\n varbind list sent with this trap. The agent may\n optionally include the rptrHealthText object as\n well.")
rptrInfoHealth = NotificationType((1, 3, 6, 1, 2, 1, 22, 0, 4)).setObjects(*(("SNMP-REPEATER-MIB", "rptrInfoOperStatus"),))
if mibBuilder.loadTexts: rptrInfoHealth.setDescription("In a system containing multiple managed repeaters,\n the rptrInfoHealth notification conveys information\n related to the operational status of a repeater.\n It is sent either when the value of rptrInfoOperStatus\n changes, or upon completion of a non-disruptive test.\n\n The agent must throttle the generation of\n consecutive rptrInfoHealth notifications for\n the same repeater so that there is at least\n a five-second gap between notifications of this type.\n When notifications are throttled, they are dropped,\n not queued for sending at a future time. (Note\n that 'generating' a notification means sending\n to all configured recipients.)")
rptrInfoResetEvent = NotificationType((1, 3, 6, 1, 2, 1, 22, 0, 5)).setObjects(*(("SNMP-REPEATER-MIB", "rptrInfoOperStatus"),))
if mibBuilder.loadTexts: rptrInfoResetEvent.setDescription("In a system containing multiple managed\n repeaters, the rptrInfoResetEvent notification\n conveys information related to the operational\n status of a repeater. This notification is sent\n on completion of a repeater reset action. A\n repeater reset action is defined as a transition\n to the START state of Fig 9-2 in section 9 of\n [IEEE 802.3 Std], when triggered by a management\n command (e.g., an SNMP Set on the rptrInfoReset\n object).\n\n The agent must throttle the generation of\n consecutive rptrInfoResetEvent notifications for\n a single repeater so that there is at least\n a five-second gap between notifications of\n this type. When notifications are throttled,\n they are dropped, not queued for sending at\n a future time. (Note that 'generating' a\n notification means sending to all configured\n recipients.)\n\n The rptrInfoResetEvent is not sent when the\n agent restarts and sends an SNMP coldStart or\n warmStart trap. However, it is recommended that\n a repeater agent send the rptrInfoOperStatus\n object as an optional object with its coldStart\n and warmStart trap PDUs.")
snmpRptrModConf = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 5, 1))
snmpRptrModCompls = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 5, 1, 1))
snmpRptrModObjGrps = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 5, 1, 2))
snmpRptrModNotGrps = MibIdentifier((1, 3, 6, 1, 2, 1, 22, 5, 1, 3))
snmpRptrGrpBasic1516 = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 1)).setObjects(*(("SNMP-REPEATER-MIB", "rptrGroupCapacity"), ("SNMP-REPEATER-MIB", "rptrOperStatus"), ("SNMP-REPEATER-MIB", "rptrHealthText"), ("SNMP-REPEATER-MIB", "rptrReset"), ("SNMP-REPEATER-MIB", "rptrNonDisruptTest"), ("SNMP-REPEATER-MIB", "rptrTotalPartitionedPorts"), ("SNMP-REPEATER-MIB", "rptrGroupIndex"), ("SNMP-REPEATER-MIB", "rptrGroupDescr"), ("SNMP-REPEATER-MIB", "rptrGroupObjectID"), ("SNMP-REPEATER-MIB", "rptrGroupOperStatus"), ("SNMP-REPEATER-MIB", "rptrGroupLastOperStatusChange"), ("SNMP-REPEATER-MIB", "rptrGroupPortCapacity"), ("SNMP-REPEATER-MIB", "rptrPortGroupIndex"), ("SNMP-REPEATER-MIB", "rptrPortIndex"), ("SNMP-REPEATER-MIB", "rptrPortAdminStatus"), ("SNMP-REPEATER-MIB", "rptrPortAutoPartitionState"), ("SNMP-REPEATER-MIB", "rptrPortOperStatus"),))
if mibBuilder.loadTexts: snmpRptrGrpBasic1516.setDescription('********* THIS GROUP IS DEPRECATED **********\n\n Basic group from RFCs 1368 and 1516.\n\n NOTE: this object group is DEPRECATED and replaced\n with snmpRptrGrpBasic.')
snmpRptrGrpMonitor1516 = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 2)).setObjects(*(("SNMP-REPEATER-MIB", "rptrMonitorTransmitCollisions"), ("SNMP-REPEATER-MIB", "rptrMonitorGroupIndex"), ("SNMP-REPEATER-MIB", "rptrMonitorGroupTotalFrames"), ("SNMP-REPEATER-MIB", "rptrMonitorGroupTotalOctets"), ("SNMP-REPEATER-MIB", "rptrMonitorGroupTotalErrors"), ("SNMP-REPEATER-MIB", "rptrMonitorPortGroupIndex"), ("SNMP-REPEATER-MIB", "rptrMonitorPortIndex"), ("SNMP-REPEATER-MIB", "rptrMonitorPortReadableFrames"), ("SNMP-REPEATER-MIB", "rptrMonitorPortReadableOctets"), ("SNMP-REPEATER-MIB", "rptrMonitorPortFCSErrors"), ("SNMP-REPEATER-MIB", "rptrMonitorPortAlignmentErrors"), ("SNMP-REPEATER-MIB", "rptrMonitorPortFrameTooLongs"), ("SNMP-REPEATER-MIB", "rptrMonitorPortShortEvents"), ("SNMP-REPEATER-MIB", "rptrMonitorPortRunts"), ("SNMP-REPEATER-MIB", "rptrMonitorPortCollisions"), ("SNMP-REPEATER-MIB", "rptrMonitorPortLateEvents"), ("SNMP-REPEATER-MIB", "rptrMonitorPortVeryLongEvents"), ("SNMP-REPEATER-MIB", "rptrMonitorPortDataRateMismatches"), ("SNMP-REPEATER-MIB", "rptrMonitorPortAutoPartitions"), ("SNMP-REPEATER-MIB", "rptrMonitorPortTotalErrors"),))
if mibBuilder.loadTexts: snmpRptrGrpMonitor1516.setDescription('********* THIS GROUP IS DEPRECATED **********\n\n Monitor group from RFCs 1368 and 1516.\n\n NOTE: this object group is DEPRECATED and replaced\n with snmpRptrGrpMonitor.')
snmpRptrGrpAddrTrack1368 = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 3)).setObjects(*(("SNMP-REPEATER-MIB", "rptrAddrTrackGroupIndex"), ("SNMP-REPEATER-MIB", "rptrAddrTrackPortIndex"), ("SNMP-REPEATER-MIB", "rptrAddrTrackLastSourceAddress"), ("SNMP-REPEATER-MIB", "rptrAddrTrackSourceAddrChanges"),))
if mibBuilder.loadTexts: snmpRptrGrpAddrTrack1368.setDescription('Address tracking group from RFC 1368.\n\n NOTE: this object group is OBSOLETE and replaced\n with snmpRptrGrpAddrTrack1516.')
snmpRptrGrpAddrTrack1516 = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 4)).setObjects(*(("SNMP-REPEATER-MIB", "rptrAddrTrackGroupIndex"), ("SNMP-REPEATER-MIB", "rptrAddrTrackPortIndex"), ("SNMP-REPEATER-MIB", "rptrAddrTrackLastSourceAddress"), ("SNMP-REPEATER-MIB", "rptrAddrTrackSourceAddrChanges"), ("SNMP-REPEATER-MIB", "rptrAddrTrackNewLastSrcAddress"),))
if mibBuilder.loadTexts: snmpRptrGrpAddrTrack1516.setDescription('********* THIS GROUP IS DEPRECATED **********\n Address tracking group from RFC 1516.\n\n NOTE: this object group is DEPRECATED and\n replaced with snmpRptrGrpAddrTrack.')
snmpRptrGrpBasic = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 5)).setObjects(*(("SNMP-REPEATER-MIB", "rptrGroupIndex"), ("SNMP-REPEATER-MIB", "rptrGroupObjectID"), ("SNMP-REPEATER-MIB", "rptrGroupOperStatus"), ("SNMP-REPEATER-MIB", "rptrGroupPortCapacity"), ("SNMP-REPEATER-MIB", "rptrPortGroupIndex"), ("SNMP-REPEATER-MIB", "rptrPortIndex"), ("SNMP-REPEATER-MIB", "rptrPortAdminStatus"), ("SNMP-REPEATER-MIB", "rptrPortAutoPartitionState"), ("SNMP-REPEATER-MIB", "rptrPortOperStatus"), ("SNMP-REPEATER-MIB", "rptrPortRptrId"), ("SNMP-REPEATER-MIB", "rptrInfoId"), ("SNMP-REPEATER-MIB", "rptrInfoRptrType"), ("SNMP-REPEATER-MIB", "rptrInfoOperStatus"), ("SNMP-REPEATER-MIB", "rptrInfoReset"), ("SNMP-REPEATER-MIB", "rptrInfoPartitionedPorts"), ("SNMP-REPEATER-MIB", "rptrInfoLastChange"),))
if mibBuilder.loadTexts: snmpRptrGrpBasic.setDescription('Basic group for a system with one or more\n repeater-units in multi-segment (post-RFC 1516)\n version of the MIB module.')
snmpRptrGrpMonitor = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 6)).setObjects(*(("SNMP-REPEATER-MIB", "rptrMonitorPortGroupIndex"), ("SNMP-REPEATER-MIB", "rptrMonitorPortIndex"), ("SNMP-REPEATER-MIB", "rptrMonitorPortReadableFrames"), ("SNMP-REPEATER-MIB", "rptrMonitorPortReadableOctets"), ("SNMP-REPEATER-MIB", "rptrMonitorPortFCSErrors"), ("SNMP-REPEATER-MIB", "rptrMonitorPortAlignmentErrors"), ("SNMP-REPEATER-MIB", "rptrMonitorPortFrameTooLongs"), ("SNMP-REPEATER-MIB", "rptrMonitorPortShortEvents"), ("SNMP-REPEATER-MIB", "rptrMonitorPortRunts"), ("SNMP-REPEATER-MIB", "rptrMonitorPortCollisions"), ("SNMP-REPEATER-MIB", "rptrMonitorPortLateEvents"), ("SNMP-REPEATER-MIB", "rptrMonitorPortVeryLongEvents"), ("SNMP-REPEATER-MIB", "rptrMonitorPortDataRateMismatches"), ("SNMP-REPEATER-MIB", "rptrMonitorPortAutoPartitions"), ("SNMP-REPEATER-MIB", "rptrMonitorPortTotalErrors"), ("SNMP-REPEATER-MIB", "rptrMonitorPortLastChange"), ("SNMP-REPEATER-MIB", "rptrMonTxCollisions"), ("SNMP-REPEATER-MIB", "rptrMonTotalFrames"), ("SNMP-REPEATER-MIB", "rptrMonTotalErrors"), ("SNMP-REPEATER-MIB", "rptrMonTotalOctets"),))
if mibBuilder.loadTexts: snmpRptrGrpMonitor.setDescription('Monitor group for a system with one or more\n repeater-units in multi-segment (post-RFC 1516)\n version of the MIB module.')
snmpRptrGrpMonitor100 = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 7)).setObjects(*(("SNMP-REPEATER-MIB", "rptrMonitorPortIsolates"), ("SNMP-REPEATER-MIB", "rptrMonitorPortSymbolErrors"), ("SNMP-REPEATER-MIB", "rptrMonitorPortUpper32Octets"), ("SNMP-REPEATER-MIB", "rptrMonUpper32TotalOctets"),))
if mibBuilder.loadTexts: snmpRptrGrpMonitor100.setDescription('Monitor group for 100Mb/s ports and repeaters\n in a system with one or more repeater-units in\n multi-segment (post-RFC 1516) version of the MIB\n module. Systems which support Counter64 should\n also implement snmpRptrGrpMonitor100w64.')
snmpRptrGrpMonitor100w64 = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 8)).setObjects(*(("SNMP-REPEATER-MIB", "rptrMonitorPortHCReadableOctets"), ("SNMP-REPEATER-MIB", "rptrMonHCTotalOctets"),))
if mibBuilder.loadTexts: snmpRptrGrpMonitor100w64.setDescription('Monitor group for 100Mb/s ports and repeaters in a\n system with one or more repeater-units and support\n for Counter64.')
snmpRptrGrpAddrTrack = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 9)).setObjects(*(("SNMP-REPEATER-MIB", "rptrAddrTrackGroupIndex"), ("SNMP-REPEATER-MIB", "rptrAddrTrackPortIndex"), ("SNMP-REPEATER-MIB", "rptrAddrTrackSourceAddrChanges"), ("SNMP-REPEATER-MIB", "rptrAddrTrackNewLastSrcAddress"), ("SNMP-REPEATER-MIB", "rptrAddrTrackCapacity"),))
if mibBuilder.loadTexts: snmpRptrGrpAddrTrack.setDescription('Passive address tracking group for post-RFC 1516\n version of the MIB module.')
snmpRptrGrpExtAddrTrack = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 10)).setObjects(*(("SNMP-REPEATER-MIB", "rptrExtAddrTrackMacIndex"), ("SNMP-REPEATER-MIB", "rptrExtAddrTrackSourceAddress"),))
if mibBuilder.loadTexts: snmpRptrGrpExtAddrTrack.setDescription('Extended passive address tracking group for\n a system with one or more repeater-units in\n post-RFC 1516 version of the MIB module.')
snmpRptrGrpRptrAddrSearch = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 11)).setObjects(*(("SNMP-REPEATER-MIB", "rptrAddrSearchLock"), ("SNMP-REPEATER-MIB", "rptrAddrSearchStatus"), ("SNMP-REPEATER-MIB", "rptrAddrSearchAddress"), ("SNMP-REPEATER-MIB", "rptrAddrSearchState"), ("SNMP-REPEATER-MIB", "rptrAddrSearchGroup"), ("SNMP-REPEATER-MIB", "rptrAddrSearchPort"), ("SNMP-REPEATER-MIB", "rptrAddrSearchOwner"),))
if mibBuilder.loadTexts: snmpRptrGrpRptrAddrSearch.setDescription('Active MAC address search group and topology\n mapping support for repeaters.')
snmpRptrGrpTopNPort = ObjectGroup((1, 3, 6, 1, 2, 1, 22, 5, 1, 2, 12)).setObjects(*(("SNMP-REPEATER-MIB", "rptrTopNPortControlIndex"), ("SNMP-REPEATER-MIB", "rptrTopNPortRepeaterId"), ("SNMP-REPEATER-MIB", "rptrTopNPortRateBase"), ("SNMP-REPEATER-MIB", "rptrTopNPortTimeRemaining"), ("SNMP-REPEATER-MIB", "rptrTopNPortDuration"), ("SNMP-REPEATER-MIB", "rptrTopNPortRequestedSize"), ("SNMP-REPEATER-MIB", "rptrTopNPortGrantedSize"), ("SNMP-REPEATER-MIB", "rptrTopNPortStartTime"), ("SNMP-REPEATER-MIB", "rptrTopNPortOwner"), ("SNMP-REPEATER-MIB", "rptrTopNPortRowStatus"), ("SNMP-REPEATER-MIB", "rptrTopNPortIndex"), ("SNMP-REPEATER-MIB", "rptrTopNPortGroupIndex"), ("SNMP-REPEATER-MIB", "rptrTopNPortPortIndex"), ("SNMP-REPEATER-MIB", "rptrTopNPortRate"),))
if mibBuilder.loadTexts: snmpRptrGrpTopNPort.setDescription("Top `N' group for repeater ports.")
snmpRptrModComplRFC1368 = ModuleCompliance((1, 3, 6, 1, 2, 1, 22, 5, 1, 1, 1)).setObjects(*(("SNMP-REPEATER-MIB", "snmpRptrGrpBasic1516"), ("SNMP-REPEATER-MIB", "snmpRptrGrpMonitor1516"), ("SNMP-REPEATER-MIB", "snmpRptrGrpAddrTrack1368"),))
if mibBuilder.loadTexts: snmpRptrModComplRFC1368.setDescription('Compliance for RFC 1368.\n\n NOTE: this module compliance is OBSOLETE and\n replaced by snmpRptrModComplRFC1516.')
snmpRptrModComplRFC1516 = ModuleCompliance((1, 3, 6, 1, 2, 1, 22, 5, 1, 1, 2)).setObjects(*(("SNMP-REPEATER-MIB", "snmpRptrGrpBasic1516"), ("SNMP-REPEATER-MIB", "snmpRptrGrpMonitor1516"), ("SNMP-REPEATER-MIB", "snmpRptrGrpAddrTrack1516"),))
if mibBuilder.loadTexts: snmpRptrModComplRFC1516.setDescription('********* THIS COMPLIANCE IS DEPRECATED **********\n\n Compliance for RFC 1516 and for backwards\n compatibility with single-repeater,\n 10Mb/s-only implementations.')
snmpRptrModCompl = ModuleCompliance((1, 3, 6, 1, 2, 1, 22, 5, 1, 1, 3)).setObjects(*(("SNMP-REPEATER-MIB", "snmpRptrGrpBasic"), ("SNMP-REPEATER-MIB", "snmpRptrGrpMonitor"), ("SNMP-REPEATER-MIB", "snmpRptrGrpAddrTrack"), ("SNMP-REPEATER-MIB", "snmpRptrGrpMonitor100"), ("SNMP-REPEATER-MIB", "snmpRptrGrpMonitor100w64"), ("SNMP-REPEATER-MIB", "snmpRptrGrpExtAddrTrack"), ("SNMP-REPEATER-MIB", "snmpRptrGrpRptrAddrSearch"), ("SNMP-REPEATER-MIB", "snmpRptrGrpTopNPort"),))
if mibBuilder.loadTexts: snmpRptrModCompl.setDescription('Compliance for the multi-segment version of the\n MIB module for a system with one or more\n repeater-units.')
mibBuilder.exportSymbols("SNMP-REPEATER-MIB", rptrTopNPortDuration=rptrTopNPortDuration, rptrGroupLastOperStatusChange=rptrGroupLastOperStatusChange, snmpRptrGrpTopNPort=snmpRptrGrpTopNPort, rptrAllRptrInfo=rptrAllRptrInfo, rptrMonitorGroupTotalErrors=rptrMonitorGroupTotalErrors, rptrGroupPortCapacity=rptrGroupPortCapacity, rptrTopNPortRowStatus=rptrTopNPortRowStatus, rptrGroupIndex=rptrGroupIndex, rptrMonitorPortLastChange=rptrMonitorPortLastChange, rptrMonitor100PortTable=rptrMonitor100PortTable, rptrReset=rptrReset, rptrAddrTrackNewLastSrcAddress=rptrAddrTrackNewLastSrcAddress, rptrAddrSearchTable=rptrAddrSearchTable, snmpRptrGrpBasic=snmpRptrGrpBasic, rptrGroupTable=rptrGroupTable, rptrGroupOperStatus=rptrGroupOperStatus, rptrMon100Table=rptrMon100Table, rptrTopNPortIndex=rptrTopNPortIndex, rptrAddrTrackEntry=rptrAddrTrackEntry, rptrAddrTrackRptrInfo=rptrAddrTrackRptrInfo, snmpDot3RptrMgt=snmpDot3RptrMgt, rptrAddrTrackCapacity=rptrAddrTrackCapacity, rptrMonTotalFrames=rptrMonTotalFrames, rptrTopNPortStartTime=rptrTopNPortStartTime, rptrMon100Entry=rptrMon100Entry, rptrTopNPortInfo=rptrTopNPortInfo, rptrNonDisruptTest=rptrNonDisruptTest, rptrAddrSearchOwner=rptrAddrSearchOwner, rptrGroupCapacity=rptrGroupCapacity, rptrInfoTable=rptrInfoTable, rptrAddrSearchAddress=rptrAddrSearchAddress, rptrExtAddrTrackMacIndex=rptrExtAddrTrackMacIndex, rptrAddrSearchPort=rptrAddrSearchPort, rptrMonTotalOctets=rptrMonTotalOctets, rptrPortIndex=rptrPortIndex, rptrMonTable=rptrMonTable, snmpRptrGrpMonitor100=snmpRptrGrpMonitor100, rptrMonitorPortVeryLongEvents=rptrMonitorPortVeryLongEvents, rptrMonitorPortReadableOctets=rptrMonitorPortReadableOctets, rptrInfoOperStatus=rptrInfoOperStatus, snmpRptrModComplRFC1368=snmpRptrModComplRFC1368, PYSNMP_MODULE_ID=snmpRptrMod, rptrGroupEntry=rptrGroupEntry, snmpRptrModObjGrps=snmpRptrModObjGrps, snmpRptrGrpBasic1516=snmpRptrGrpBasic1516, rptrTopNPortEntry=rptrTopNPortEntry, rptrTopNPortRate=rptrTopNPortRate, rptrPortAdminStatus=rptrPortAdminStatus, rptrMonitorPortGroupIndex=rptrMonitorPortGroupIndex, rptrMonitorPortRunts=rptrMonitorPortRunts, rptrExtAddrTrackTable=rptrExtAddrTrackTable, rptrTotalPartitionedPorts=rptrTotalPartitionedPorts, rptrAddrTrackGroupInfo=rptrAddrTrackGroupInfo, rptrExtAddrTrackEntry=rptrExtAddrTrackEntry, rptrMonTotalErrors=rptrMonTotalErrors, rptrAddrTrackSourceAddrChanges=rptrAddrTrackSourceAddrChanges, rptrMonitorTransmitCollisions=rptrMonitorTransmitCollisions, rptrHealth=rptrHealth, rptrTopNPackage=rptrTopNPackage, rptrInfoRptrType=rptrInfoRptrType, rptrAddrSearchLock=rptrAddrSearchLock, rptrTopNPortPortIndex=rptrTopNPortPortIndex, rptrHealthText=rptrHealthText, rptrMonitorPortReadableFrames=rptrMonitorPortReadableFrames, OptMacAddr=OptMacAddr, rptrMonEntry=rptrMonEntry, snmpRptrGrpAddrTrack1368=snmpRptrGrpAddrTrack1368, snmpRptrModCompl=snmpRptrModCompl, snmpRptrGrpMonitor=snmpRptrGrpMonitor, rptrInfoPartitionedPorts=rptrInfoPartitionedPorts, snmpRptrGrpMonitor100w64=snmpRptrGrpMonitor100w64, rptrMonHCTotalOctets=rptrMonHCTotalOctets, rptrAddrSearchState=rptrAddrSearchState, rptrMonitorAllRptrInfo=rptrMonitorAllRptrInfo, rptrInfoResetEvent=rptrInfoResetEvent, rptrGroupInfo=rptrGroupInfo, rptrAddrSearchGroup=rptrAddrSearchGroup, rptrPortInfo=rptrPortInfo, rptrAddrTrackTable=rptrAddrTrackTable, snmpRptrGrpExtAddrTrack=snmpRptrGrpExtAddrTrack, rptrTopNPortGrantedSize=rptrTopNPortGrantedSize, rptrOperStatus=rptrOperStatus, rptrAddrTrackPortIndex=rptrAddrTrackPortIndex, rptrTopNPortOwner=rptrTopNPortOwner, rptrResetEvent=rptrResetEvent, 
rptrTopNPortTable=rptrTopNPortTable, rptrBasicPackage=rptrBasicPackage, rptrGroupChange=rptrGroupChange, rptrMonTxCollisions=rptrMonTxCollisions, rptrAddrTrackPackage=rptrAddrTrackPackage, rptrMonitorPortFCSErrors=rptrMonitorPortFCSErrors, rptrTopNPortControlEntry=rptrTopNPortControlEntry, snmpRptrGrpMonitor1516=snmpRptrGrpMonitor1516, rptrPortRptrId=rptrPortRptrId, rptrMonitorPortCollisions=rptrMonitorPortCollisions, rptrMonitorPortUpper32Octets=rptrMonitorPortUpper32Octets, snmpRptrGrpAddrTrack=snmpRptrGrpAddrTrack, rptrMonitorGroupInfo=rptrMonitorGroupInfo, rptrTopNPortControlIndex=rptrTopNPortControlIndex, snmpRptrGrpRptrAddrSearch=snmpRptrGrpRptrAddrSearch, rptrPortGroupIndex=rptrPortGroupIndex, rptrInfoLastChange=rptrInfoLastChange, rptrPortAutoPartitionState=rptrPortAutoPartitionState, rptrMonitorPortLateEvents=rptrMonitorPortLateEvents, rptrInfoId=rptrInfoId, rptrTopNPortTimeRemaining=rptrTopNPortTimeRemaining, rptrInfoHealth=rptrInfoHealth, rptrAddrTrackLastSourceAddress=rptrAddrTrackLastSourceAddress, rptrGroupObjectID=rptrGroupObjectID, rptrMonitorPortInfo=rptrMonitorPortInfo, rptrInfoReset=rptrInfoReset, rptrTopNPortRateBase=rptrTopNPortRateBase, rptrMonitorGroupTable=rptrMonitorGroupTable, snmpRptrModCompls=snmpRptrModCompls, rptrMonitorGroupTotalFrames=rptrMonitorGroupTotalFrames, rptrAddrTrackPortInfo=rptrAddrTrackPortInfo, rptrMonitorGroupEntry=rptrMonitorGroupEntry, rptrMonitorPortEntry=rptrMonitorPortEntry, rptrMonitorPortIsolates=rptrMonitorPortIsolates, snmpRptrModComplRFC1516=snmpRptrModComplRFC1516, rptrAddrSearchStatus=rptrAddrSearchStatus, rptrAddrTrackGroupIndex=rptrAddrTrackGroupIndex, rptrMonitorPortShortEvents=rptrMonitorPortShortEvents, rptrTopNPortRepeaterId=rptrTopNPortRepeaterId, snmpRptrModNotGrps=snmpRptrModNotGrps, rptrMonitorGroupTotalOctets=rptrMonitorGroupTotalOctets, rptrMonitorPackage=rptrMonitorPackage, rptrMonitorPortFrameTooLongs=rptrMonitorPortFrameTooLongs, rptrTopNPortRequestedSize=rptrTopNPortRequestedSize, rptrRptrInfo=rptrRptrInfo, rptrTopNRptrInfo=rptrTopNRptrInfo, snmpRptrGrpAddrTrack1516=snmpRptrGrpAddrTrack1516, rptrGroupDescr=rptrGroupDescr, rptrTopNGroupInfo=rptrTopNGroupInfo, rptrMonitorRptrInfo=rptrMonitorRptrInfo, rptrTopNPortGroupIndex=rptrTopNPortGroupIndex, rptrExtAddrTrackSourceAddress=rptrExtAddrTrackSourceAddress, snmpRptrMod=snmpRptrMod, rptrMonitorGroupIndex=rptrMonitorGroupIndex, rptrAddrSearchEntry=rptrAddrSearchEntry, snmpRptrModConf=snmpRptrModConf, rptrMonitorPortDataRateMismatches=rptrMonitorPortDataRateMismatches, rptrMonitor100PortEntry=rptrMonitor100PortEntry, rptrMonitorPortAlignmentErrors=rptrMonitorPortAlignmentErrors, rptrMonitorPortSymbolErrors=rptrMonitorPortSymbolErrors, rptrMonitorPortTotalErrors=rptrMonitorPortTotalErrors, rptrMonitorPortTable=rptrMonitorPortTable, rptrMonitorPortAutoPartitions=rptrMonitorPortAutoPartitions, rptrPortOperStatus=rptrPortOperStatus, rptrMonitorPortIndex=rptrMonitorPortIndex, rptrPortTable=rptrPortTable, rptrPortEntry=rptrPortEntry, rptrInfoEntry=rptrInfoEntry, rptrMonUpper32TotalOctets=rptrMonUpper32TotalOctets, rptrTopNPortControlTable=rptrTopNPortControlTable, rptrMonitorPortHCReadableOctets=rptrMonitorPortHCReadableOctets)
|
python
|
import os
import unittest
import numpy as np
from pyfluka.utils import PhysicsQuantities as PQ
from pyfluka.plugins.PlotMaker import PlotMaker as PM
from pyfluka.utils.Plotter import PlotConfig as PC
class TestPlotMaker(unittest.TestCase):
def setUp(self):
plotConfigDict = {'type': "2D", 'quantity': "Activity"}
self.plotConfig = [PC("foo", plotConfigDict)]
self.pm = PM([plotConfigDict], "foo")
rawDataArr = np.array([PQ.Activity(i) for i in range(1000)])
self.rawData = {"Det1": {'Activity': rawDataArr, "Binning": [(0, 1, 1), (0, 100, 20), (0, 150, 50)]}}
self.data = np.reshape(rawDataArr, [20, 50, 1]).transpose()
#self.refPlot = plt.pcolor(self.data[0].astype(float))
#@classmethod
#def tearDownClass(cls):
#os.remove("fooDet1")
# @image_comparison(baseline_images=['self.refPlot'])
@unittest.skip("not fully implemented yet")
def testPlotMatrix(self):
plot = self.pm.invoke(self.rawData)
plot.show()
def testAddPlotConfig(self):
self.assertEqual(self.pm.config, self.plotConfig)
@unittest.skip("not running on travis")
def testPlot2DSimpleHasKey(self):
self.pm.invoke(self.rawData)
self.assertTrue(os.path.exists("fooDet1"))
@unittest.skip("not running on travis")
def testInvalidPlotConfigWrongQuantity(self):
plotConfigInvalid = [{"type": "2D"}]
pm = PM(plotConfigInvalid)
self.assertRaises(AttributeError, pm.invoke, self.rawData)
|
python
|
from __future__ import absolute_import, division, print_function
import sys
from iotbx import reflection_file_editor
if __name__ == "__main__" :
reflection_file_editor.run(sys.argv[1:])
|
python
|
from unittest.mock import MagicMock, patch
import pytest
from geniust import constants
from geniust.functions import account
@pytest.mark.parametrize("genius_token", [None, "some_token"])
@pytest.mark.parametrize("spotify_token", [None, "some_token"])
def test_login_choices(update_message, context, genius_token, spotify_token):
update = update_message
context.user_data["genius_token"] = genius_token
context.user_data["spotify_token"] = spotify_token
res = account.login_choices(update, context)
keyboard = update.message.reply_text.call_args[1]["reply_markup"]["inline_keyboard"]
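    # One login button is offered per platform the user has not yet linked,
    # so the keyboard shrinks as more tokens are already present.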
if genius_token and spotify_token:
assert len(keyboard) == 0
elif genius_token or spotify_token:
assert len(keyboard) == 1
else:
assert len(keyboard) == 2
assert res == constants.END
@pytest.mark.parametrize("platform", ["genius", "spotify"])
def test_login(update_callback_query, context, platform):
update = update_callback_query
update.callback_query.data = f"account_login_{platform}"
res = account.login(update, context)
update.callback_query.answer.assert_called_once()
assert res == constants.END
def test_logged_in(update_callback_query, context):
update = update_callback_query
user = context.user_data
user["token"] = "test_token"
res = account.logged_in(update, context)
keyboard = update.callback_query.edit_message_text.call_args[1]["reply_markup"][
"inline_keyboard"
]
assert len(keyboard) == 3
update.callback_query.answer.assert_called_once()
assert res == constants.END
def test_logout(update_callback_query, context):
update = update_callback_query
user = context.user_data
user["token"] = "test_token"
res = account.logout(update, context)
context.bot_data["db"].delete_token.assert_called_once()
update.callback_query.answer.assert_called_once()
assert res == constants.END
@pytest.mark.parametrize("artist_data", [pytest.lazy_fixture("song_dict"), None])
def test_display_account(update_callback_query, context, account_dict, artist_data):
update = update_callback_query
user = context.user_data
user["token"] = "test_token"
genius = MagicMock()
if artist_data is None:
account_dict["user"]["artist"] = None
else:
song = artist_data
account_dict["user"]["artist"] = song["song"]["primary_artist"]
genius().account.return_value = account_dict
with patch("geniust.api.GeniusT", genius):
res = account.display_account(update, context)
update.callback_query.message.delete.assert_called_once()
assert res == constants.END
|
python
|
from insights.parsers import docker_list
from insights.tests import context_wrap
DOCKER_LIST_IMAGES = """
REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE
rhel6_vsftpd latest <none> 412b684338a1178f0e5ad68a5fd00df01a10a18495959398b2cf92c2033d3d02 37 minutes ago 459.5 MB
<none> <none> <none> 34c167d900afb820ecab622a214ce3207af80ec755c0dcb6165b425087ddbc3a 5 days ago 205.3 MB
<none> <none> <none> 76e65756ff110ca5ea54ac02733fe04301b33a9190689eb524dd5aa18843996a 5 days ago 205.3 MB
""".strip()
DOCKER_LIST_CONTAINERS = """
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE
03e2861336a76e29155836113ff6560cb70780c32f95062642993b2b3d0fc216 rhel7_httpd "/usr/sbin/httpd -DFOREGROUND" 45 seconds ago Up 37 seconds 0.0.0.0:8080->80/tcp angry_saha 796 B (virtual 669.2 MB)
95516ea08b565e37e2a4bca3333af40a240c368131b77276da8dec629b7fe102 bd8638c869ea40a9269d87e9af6741574562af9ee013e03ac2745fb5f59e2478 "/bin/sh -c 'yum install -y vsftpd-2.2.2-6.el6'" 18 hours ago Exited (137) 18 hours ago tender_rosalind 4.751 MB (virtual 200.4 MB)
""".strip()
DOCKER_LIST_IMAGES_NO_DATA = """
REPOSITORY TAG DIGEST IMAGE ID CREATED VIRTUAL SIZE
"""
def test_docker_list_images():
result = docker_list.DockerListImages(context_wrap(DOCKER_LIST_IMAGES))
# All rows get read:
assert len(result.rows) == 3
# Rows with data are as normal
assert result.rows[0].get("REPOSITORY") == "rhel6_vsftpd"
assert result.rows[0].get("TAG") == "latest"
assert result.rows[0].get("DIGEST") == "<none>"
assert result.rows[0].get("IMAGE ID") == '412b684338a1178f0e5ad68a5fd00df01a10a18495959398b2cf92c2033d3d02'
assert result.rows[0].get("CREATED") == "37 minutes ago"
assert result.rows[0].get("VIRTUAL SIZE") == "459.5 MB"
# Rows with <none> still get processed.
assert result.rows[1].get("REPOSITORY") == "<none>"
assert result.rows[1].get("TAG") == "<none>"
assert result.rows[1].get("IMAGE ID") == '34c167d900afb820ecab622a214ce3207af80ec755c0dcb6165b425087ddbc3a'
assert result.rows[2].get("REPOSITORY") == "<none>"
assert result.rows[2].get("TAG") == "<none>"
assert result.rows[2].get("IMAGE ID") == '76e65756ff110ca5ea54ac02733fe04301b33a9190689eb524dd5aa18843996a'
assert result.data['rhel6_vsftpd']['CREATED'] == '37 minutes ago'
# Same data in both accessors
assert result.data['rhel6_vsftpd'] == result.rows[0]
# Can't list repositories if they don't have a repository name
assert '<none>' not in result.data
def test_docker_list_images_no_data():
result = docker_list.DockerListImages(context_wrap(DOCKER_LIST_IMAGES_NO_DATA))
# All rows get read:
assert len(result.rows) == 0
assert result.no_data
def test_docker_list_containers():
result = docker_list.DockerListContainers(context_wrap(DOCKER_LIST_CONTAINERS))
assert len(result.rows) == 2
assert result.rows[0].get("CONTAINER ID") == "03e2861336a76e29155836113ff6560cb70780c32f95062642993b2b3d0fc216"
assert result.rows[0].get("COMMAND") == '"/usr/sbin/httpd -DFOREGROUND"'
assert result.rows[0].get("SIZE") == "796 B (virtual 669.2 MB)"
assert result.rows[0].get("CREATED") == "45 seconds ago"
assert result.rows[0].get("PORTS") == "0.0.0.0:8080->80/tcp"
assert result.rows[1].get("CONTAINER ID") == "95516ea08b565e37e2a4bca3333af40a240c368131b77276da8dec629b7fe102"
assert result.rows[1].get("COMMAND") == '"/bin/sh -c \'yum install -y vsftpd-2.2.2-6.el6\'"'
assert result.rows[1]['STATUS'] == 'Exited (137) 18 hours ago'
assert result.rows[1].get("PORTS") is None
assert sorted(result.data.keys()) == sorted(['angry_saha', 'tender_rosalind'])
assert result.data['angry_saha'] == result.rows[0]
assert result.data['tender_rosalind'] == result.rows[1]
|
python
|
from math import cos
from reclaimer.hek.defs.objs.tag import HekTag
class DeviTag(HekTag):
def calc_internal_data(self):
HekTag.calc_internal_data(self)
devi_attrs = self.data.tagdata.devi_attrs
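        # The *_time fields below hold seconds; the factor of 30 presumably reflects
        # the engine's 30 ticks per second, so each inverse field stores the per-tick
        # reciprocal of its interval (and stays 0 when the interval is 0).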
devi_attrs.inv_power_acceleration_time = 0
devi_attrs.inv_power_transition_time = 0
devi_attrs.inv_position_acceleration_time = 0
devi_attrs.inv_position_transition_time = 0
devi_attrs.inv_depowered_acceleration_time = 0
devi_attrs.inv_depowered_transition_time = 0
if devi_attrs.power_acceleration_time:
devi_attrs.inv_power_acceleration_time = 1 / (
30 * devi_attrs.power_acceleration_time)
if devi_attrs.power_transition_time:
devi_attrs.inv_power_transition_time = 1 / (
30 * devi_attrs.power_transition_time)
if devi_attrs.depowered_position_acceleration_time:
devi_attrs.inv_depowered_acceleration_time = 1 / (
30 * devi_attrs.depowered_position_acceleration_time)
if devi_attrs.depowered_position_transition_time:
devi_attrs.inv_depowered_transition_time = 1 / (
30 * devi_attrs.depowered_position_transition_time)
if devi_attrs.position_acceleration_time:
devi_attrs.inv_position_acceleration_time = 1 / (
30 * devi_attrs.position_acceleration_time)
if devi_attrs.position_transition_time:
devi_attrs.inv_position_transition_time = 1 / (
30 * devi_attrs.position_transition_time)
|
python
|
from pepy.domain.model import ProjectName
def test_project_name_strip_spaces():
project = ProjectName(" some-project ")
assert "some-project" == project.name
def test_set_lowercase_to_project_name():
project = ProjectName("Some-Project")
assert "some-project" == project.name
|
python
|
from sqlalchemy import *
from sqlalchemy.engine import create_engine
from sqlalchemy.schema import *
from sqlalchemy.sql import text
# Hive
engine = create_engine('hive://192.168.10.12:10010/ltv')
sql = text("select * from ltv.rac_grant_credit limit 10")
sql_rst = engine.execute(sql).fetchall()
print(sql_rst)
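# A minimal follow-up sketch (not from the original snippet): the same lookup with a
# bound parameter instead of a hard-coded literal. The column name 'grant_id' is
# hypothetical; this assumes the same PyHive/SQLAlchemy setup as above.
bound_sql = text("select * from ltv.rac_grant_credit where grant_id = :gid limit 10")
print(engine.execute(bound_sql, gid=42).fetchall())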
|
python
|
orders_num = int(input())
total = 0
for n in range(orders_num):
price_capsule = float(input())
days = int(input())
capsule_count = int(input())
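    # cost of one order = price per capsule * number of days * number of capsules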
price = price_capsule * days * capsule_count
total += price
print(f'The price for the coffee is: ${price:.2f}')
print(f'Total: ${total:.2f}')
|
python
|
""" Models for notice app """
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from messaging.tasks import send_email
from condominium.models import Company
class Feedback(models.Model):
New = 'NEW'
Resolved = 'RES'
Ignored = 'IGN'
STATUS_CHOICES = (
(New, _('New feedback')),
(Resolved, _('Resolved feedback')),
(Ignored, _('Ignored feedback')),
)
# Fields
title = models.CharField(_('Feedback title'), max_length=100)
text = models.TextField(_('Feedback text'))
status = models.CharField(_('Feedback status'), max_length=3,
choices=STATUS_CHOICES, default=New)
# Creator and Date information
created_by = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name=_('Created by'),
blank=True, null=True, on_delete=models.CASCADE)
date_created = models.DateTimeField(_("Date created"), auto_now_add=True)
date_updated = models.DateTimeField(_("Date updated"), auto_now=True, db_index=True)
class Meta:
verbose_name = _('Feedback')
verbose_name_plural = _('Feedbacks')
ordering = ['-date_created']
def __str__(self):
return self.title
def save(self, *args, **kwargs):
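        # Notify the managing company's service e-mail about every saved feedback.
        # (Assumes at least one Company record exists; otherwise Company.objects.first()
        # returns None and the attribute access below would fail.)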
service_email = Company.objects.first().service_email
if service_email:
send_email(self.title, self.text, to=[service_email]) # TODO add delay
super().save(*args, **kwargs) # Call the real save() method
|
python
|
#!/usr/bin/env python3
import pandas as pd
import numpy as np
import argparse
import subprocess
import os
import gzip
import feather
def load_pair_data(path):
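    """Load a pair_id-indexed DataFrame with 'slope' and 'slope_se' columns
    from either a tab-delimited .txt.gz file or a Feather (.ft) file."""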
if path.endswith('.txt.gz'):
return pd.read_csv(path, sep='\t', usecols=['pair_id', 'slope', 'slope_se'], index_col=0, dtype={'pair_id':str, 'slope':np.float32, 'slope_se':np.float32})
elif path.endswith('.ft'):
df = feather.read_dataframe(path, columns=['pair_id', 'slope', 'slope_se'])
df.set_index('pair_id', inplace=True)
return df
else:
raise ValueError('Input format not recognized.')
parser = argparse.ArgumentParser(description='Prepare METASOFT input.')
parser.add_argument('variant_gene_pair_files', help="File listing variant-gene pair association result files, one path per line. Each file's header must specify 'slope' and 'slope_se' columns.")
parser.add_argument('prefix', help='Prefix for output file: <prefix>.metasoft_input.[chunk000.]txt.gz')
parser.add_argument('--chunks', default=None, type=int, help='Split the output into this number of chunks')
parser.add_argument('-o', '--output_dir', default='.', help='Output directory')
parser.add_argument('--write_full', action='store_true', help='Write full input table')
args = parser.parse_args()
with open(args.variant_gene_pair_files) as f:
paths = f.read().strip().split('\n')
sample_ids = np.array([os.path.split(i)[1].split('.')[0] for i in paths])
assert len(sample_ids)==len(np.unique(sample_ids))
# sort by sample ID
i = np.argsort(sample_ids)
sample_ids = sample_ids[i]
paths = np.array(paths)[i]
print('Reading input files')
df = load_pair_data(paths[0])
# input format: pair_id, tissue1_slope, tissue1_slope_se, tissue2_slope, tissue2_slope_se, ...
metasoft_df = pd.DataFrame(0, index=df.index, columns=[j for i in sample_ids for j in [i+'_slope', i+'_slope_se']], dtype=np.float32)
metasoft_df[sample_ids[0]+'_slope'] = df['slope']
metasoft_df[sample_ids[0]+'_slope_se'] = df['slope_se']
for k,(i,p) in enumerate(zip(sample_ids[1:], paths[1:])):
print(' * processing {}/{}'.format(k+2, len(paths)), flush=True)
df = load_pair_data(p)
metasoft_df[i+'_slope'] = df['slope']
metasoft_df[i+'_slope_se'] = df['slope_se']
print()
print('Writing Metasoft input')
# split into chunks for parallelization
if args.chunks is not None:
chunk_size = int(np.ceil(metasoft_df.shape[0] / args.chunks))
for i in np.arange(args.chunks):
print(' * writing chunk {}/{}'.format(i+1, args.chunks), flush=True)
with gzip.open(os.path.join(args.output_dir, args.prefix+'.metasoft_input.chunk{:03d}.txt.gz'.format(i)), 'wt', compresslevel=1) as f:
metasoft_df.iloc[i*chunk_size:(i+1)*chunk_size].to_csv(f, sep='\t', float_format='%.6g', na_rep='NA')
print()
if args.write_full:
print('Writing full table')
with gzip.open(os.path.join(args.output_dir, args.prefix+'.metasoft_input.txt.gz'), 'wt', compresslevel=1) as f:
metasoft_df.to_csv(f, sep='\t', float_format='%.6g', na_rep='NA')
|
python
|
#from nose.plugins.skip import SkipTest
from nose.tools import assert_equal, raises
from mock import Mock, patch
from unittest import TestCase
from JenkinsPluginResolver.JenkinsPluginResolver import JenkinsPluginResolver
from os.path import dirname, realpath
class Test_JenkinsPluginResolver(TestCase):
@patch('JenkinsPluginResolver.JenkinsPluginResolver.urlopen')
def setUp(self, mock_urlopen):
test_json_loc = '{}/test-update-center.json'.format(
dirname(realpath(__file__)))
with open(test_json_loc) as f:
test_json = f.read()
# mock the read return
mock = Mock()
mock.read.return_value = test_json
mock_urlopen.return_value = mock
self.jpr = JenkinsPluginResolver()
def test_the_test(self):
assert_equal(0, 0)
def test_uc_post(self):
self.jpr.uc_post()
def test_load(self):
self.jpr.load('plugin_1')
def test_dump(self):
r = dict()
assert_equal(self.jpr.dump(), r)
def test_clear(self):
self.jpr.clear()
def test_resolve_plugin(self):
self.jpr.load('plugin_1')
r = {'plugin_1': 'latest', 'plugin_2': 'latest', 'plugin_3': 'latest'}
assert_equal(self.jpr.dump(), r)
def test_clear_plugins(self):
self.jpr.load('plugin_1')
self.jpr.clear()
r = dict()
assert_equal(self.jpr.dump(), r)
def test_dupe_plugins(self):
self.jpr.load('plugin_1')
self.jpr.load('plugin_1')
self.jpr.load('plugin_1')
r = {'plugin_1': 'latest', 'plugin_2': 'latest', 'plugin_3': 'latest'}
assert_equal(self.jpr.dump(), r)
@raises(RuntimeError)
def test_bad_plugin(self):
self.jpr.load('plugin_4')
def test_pinned_plugin(self):
self.jpr.load('plugin_1', '2.3.5')
r = {'plugin_1': '2.3.5', 'plugin_2': 'latest', 'plugin_3': 'latest'}
assert_equal(self.jpr.dump(), r)
|
python