content | fixed_cases
---|---
# fixing the issue in food.py
# this is kind of a bug; that's not what we wanted
# this can be done by making the class variable an instance variable
class Food:
def __init__(self, name):
self.name = name # instance variable (attr)
self.fav_food = [] # was a class variable; fixed by making it an instance variable
def set_fav_food(self, food: str):
self.fav_food.append(food)
person_a = Food('jerry')
person_a.set_fav_food('rice and pancake')
# person_a can access its own fav_food
print(person_a.fav_food)
person_b = Food('Lee')
person_b.set_fav_food('roasted groundnut and ketchup')
print(person_b.fav_food)
# when you check the output, you realise that the second instance doesn't modify
# the first instance's fav_food list
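# For contrast, a minimal sketch of the buggy class-variable version
# (illustrative code, not part of the original file):
class BuggyFood:
    fav_food = []  # class variable: one list shared by every instance
    def __init__(self, name):
        self.name = name
    def set_fav_food(self, food: str):
        self.fav_food.append(food)  # mutates the shared class-level list
buggy_a = BuggyFood('jerry')
buggy_a.set_fav_food('rice and pancake')
buggy_b = BuggyFood('Lee')
print(buggy_b.fav_food)  # ['rice and pancake'] -- buggy_b sees buggy_a's food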
|
class Food:
def __init__(self, name):
self.name = name
self.fav_food = []
def set_fav_food(self, food: str):
self.fav_food.append(food)
person_a = Food('jerry')
person_a.set_fav_food('rice and pancake')
print(person_a.fav_food)
person_b = Food('Lee')
person_b.set_fav_food('roasted groundnut and ketchup')
print(person_b.fav_food)
|
'''
People module for the Derrida project.
It provides basic personography, VIAF lookup, and admin functionality to edit
people associated with Derrida's library.
'''
default_app_config = 'derrida.people.apps.PeopleConfig'
|
"""
People module for the Derrida project.
It provides basic personography, VIAF lookup, and admin functionality to edit
people associated with Derrida's library.
"""
default_app_config = 'derrida.people.apps.PeopleConfig'
|
"""
===============
=== Purpose ===
===============
Encodes the hierarchy of US political divisions.
This file, together with populations.py, replaces the static data portion of
state_info.py.
The location names used in this file match FluView names as specified in
fluview_locations.py of delphi-epidata.
===================
=== Explanation ===
===================
Although intended to be a more or less general-purpose description of the
various US geopolitical divisions, for all practical purposes the data in this
file corresponds to the FluView perspective of the world.
In this perspective, the US is a hierarchy where regions at any given level are
composed of smaller regions at a lower level. Notably, it may be possible to
subdivide a given region into multiple distinct sets of smaller regions.
However, the set of locations in any given subdivision fully covers and spans
the region being subdivided. In other words, there are never any gaps.
The root of the hierarchy is the national region (shortened to "nat") which
represents the entire US, including many of its territories. Each lower layer
of the hierarchy consists of smaller regions which combine together to form the
national region.
The leaves of the hierarchy are called "atoms" and have no further subdivisions
-- at least, not from a FluView perspective. These are typically US states,
although they also include some state fragments, territories, and cities.
By convention, the middle layers of the hierarchy are collectively called
"regions". This includes, for example, the ten HHS regions as one subdivision
of national and the nine Census divisions as another. Each of the HHS and
Census regions is in turn made up of atoms -- mostly states, with a few
exceptions.
"""
class Locations:
"""Encodes the hierarchy of US political divisions."""
# atomic regions for FluView data (regions containing only themselves)
atom_list = [
# entire states
'ak', 'al', 'ar', 'az', 'ca', 'co', 'ct', 'de', 'fl', 'ga', 'hi', 'ia',
'id', 'il', 'in', 'ks', 'ky', 'la', 'ma', 'md', 'me', 'mi', 'mn', 'mo',
'ms', 'mt', 'nc', 'nd', 'ne', 'nh', 'nj', 'nm', 'nv', 'oh', 'ok', 'or',
'pa', 'ri', 'sc', 'sd', 'tn', 'tx', 'ut', 'va', 'vt', 'wa', 'wi', 'wv',
'wy',
# state fragments
'ny_minus_jfk',
# territories
'dc', 'pr', 'vi',
# cities
'jfk',
]
atom_map = {a: [a] for a in atom_list}
# national, HHS, and Census regions in terms of atoms
nat_list = ['nat']
nat_map = dict(zip(nat_list, [atom_list]))
hhs_list = ['hhs%d' % i for i in range(1, 11)]
hhs_map = dict(zip(hhs_list, [
['ct', 'ma', 'me', 'nh', 'ri', 'vt'],
['jfk', 'nj', 'ny_minus_jfk', 'pr', 'vi'],
['dc', 'de', 'md', 'pa', 'va', 'wv'],
['al', 'fl', 'ga', 'ky', 'ms', 'nc', 'sc', 'tn'],
['il', 'in', 'mi', 'mn', 'oh', 'wi'],
['ar', 'la', 'nm', 'ok', 'tx'],
['ia', 'ks', 'mo', 'ne'],
['co', 'mt', 'nd', 'sd', 'ut', 'wy'],
['az', 'ca', 'hi', 'nv'],
['ak', 'id', 'or', 'wa'],
]))
cen_list = ['cen%d' % i for i in range(1, 10)]
cen_map = dict(zip(cen_list, [
['ct', 'ma', 'me', 'nh', 'ri', 'vt'],
['jfk', 'nj', 'ny_minus_jfk', 'pa', 'pr', 'vi'],
['il', 'in', 'mi', 'oh', 'wi'],
['ia', 'ks', 'mn', 'mo', 'nd', 'ne', 'sd'],
['dc', 'de', 'fl', 'ga', 'md', 'nc', 'sc', 'va', 'wv'],
['al', 'ky', 'ms', 'tn'],
['ar', 'la', 'ok', 'tx'],
['az', 'co', 'id', 'mt', 'nm', 'nv', 'ut', 'wy'],
['ak', 'ca', 'hi', 'or', 'wa'],
]))
# New York state combines the "ny_minus_jfk" fragment with the "jfk" city
ny_state_list = ['ny']
ny_state_map = {ny_state_list[0]: ['jfk', 'ny_minus_jfk']}
# collections of all known locations
region_list = nat_list + hhs_list + cen_list + ny_state_list + atom_list
region_map = {}
region_map.update(nat_map)
region_map.update(hhs_map)
region_map.update(cen_map)
region_map.update(ny_state_map)
region_map.update(atom_map)
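# A quick sanity check of how the maps compose (illustrative code, not part
# of the original file):
if __name__ == '__main__':
    # the ten HHS regions partition the national region into atoms
    hhs_atoms = sorted(sum((Locations.region_map[r] for r in Locations.hhs_list), []))
    assert hhs_atoms == sorted(Locations.region_map['nat'])
    # 'ny' is not an atom; it is the union of the 'jfk' and 'ny_minus_jfk' atoms
    assert Locations.region_map['ny'] == ['jfk', 'ny_minus_jfk']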
|
"""
===============
=== Purpose ===
===============
Encodes the hierarchy of US political divisions.
This file, together with populations.py, replaces the static data portion of
state_info.py.
The location names used in this file match FluView names as specified in
fluview_locations.py of delphi-epidata.
===================
=== Explanation ===
===================
Although intended to be a more or less general-purpose description of the
various US geopolitical divisions, for all practical purposes the data in this
file corresponds to the FluView perspective of the world.
In this perspective, the US is a hierarchy where regions at any given level are
composed of smaller regions at a lower level. Notably, it may be possible to
subdivide a given region into multiple distinct sets of smaller regions.
However, the set of locations in any given subdivision fully covers and spans
the region being subdivided. In other words, there are never any gaps.
The root of the hierarchy is the national region (shortened to "nat") which
represents the entire US, including many of its territories. Each lower layer
of the hierarchy consists of smaller regions which combine together to form the
national region.
The leaves of the hierarchy are called "atoms" and have no further subdivisions
-- at least, not from a FluView perspective. These are typically US states,
although they also include some state fragments, territories, and cities.
By convention, the middle layers of the hierarchy are collectively called
"regions". This includes, for example, the ten HHS regions as one subdivision
of national and the nine Census divisions as another. Each of the HHS and
Census regions is in turn made up of atoms -- mostly states, with a few
exceptions.
"""
class Locations:
"""Encodes the hierarchy of US political divisions."""
atom_list = ['ak', 'al', 'ar', 'az', 'ca', 'co', 'ct', 'de', 'fl', 'ga', 'hi', 'ia', 'id', 'il', 'in', 'ks', 'ky', 'la', 'ma', 'md', 'me', 'mi', 'mn', 'mo', 'ms', 'mt', 'nc', 'nd', 'ne', 'nh', 'nj', 'nm', 'nv', 'oh', 'ok', 'or', 'pa', 'ri', 'sc', 'sd', 'tn', 'tx', 'ut', 'va', 'vt', 'wa', 'wi', 'wv', 'wy', 'ny_minus_jfk', 'dc', 'pr', 'vi', 'jfk']
atom_map = {a: [a] for a in atom_list}
nat_list = ['nat']
nat_map = dict(zip(nat_list, [atom_list]))
hhs_list = ['hhs%d' % i for i in range(1, 11)]
hhs_map = dict(zip(hhs_list, [['ct', 'ma', 'me', 'nh', 'ri', 'vt'], ['jfk', 'nj', 'ny_minus_jfk', 'pr', 'vi'], ['dc', 'de', 'md', 'pa', 'va', 'wv'], ['al', 'fl', 'ga', 'ky', 'ms', 'nc', 'sc', 'tn'], ['il', 'in', 'mi', 'mn', 'oh', 'wi'], ['ar', 'la', 'nm', 'ok', 'tx'], ['ia', 'ks', 'mo', 'ne'], ['co', 'mt', 'nd', 'sd', 'ut', 'wy'], ['az', 'ca', 'hi', 'nv'], ['ak', 'id', 'or', 'wa']]))
cen_list = ['cen%d' % i for i in range(1, 10)]
cen_map = dict(zip(cen_list, [['ct', 'ma', 'me', 'nh', 'ri', 'vt'], ['jfk', 'nj', 'ny_minus_jfk', 'pa', 'pr', 'vi'], ['il', 'in', 'mi', 'oh', 'wi'], ['ia', 'ks', 'mn', 'mo', 'nd', 'ne', 'sd'], ['dc', 'de', 'fl', 'ga', 'md', 'nc', 'sc', 'va', 'wv'], ['al', 'ky', 'ms', 'tn'], ['ar', 'la', 'ok', 'tx'], ['az', 'co', 'id', 'mt', 'nm', 'nv', 'ut', 'wy'], ['ak', 'ca', 'hi', 'or', 'wa']]))
ny_state_list = ['ny']
ny_state_map = {ny_state_list[0]: ['jfk', 'ny_minus_jfk']}
region_list = nat_list + hhs_list + cen_list + ny_state_list + atom_list
region_map = {}
region_map.update(nat_map)
region_map.update(hhs_map)
region_map.update(cen_map)
region_map.update(ny_state_map)
region_map.update(atom_map)
|
def calc_size(digest_size, seed_size, num_rounds, lowmc_k, lowmc_r, lowmc_m, is_unruh):
lowmc_n = lowmc_k
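# note: (x + 7) >> 3 rounds a bit count up to whole bytes, i.e. ceil(x / 8)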
# bytes required to store one input share
input_size = (lowmc_k + 7) >> 3
# bytes required to store one output share
output_size = (lowmc_n + 7) >> 3
# number of bits per view per LowMC round
view_round_size = lowmc_m * 3
# bytes required to store communicated bits (i.e. views) of one round
view_size = (view_round_size * lowmc_r + 7) >> 3
# bytes required to store collapsed challenge
collapsed_challenge_size = (num_rounds + 3) >> 2
if is_unruh:
unruh_without_input_bytes_size = seed_size + view_size
unruh_with_input_bytes_size = unruh_without_input_bytes_size + input_size
else:
unruh_without_input_bytes_size = unruh_with_input_bytes_size = 0
# we can use unruh_without_input_bytes_size here. In all cases where we need
# to write more, we do not need to write the input share
per_round_size = input_size + view_size + digest_size + 2 * seed_size + unruh_without_input_bytes_size
max_signature_size = collapsed_challenge_size + 32 + num_rounds * per_round_size
print(unruh_without_input_bytes_size, unruh_with_input_bytes_size, max_signature_size)
# Picnic with partial Sbox layer
calc_size(32, 16, 219, 128, 20, 10, False)
calc_size(32, 16, 219, 128, 20, 10, True)
calc_size(48, 24, 329, 192, 30, 10, False)
calc_size(48, 24, 329, 192, 30, 10, True)
calc_size(64, 32, 438, 256, 38, 10, False)
calc_size(64, 32, 438, 256, 38, 10, True)
# Picnic with full Sbox layer
calc_size(32, 16, 219, 129, 4, 43, False)
calc_size(48, 24, 329, 192, 4, 64, False)
calc_size(64, 32, 438, 255, 4, 85, False)
|
def calc_size(digest_size, seed_size, num_rounds, lowmc_k, lowmc_r, lowmc_m, is_unruh):
lowmc_n = lowmc_k
input_size = (lowmc_k + 7) >> 3
output_size = (lowmc_n + 7) >> 3
view_round_size = lowmc_m * 3
view_size = (view_round_size * lowmc_r + 7) >> 3
collapsed_challenge_size = (num_rounds + 3) >> 2
if is_unruh:
unruh_without_input_bytes_size = seed_size + view_size
unruh_with_input_bytes_size = unruh_without_input_bytes_size + input_size
else:
unruh_without_input_bytes_size = unruh_with_input_bytes_size = 0
per_round_size = input_size + view_size + digest_size + 2 * seed_size + unruh_without_input_bytes_size
max_signature_size = collapsed_challenge_size + 32 + num_rounds * per_round_size
print(unruh_without_input_bytes_size, unruh_with_input_bytes_size, max_signature_size)
calc_size(32, 16, 219, 128, 20, 10, False)
calc_size(32, 16, 219, 128, 20, 10, True)
calc_size(48, 24, 329, 192, 30, 10, False)
calc_size(48, 24, 329, 192, 30, 10, True)
calc_size(64, 32, 438, 256, 38, 10, False)
calc_size(64, 32, 438, 256, 38, 10, True)
calc_size(32, 16, 219, 129, 4, 43, False)
calc_size(48, 24, 329, 192, 4, 64, False)
calc_size(64, 32, 438, 255, 4, 85, False)
|
# -*- coding: utf-8 -*-
class dllink:
"""doubly linked node (that may also be a "head" a list)
A Doubly-linked List class. This class simply contains a link of
node's. By adding a "head" node (sentinel), deleting a node is
extremely fast (see "Introduction to Algorithm"). This class does
not keep the length information as it is not necessary for the FM
algorithm. This saves memory and run-time to update the length
information. Note that this class does not own the list node. They
are supplied by the caller in order to better reuse the nodes.
"""
__slots__ = ('key', 'next', 'prev', 'index')
def __init__(self, index=None):
"""initialization
Keyword Arguments:
index (type): description (default: {None})
"""
self.key = 0
self.next = self.prev = self
self.index = index
def detach(self):
"""detach from a list """
assert self.next
n = self.next
p = self.prev
p.next = n
n.prev = p
def lock(self):
"""lock the node (and don't append it to any list) """
self.next = None
def is_locked(self):
"""whether the node is locked
Returns:
bool: description
"""
return self.next is None
def is_empty(self):
"""whether the list is empty
Returns:
bool: description
"""
return self.next == self
def clear(self):
"""clear"""
self.next = self.prev = self
def appendleft(self, node):
"""append the node to the front
Arguments:
node (dllink): description
"""
node.next = self.next
self.next.prev = node
self.next = node
node.prev = self
def append(self, node):
"""append the node to the back
Arguments:
node (dllink): description
"""
node.prev = self.prev
self.prev.next = node
self.prev = node
node.next = self
def popleft(self):
"""pop a node from the front
Returns:
dllink: description
"""
res = self.next
self.next = res.next
self.next.prev = self
return res
def pop(self):
"""pop a node from the back
Returns:
dllink: description
"""
res = self.prev
self.prev = res.prev
self.prev.next = self
return res
def __iter__(self):
"""iterable
Returns:
dllink: itself
"""
cur = self.next
while cur != self:
yield cur
cur = cur.next
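# A minimal usage sketch (illustrative code, not part of the original file):
if __name__ == '__main__':
    head = dllink()                 # sentinel node acting as the list head
    a, b = dllink(index=1), dllink(index=2)
    head.append(a)                  # list: head -> a
    head.appendleft(b)              # list: head -> b -> a
    print([node.index for node in head])  # [2, 1]
    a.detach()                      # O(1) removal thanks to the sentinel
    print([node.index for node in head])  # [2]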
|
class Dllink:
"""doubly linked node (that may also be a "head" a list)
A Doubly-linked List class. This class simply contains a link of
node's. By adding a "head" node (sentinel), deleting a node is
extremely fast (see "Introduction to Algorithm"). This class does
not keep the length information as it is not necessary for the FM
algorithm. This saves memory and run-time to update the length
information. Note that this class does not own the list node. They
are supplied by the caller in order to better reuse the nodes.
"""
__slots__ = ('key', 'next', 'prev', 'index')
def __init__(self, index=None):
"""initialization
Keyword Arguments:
index (type): description (default: {None})
"""
self.key = 0
self.next = self.prev = self
self.index = index
def detach(self):
"""detach from a list """
assert self.next
n = self.next
p = self.prev
p.next = n
n.prev = p
def lock(self):
"""lock the node (and don't append it to any list) """
self.next = None
def is_locked(self):
"""whether the node is locked
Returns:
bool: description
"""
return self.next is None
def is_empty(self):
"""whether the list is empty
Returns:
bool: description
"""
return self.next == self
def clear(self):
"""clear"""
self.next = self.prev = self
def appendleft(self, node):
"""append the node to the front
Arguments:
node (dllink): description
"""
node.next = self.next
self.next.prev = node
self.next = node
node.prev = self
def append(self, node):
"""append the node to the back
Arguments:
node (dllink): description
"""
node.prev = self.prev
self.prev.next = node
self.prev = node
node.next = self
def popleft(self):
"""pop a node from the front
Returns:
dllink: description
"""
res = self.next
self.next = res.next
self.next.prev = self
return res
def pop(self):
"""pop a node from the back
Returns:
dllink: description
"""
res = self.prev
self.prev = res.prev
self.prev.next = self
return res
def __iter__(self):
"""iterable
Returns:
dllink: itself
"""
cur = self.next
while cur != self:
yield cur
cur = cur.next
|
"""Internal API endpoint constant library.
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
_firewall_policies_endpoints = [
[
"queryCombinedFirewallPolicyMembers",
"GET",
"/policy/combined/firewall-members/v1",
"Search for members of a Firewall Policy in your environment by providing an FQL "
"filter and paging details. Returns a set of host details which match the filter criteria",
"firewall_policies",
[
{
"type": "string",
"description": "The ID of the Firewall Policy to search for members of",
"name": "id",
"in": "query"
},
{
"type": "string",
"description": "The filter expression that should be used to limit the results",
"name": "filter",
"in": "query"
},
{
"minimum": 0,
"type": "integer",
"description": "The offset to start retrieving records from",
"name": "offset",
"in": "query"
},
{
"maximum": 5000,
"minimum": 1,
"type": "integer",
"description": "The maximum records to return. [1-5000]",
"name": "limit",
"in": "query"
},
{
"type": "string",
"description": "The property to sort by",
"name": "sort",
"in": "query"
}
]
],
[
"queryCombinedFirewallPolicies",
"GET",
"/policy/combined/firewall/v1",
"Search for Firewall Policies in your environment by providing an FQL filter and paging details. "
"Returns a set of Firewall Policies which match the filter criteria",
"firewall_policies",
[
{
"type": "string",
"description": "The filter expression that should be used to limit the results",
"name": "filter",
"in": "query"
},
{
"minimum": 0,
"type": "integer",
"description": "The offset to start retrieving records from",
"name": "offset",
"in": "query"
},
{
"maximum": 5000,
"minimum": 1,
"type": "integer",
"description": "The maximum records to return. [1-5000]",
"name": "limit",
"in": "query"
},
{
"enum": [
"created_by.asc",
"created_by.desc",
"created_timestamp.asc",
"created_timestamp.desc",
"enabled.asc",
"enabled.desc",
"modified_by.asc",
"modified_by.desc",
"modified_timestamp.asc",
"modified_timestamp.desc",
"name.asc",
"name.desc",
"platform_name.asc",
"platform_name.desc",
"precedence.asc",
"precedence.desc"
],
"type": "string",
"description": "The property to sort by",
"name": "sort",
"in": "query"
}
]
],
[
"performFirewallPoliciesAction",
"POST",
"/policy/entities/firewall-actions/v1",
"Perform the specified action on the Firewall Policies specified in the request",
"firewall_policies",
[
{
"enum": [
"add-host-group",
"disable",
"enable",
"remove-host-group"
],
"type": "string",
"description": "The action to perform",
"name": "action_name",
"in": "query",
"required": True
},
{
"name": "body",
"in": "body",
"required": True
}
]
],
[
"setFirewallPoliciesPrecedence",
"POST",
"/policy/entities/firewall-precedence/v1",
"Sets the precedence of Firewall Policies based on the order of IDs specified in the request. "
"The first ID specified will have the highest precedence and the last ID specified will have the lowest. "
"You must specify all non-Default Policies for a platform when updating precedence",
"firewall_policies",
[
{
"name": "body",
"in": "body",
"required": True
}
]
],
[
"getFirewallPolicies",
"GET",
"/policy/entities/firewall/v1",
"Retrieve a set of Firewall Policies by specifying their IDs",
"firewall_policies",
[
{
"type": "array",
"items": {
"maxLength": 32,
"minLength": 32,
"type": "string"
},
"collectionFormat": "multi",
"description": "The IDs of the Firewall Policies to return",
"name": "ids",
"in": "query",
"required": True
}
]
],
[
"createFirewallPolicies",
"POST",
"/policy/entities/firewall/v1",
"Create Firewall Policies by specifying details about the policy to create",
"firewall_policies",
[
{
"name": "body",
"in": "body",
"required": True
},
{
"maxLength": 32,
"minLength": 32,
"type": "string",
"description": "The policy ID to be cloned from",
"name": "clone_id",
"in": "query"
}
]
],
[
"updateFirewallPolicies",
"PATCH",
"/policy/entities/firewall/v1",
"Update Firewall Policies by specifying the ID of the policy and details to update",
"firewall_policies",
[
{
"name": "body",
"in": "body",
"required": True
}
]
],
[
"deleteFirewallPolicies",
"DELETE",
"/policy/entities/firewall/v1",
"Delete a set of Firewall Policies by specifying their IDs",
"firewall_policies",
[
{
"type": "array",
"items": {
"maxLength": 32,
"minLength": 32,
"type": "string"
},
"collectionFormat": "multi",
"description": "The IDs of the Firewall Policies to delete",
"name": "ids",
"in": "query",
"required": True
}
]
],
[
"queryFirewallPolicyMembers",
"GET",
"/policy/queries/firewall-members/v1",
"Search for members of a Firewall Policy in your environment by providing an FQL filter and paging details. "
"Returns a set of Agent IDs which match the filter criteria",
"firewall_policies",
[
{
"type": "string",
"description": "The ID of the Firewall Policy to search for members of",
"name": "id",
"in": "query"
},
{
"type": "string",
"description": "The filter expression that should be used to limit the results",
"name": "filter",
"in": "query"
},
{
"minimum": 0,
"type": "integer",
"description": "The offset to start retrieving records from",
"name": "offset",
"in": "query"
},
{
"maximum": 5000,
"minimum": 1,
"type": "integer",
"description": "The maximum records to return. [1-5000]",
"name": "limit",
"in": "query"
},
{
"type": "string",
"description": "The property to sort by",
"name": "sort",
"in": "query"
}
]
],
[
"queryFirewallPolicies",
"GET",
"/policy/queries/firewall/v1",
"Search for Firewall Policies in your environment by providing an FQL filter and paging details. "
"Returns a set of Firewall Policy IDs which match the filter criteria",
"firewall_policies",
[
{
"type": "string",
"description": "The filter expression that should be used to limit the results",
"name": "filter",
"in": "query"
},
{
"minimum": 0,
"type": "integer",
"description": "The offset to start retrieving records from",
"name": "offset",
"in": "query"
},
{
"maximum": 5000,
"minimum": 1,
"type": "integer",
"description": "The maximum records to return. [1-5000]",
"name": "limit",
"in": "query"
},
{
"enum": [
"created_by.asc",
"created_by.desc",
"created_timestamp.asc",
"created_timestamp.desc",
"enabled.asc",
"enabled.desc",
"modified_by.asc",
"modified_by.desc",
"modified_timestamp.asc",
"modified_timestamp.desc",
"name.asc",
"name.desc",
"platform_name.asc",
"platform_name.desc",
"precedence.asc",
"precedence.desc"
],
"type": "string",
"description": "The property to sort by",
"name": "sort",
"in": "query"
}
]
]
]
|
"""Internal API endpoint constant library.
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
_firewall_policies_endpoints = [['queryCombinedFirewallPolicyMembers', 'GET', '/policy/combined/firewall-members/v1', 'Search for members of a Firewall Policy in your environment by providing an FQL filter and paging details. Returns a set of host details which match the filter criteria', 'firewall_policies', [{'type': 'string', 'description': 'The ID of the Firewall Policy to search for members of', 'name': 'id', 'in': 'query'}, {'type': 'string', 'description': 'The filter expression that should be used to limit the results', 'name': 'filter', 'in': 'query'}, {'minimum': 0, 'type': 'integer', 'description': 'The offset to start retrieving records from', 'name': 'offset', 'in': 'query'}, {'maximum': 5000, 'minimum': 1, 'type': 'integer', 'description': 'The maximum records to return. [1-5000]', 'name': 'limit', 'in': 'query'}, {'type': 'string', 'description': 'The property to sort by', 'name': 'sort', 'in': 'query'}]], ['queryCombinedFirewallPolicies', 'GET', '/policy/combined/firewall/v1', 'Search for Firewall Policies in your environment by providing an FQL filter and paging details. Returns a set of Firewall Policies which match the filter criteria', 'firewall_policies', [{'type': 'string', 'description': 'The filter expression that should be used to limit the results', 'name': 'filter', 'in': 'query'}, {'minimum': 0, 'type': 'integer', 'description': 'The offset to start retrieving records from', 'name': 'offset', 'in': 'query'}, {'maximum': 5000, 'minimum': 1, 'type': 'integer', 'description': 'The maximum records to return. [1-5000]', 'name': 'limit', 'in': 'query'}, {'enum': ['created_by.asc', 'created_by.desc', 'created_timestamp.asc', 'created_timestamp.desc', 'enabled.asc', 'enabled.desc', 'modified_by.asc', 'modified_by.desc', 'modified_timestamp.asc', 'modified_timestamp.desc', 'name.asc', 'name.desc', 'platform_name.asc', 'platform_name.desc', 'precedence.asc', 'precedence.desc'], 'type': 'string', 'description': 'The property to sort by', 'name': 'sort', 'in': 'query'}]], ['performFirewallPoliciesAction', 'POST', '/policy/entities/firewall-actions/v1', 'Perform the specified action on the Firewall Policies specified in the request', 'firewall_policies', [{'enum': ['add-host-group', 'disable', 'enable', 'remove-host-group'], 'type': 'string', 'description': 'The action to perform', 'name': 'action_name', 'in': 'query', 'required': True}, {'name': 'body', 'in': 'body', 'required': True}]], ['setFirewallPoliciesPrecedence', 'POST', '/policy/entities/firewall-precedence/v1', 'Sets the precedence of Firewall Policies based on the order of IDs specified in the request. The first ID specified will have the highest precedence and the last ID specified will have the lowest. You must specify all non-Default Policies for a platform when updating precedence', 'firewall_policies', [{'name': 'body', 'in': 'body', 'required': True}]], ['getFirewallPolicies', 'GET', '/policy/entities/firewall/v1', 'Retrieve a set of Firewall Policies by specifying their IDs', 'firewall_policies', [{'type': 'array', 'items': {'maxLength': 32, 'minLength': 32, 'type': 'string'}, 'collectionFormat': 'multi', 'description': 'The IDs of the Firewall Policies to return', 'name': 'ids', 'in': 'query', 'required': True}]], ['createFirewallPolicies', 'POST', '/policy/entities/firewall/v1', 'Create Firewall Policies by specifying details about the policy to create', 'firewall_policies', [{'name': 'body', 'in': 'body', 'required': True}, {'maxLength': 32, 'minLength': 32, 'type': 'string', 'description': 'The policy ID to be cloned from', 'name': 'clone_id', 'in': 'query'}]], ['updateFirewallPolicies', 'PATCH', '/policy/entities/firewall/v1', 'Update Firewall Policies by specifying the ID of the policy and details to update', 'firewall_policies', [{'name': 'body', 'in': 'body', 'required': True}]], ['deleteFirewallPolicies', 'DELETE', '/policy/entities/firewall/v1', 'Delete a set of Firewall Policies by specifying their IDs', 'firewall_policies', [{'type': 'array', 'items': {'maxLength': 32, 'minLength': 32, 'type': 'string'}, 'collectionFormat': 'multi', 'description': 'The IDs of the Firewall Policies to delete', 'name': 'ids', 'in': 'query', 'required': True}]], ['queryFirewallPolicyMembers', 'GET', '/policy/queries/firewall-members/v1', 'Search for members of a Firewall Policy in your environment by providing an FQL filter and paging details. Returns a set of Agent IDs which match the filter criteria', 'firewall_policies', [{'type': 'string', 'description': 'The ID of the Firewall Policy to search for members of', 'name': 'id', 'in': 'query'}, {'type': 'string', 'description': 'The filter expression that should be used to limit the results', 'name': 'filter', 'in': 'query'}, {'minimum': 0, 'type': 'integer', 'description': 'The offset to start retrieving records from', 'name': 'offset', 'in': 'query'}, {'maximum': 5000, 'minimum': 1, 'type': 'integer', 'description': 'The maximum records to return. [1-5000]', 'name': 'limit', 'in': 'query'}, {'type': 'string', 'description': 'The property to sort by', 'name': 'sort', 'in': 'query'}]], ['queryFirewallPolicies', 'GET', '/policy/queries/firewall/v1', 'Search for Firewall Policies in your environment by providing an FQL filter and paging details. Returns a set of Firewall Policy IDs which match the filter criteria', 'firewall_policies', [{'type': 'string', 'description': 'The filter expression that should be used to limit the results', 'name': 'filter', 'in': 'query'}, {'minimum': 0, 'type': 'integer', 'description': 'The offset to start retrieving records from', 'name': 'offset', 'in': 'query'}, {'maximum': 5000, 'minimum': 1, 'type': 'integer', 'description': 'The maximum records to return. [1-5000]', 'name': 'limit', 'in': 'query'}, {'enum': ['created_by.asc', 'created_by.desc', 'created_timestamp.asc', 'created_timestamp.desc', 'enabled.asc', 'enabled.desc', 'modified_by.asc', 'modified_by.desc', 'modified_timestamp.asc', 'modified_timestamp.desc', 'name.asc', 'name.desc', 'platform_name.asc', 'platform_name.desc', 'precedence.asc', 'precedence.desc'], 'type': 'string', 'description': 'The property to sort by', 'name': 'sort', 'in': 'query'}]]]
|
def visit_rate_ate(df, test_set=False):
if test_set:
treatment_visit_rate = df[df.Treatment == 1].Outcome.mean() * 100
control_visit_rate = df[df.Treatment == 0].Outcome.mean() * 100
average_treatment_effect = treatment_visit_rate - control_visit_rate
print("Test set visit rate uplift: {:.2f}%".format(average_treatment_effect))
return average_treatment_effect
else:
mens = df[df.segment == "Mens E-Mail"].visit.mean() * 100
womens = df[df.segment == "Womens E-Mail"].visit.mean() * 100
control = df[df.segment == "No E-Mail"].visit.mean() * 100
print("Men's E-Mail visit rate: {:.2f}%".format(mens))
print("Women's E-Mail visit rate: {:.2f}%".format(womens))
print("Control E-mail visit Rate: {:.2f}%".format(control))
print("---------------------------------")
print("Men's visit rate uplift: {:.2f}%".format(mens - control))
print("Women's visit rate uplift: {:.2f}%".format(womens - control))
def conversion_rate_ate(df):
mens = df[df.segment == "Mens E-Mail"].conversion.mean() * 100
womens = df[df.segment == "Womens E-Mail"].conversion.mean() * 100
control = df[df.segment == "No E-Mail"].conversion.mean() * 100
print("Men's E-Mail conversion rate: {:.2f}%".format(mens))
print("Women's E-Mail conversion rate: {:.2f}%".format(womens))
print("Control E-mail conversion Rate: {:.2f}%".format(control))
print("---------------------------------")
print("Men's conversion rate uplift: {:.2f}%".format(mens - control))
print("Women's conversion rate uplift: {:.2f}%".format(womens - control))
def spending_ate(df):
mens = df[df.segment == "Mens E-Mail"].spend.mean()
womens = df[df.segment == "Womens E-Mail"].spend.mean()
control = df[df.segment == "No E-Mail"].spend.mean()
print("Men's E-Mail spending: ${:.2f}".format(mens))
print("Women's E-Mail spending: ${:.2f}".format(womens))
print("Control E-mail spending: ${:.2f}".format(control))
print("---------------------------------")
print("Men's spending uplift: ${:.2f}".format(mens - control))
print("Women's spending uplift: ${:.2f}".format(womens - control))
def spend_given_purchase(df):
print("Men's average spend given purchase: ${:.2f}".format(
df[(df.conversion == 1) & (df.segment == 'Mens E-Mail')].spend.mean()))
print("Women's average spend given purchase: ${:.2f}".format(
df[(df.conversion == 1) & (df.segment == 'Womens E-Mail')].spend.mean()))
print("Control average spend given purchase: ${:.2f}".format(
df[(df.conversion == 1) & (df.segment == 'No E-Mail')].spend.mean()))
def purchase_given_visit(df):
print("Men's purchase rate given visit: {:.2f}%".format(100 * (
len(df[(df.conversion == 1) & (df.segment == 'Mens E-Mail')]) / len(
df[(df.visit == 1) & (df.segment == 'Mens E-Mail')]))))
print("Women's purchase rate given visit: {:.2f}%".format(100 * (
len(df[(df.conversion == 1) & (df.segment == 'Womens E-Mail')]) / len(
df[(df.visit == 1) & (df.segment == 'Womens E-Mail')]))))
print("Control purchase rate given visit: {:.2f}%".format(100 * (
len(df[(df.conversion == 1) & (df.segment == 'No E-Mail')]) / len(
df[(df.visit == 1) & (df.segment == 'No E-Mail')]))))
def visit_rate(df):
print("Men's visit rate: {:.2f}%".format(
100 * (len(df[(df.visit == 1) & (df.segment == 'Mens E-Mail')]) / len(df[(df.segment == 'Mens E-Mail')]))))
print("Women's visit rate: {:.2f}%".format(
100 * (len(df[(df.visit == 1) & (df.segment == 'Womens E-Mail')]) / len(df[(df.segment == 'Womens E-Mail')]))))
print("Control visit rate: {:.2f}%".format(
100 * (len(df[(df.visit == 1) & (df.segment == 'No E-Mail')]) / len(df[(df.segment == 'No E-Mail')]))))
def spend_per_head(df):
print("Men's mean spend: ${:.2f}".format(df[(df.segment == 'Mens E-Mail')].spend.mean()))
print("Women's mean spend: ${:.2f}".format(df[(df.segment == 'Womens E-Mail')].spend.mean()))
print("Control mean spend: ${:.2f}".format(df[(df.segment == 'No E-Mail')].spend.mean()))
|
def visit_rate_ate(df, test_set=False):
if test_set:
treatment_visit_rate = df[df.Treatment == 1].Outcome.mean() * 100
control_visit_rate = df[df.Treatment == 0].Outcome.mean() * 100
average_treatment_effect = treatment_visit_rate - control_visit_rate
print('Test set visit rate uplift: {:.2f}%'.format(average_treatment_effect))
return average_treatment_effect
else:
mens = df[df.segment == 'Mens E-Mail'].visit.mean() * 100
womens = df[df.segment == 'Womens E-Mail'].visit.mean() * 100
control = df[df.segment == 'No E-Mail'].visit.mean() * 100
print("Men's E-Mail visit rate: {:.2f}%".format(mens))
print("Women's E-Mail visit rate: {:.2f}%".format(womens))
print('Control E-Mail visit rate: {:.2f}%'.format(control))
print('---------------------------------')
print("Men's visit rate uplift: {:.2f}%".format(mens - control))
print("Women's visit rate uplift: {:.2f}%".format(womens - control))
def conversion_rate_ate(df):
mens = df[df.segment == 'Mens E-Mail'].conversion.mean() * 100
womens = df[df.segment == 'Womens E-Mail'].conversion.mean() * 100
control = df[df.segment == 'No E-Mail'].conversion.mean() * 100
print("Men's E-Mail conversion rate: {:.2f}%".format(mens))
print("Women's E-Mail conversion rate: {:.2f}%".format(womens))
print('Control E-Mail conversion rate: {:.2f}%'.format(control))
print('---------------------------------')
print("Men's conversion rate uplift: {:.2f}%".format(mens - control))
print("Women's conversion rate uplift: {:.2f}%".format(womens - control))
def spending_ate(df):
mens = df[df.segment == 'Mens E-Mail'].spend.mean()
womens = df[df.segment == 'Womens E-Mail'].spend.mean()
control = df[df.segment == 'No E-Mail'].spend.mean()
print("Men's E-Mail spending: ${:.2f}".format(mens))
print("Women's E-Mail spending: ${:.2f}".format(womens))
print('Control E-Mail spending: ${:.2f}'.format(control))
print('---------------------------------')
print("Men's spending uplift: ${:.2f}".format(mens - control))
print("Women's spending uplift: ${:.2f}".format(womens - control))
def spend_given_purchase(df):
print("Men's average spend given purchase: ${:.2f}".format(df[(df.conversion == 1) & (df.segment == 'Mens E-Mail')].spend.mean()))
print("Women's average spend given purchase: ${:.2f}".format(df[(df.conversion == 1) & (df.segment == 'Womens E-Mail')].spend.mean()))
print('Control average spend given purchase: ${:.2f}'.format(df[(df.conversion == 1) & (df.segment == 'No E-Mail')].spend.mean()))
def purchase_given_visit(df):
print("Men's purchase rate given visit: {:.2f}%".format(100 * (len(df[(df.conversion == 1) & (df.segment == 'Mens E-Mail')]) / len(df[(df.visit == 1) & (df.segment == 'Mens E-Mail')]))))
print("Women's purchase rate given visit: {:.2f}%".format(100 * (len(df[(df.conversion == 1) & (df.segment == 'Womens E-Mail')]) / len(df[(df.visit == 1) & (df.segment == 'Womens E-Mail')]))))
print('Control purchase rate given visit: {:.2f}%'.format(100 * (len(df[(df.conversion == 1) & (df.segment == 'No E-Mail')]) / len(df[(df.visit == 1) & (df.segment == 'No E-Mail')]))))
def visit_rate(df):
print("Men's visit rate: {:.2f}%".format(100 * (len(df[(df.visit == 1) & (df.segment == 'Mens E-Mail')]) / len(df[df.segment == 'Mens E-Mail']))))
print("Women's visit rate: {:.2f}%".format(100 * (len(df[(df.visit == 1) & (df.segment == 'Womens E-Mail')]) / len(df[df.segment == 'Womens E-Mail']))))
print('Control visit rate: {:.2f}%'.format(100 * (len(df[(df.visit == 1) & (df.segment == 'No E-Mail')]) / len(df[df.segment == 'No E-Mail']))))
def spend_per_head(df):
print("Men's mean spend: ${:.2f}".format(df[df.segment == 'Mens E-Mail'].spend.mean()))
print("Women's mean spend: ${:.2f}".format(df[df.segment == 'Womens E-Mail'].spend.mean()))
print('Control mean spend: ${:.2f}'.format(df[df.segment == 'No E-Mail'].spend.mean()))
|
__all__ = [
'provider',
'songObj',
'spotifyClient',
'utils'
]
#! You should be able to do all you want with just these three lines
#! from spotdl.search.spotifyClient import initialize
#! from spotdl.search.songObj import songObj
#! from spotdl.search.utils import *
|
__all__ = ['provider', 'songObj', 'spotifyClient', 'utils']
|
class Registry:
def __init__(self, name):
self._name = name
self._registry_dict = dict()
def register(self, name=None, obj=None):
if obj is not None:
if name is None:
name = obj.__name__
return self._register(obj, name)
return self._decorate(name)
def get(self, name):
if name not in self._registry_dict:
self._key_not_found(name)
return self._registry_dict[name]
def _register(self, obj, name):
if name in self._registry_dict:
raise KeyError("{} is already registered in {}".format(name, self._name))
self._registry_dict[name] = obj
def _decorate(self, name=None):
def wrap(obj):
cls_name = name
if cls_name is None:
cls_name = obj.__name__
self._register(obj, cls_name)
return obj
return wrap
def _key_not_found(self, name):
raise KeyError("{} is unknown type of {} ".format(name, self._name))
@property
def registry_dict(self):
return self._registry_dict
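# A minimal usage sketch (illustrative code, not part of the original file):
if __name__ == "__main__":
    models = Registry("models")

    @models.register()  # the registered name defaults to the class name
    class ResNet:
        pass

    models.register(name="resnet_alias", obj=ResNet)  # explicit registration
    assert models.get("ResNet") is ResNet
    assert models.get("resnet_alias") is ResNet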
|
class Registry:
def __init__(self, name):
self._name = name
self._registry_dict = dict()
def register(self, name=None, obj=None):
if obj is not None:
if name is None:
name = obj.__name__
return self._register(obj, name)
return self._decorate(name)
def get(self, name):
if name not in self._registry_dict:
self._key_not_found(name)
return self._registry_dict[name]
def _register(self, obj, name):
if name in self._registry_dict:
raise KeyError('{} is already registered in {}'.format(name, self._name))
self._registry_dict[name] = obj
def _decorate(self, name=None):
def wrap(obj):
cls_name = name
if cls_name is None:
cls_name = obj.__name__
self._register(obj, cls_name)
return obj
return wrap
def _key_not_found(self, name):
raise KeyError('{} is an unknown type of {}'.format(name, self._name))
@property
def registry_dict(self):
return self._registry_dict
|
#!/usr/bin/env python3
"""RZFeeser | Alta3 Research
learning about for logic"""
# create list of dictionaries for farms
farms = [{"name": "NE Farm", "agriculture": ["sheep", "cows", "pigs", "chickens", "llamas", "cats"]},
{"name": "W Farm", "agriculture": ["pigs", "chickens", "llamas"]},
{"name": "SE Farm", "agriculture": ["chickens", "carrots", "celery"]}]
for farm in farms:
print("\n" + farm.get("name"), end = ":\n")
for agri in farm.get("agriculture"):
print(agri, end = ", ")
print("END OF FARM")
|
"""RZFeeser | Alta3 Research
learning about for logic"""
farms = [{'name': 'NE Farm', 'agriculture': ['sheep', 'cows', 'pigs', 'chickens', 'llamas', 'cats']}, {'name': 'W Farm', 'agriculture': ['pigs', 'chickens', 'llamas']}, {'name': 'SE Farm', 'agriculture': ['chickens', 'carrots', 'celery']}]
for farm in farms:
print('\n' + farm.get('name'), end=':\n')
for agri in farm.get('agriculture'):
print(agri, end=', ')
print('END OF FARM')
|
def findRoot(x, power, epsilon):
"""Assumes x and epsilon an int or float, power an int, epsilon > 0
& power >= 1
Returns float y such that y**power is within epsilon of x.
If such float does not exist, it returns None"""
if x < 0 and power % 2 == 0:
return None  # since negative numbers have no even-powered roots
low = min(-1.0, x)
high = max(1.0, x)
ans = (high + low) / 2.0
while abs(ans**power - x) >= epsilon:
if ans**power < x:
low = ans
else:
high = ans
ans = (high + low) / 2.0
return ans
def testFindRoot():
epsilon = 0.0001
for x in [-0.25, 0.25, 2, -2, 8, -8]:
for power in range(1,4):
print('Testing x = ', str(x), ' and power = ', power)
result = findRoot(x, power, epsilon)
if result is None:
print(' No root')
else:
print(' ', result**power, ' = ', x)
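# A quick demonstration of the bisection behaviour (illustrative code, not
# part of the original file):
if __name__ == '__main__':
    print(findRoot(27, 3, 0.0001))   # ~3.0
    print(findRoot(-8, 3, 0.0001))   # ~-2.0 (odd powers allow negative x)
    print(findRoot(-4, 2, 0.0001))   # None (no real even-powered root)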
|
def find_root(x, power, epsilon):
"""Assumes x and epsilon an int or float, power an int, epsilon > 0
& power >= 1
Returns float y such that y**power is within epsilon of x.
If such float does not exist, it returns None"""
if x < 0 and power % 2 == 0:
return None
low = min(-1.0, x)
high = max(1.0, x)
ans = (high + low) / 2.0
while abs(ans ** power - x) >= epsilon:
if ans ** power < x:
low = ans
else:
high = ans
ans = (high + low) / 2.0
return ans
def test_find_root():
epsilon = 0.0001
for x in [-0.25, 0.25, 2, -2, 8, -8]:
for power in range(1, 4):
print('Testing x = ', str(x), ' and power = ', power)
result = find_root(x, power, epsilon)
if result is None:
print(' No root')
else:
print(' ', result ** power, ' = ', x)
|
def dobro(n):  # "dobro": double the value
return n * 2
def metade(n):  # "metade": half the value
return n / 2
def aumentar(n):  # "aumentar": increase by 10%
return n + (10 / 100 * n)
def diminuir(n):  # "diminuir": decrease by 13%
return n - (13 / 100 * n)
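# Quick examples (illustrative, not part of the original file):
# dobro(5) -> 10, metade(5) -> 2.5, aumentar(100) -> 110.0, diminuir(100) -> 87.0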
|
def dobro(n):
return n * 2
def metade(n):
return n / 2
def aumentar(n):
return n + 10 / 100 * n
def diminuir(n):
return n - 13 / 100 * n
|
g = [ (['p'],[('cat','wff')]),
(['q'],[('cat','wff')]),
(['r'],[('cat','wff')]),
(['s'],[('cat','wff')]),
(['t'],[('cat','wff')]),
(['not'],[('sel','wff'),('cat','wff')]),
(['and'],[('sel','wff'),('sel','wff'),('cat','wff')]),
(['or'],[('sel','wff'),('sel','wff'),('cat','wff')]),
(['implies'],[('sel','wff'),('sel','wff'),('cat','wff')])
]
|
g = [(['p'], [('cat', 'wff')]), (['q'], [('cat', 'wff')]), (['r'], [('cat', 'wff')]), (['s'], [('cat', 'wff')]), (['t'], [('cat', 'wff')]), (['not'], [('sel', 'wff'), ('cat', 'wff')]), (['and'], [('sel', 'wff'), ('sel', 'wff'), ('cat', 'wff')]), (['or'], [('sel', 'wff'), ('sel', 'wff'), ('cat', 'wff')]), (['implies'], [('sel', 'wff'), ('sel', 'wff'), ('cat', 'wff')])]
|
def f(n, c=1):
print(c)
if c == n:
return c
return f(n, c+1)
f(12)
|
def f(n, c=1):
print(c)
if c == n:
return c
return f(n, c + 1)
f(12)
|
MD_HEADER = """\
# Changelog
"""
MD_ENTRY = """\
## {version}
[PR #{pr_number}]({pr_url}): {summary} (thanks [{committer}]({committer_url}))
"""
RST_HEADER = """\
Changelog
=========
"""
RST_ENTRY = """\
{version}
-------------------------------------------------
`PR #{pr_number} <{pr_url}>`__: {summary} (thanks `{committer} <{committer_url}>`__)
"""
TEMPLATES = {".md": (MD_HEADER, MD_ENTRY), ".rst": (RST_HEADER, RST_ENTRY)}
def update(current_changelog, path_extension, version, pr_event):
"""Update the changelog based on a merged pull request."""
header, entry_template = TEMPLATES[path_extension.lower()]
if current_changelog.strip() and not current_changelog.startswith(header):
raise ValueError("Changelog has a non-standard header")
details = {
"version": version,
"pr_number": pr_event["number"],
"pr_url": pr_event["html_url"],
"summary": pr_event["title"],
"committer": pr_event["user"]["login"],
"committer_url": pr_event["user"]["html_url"],
}
entry = entry_template.format_map(details)
changelog_no_header = current_changelog[len(header) :]
changelog = f"{header.strip()}\n\n{entry.strip()}\n\n{changelog_no_header.strip()}"
return f"{changelog.strip()}\n" # Guarantee a single trailing newline.
|
md_header = '# Changelog\n'
md_entry = '## {version}\n[PR #{pr_number}]({pr_url}): {summary} (thanks [{committer}]({committer_url}))\n'
rst_header = 'Changelog\n=========\n'
rst_entry = '{version}\n-------------------------------------------------\n`PR #{pr_number} <{pr_url}>`__: {summary} (thanks `{committer} <{committer_url}>`__)\n'
templates = {'.md': (md_header, md_entry), '.rst': (rst_header, rst_entry)}
def update(current_changelog, path_extension, version, pr_event):
"""Update the changelog based on a merged pull request."""
(header, entry_template) = templates[path_extension.lower()]
if current_changelog.strip() and (not current_changelog.startswith(header)):
raise ValueError('Changelog has a non-standard header')
details = {'version': version, 'pr_number': pr_event['number'], 'pr_url': pr_event['html_url'], 'summary': pr_event['title'], 'committer': pr_event['user']['login'], 'committer_url': pr_event['user']['html_url']}
entry = entry_template.format_map(details)
changelog_no_header = current_changelog[len(header):]
changelog = f'{header.strip()}\n\n{entry.strip()}\n\n{changelog_no_header.strip()}'
return f'{changelog.strip()}\n'
|
nop = b'\x00\x00'
brk = b'\x00\xA0'
lde = b'\x63\x07' # Load 0x07 (character 'a') into register V3
skp = b'\xE3\xA1' # Skip next instruction if user is NOT pressing character held in V3
with open("sknpvxtest.bin", 'wb') as f:
f.write(lde) # 0x0200 <-- Load the byte 0x07 into register V3
f.write(brk) # 0x0202 <-- Wait here until we receive the go signal (we should get some characters on the mockinput pipe before here)
f.write(skp) # 0x0204 <-- Now we should read the input pipe and check it for 'a'
f.write(brk) # 0x0206 <-- If it worked, we should skip this breakpoint.
f.write(nop) # 0x0208 <-- NOP a few times for good measure...
f.write(nop) # 0x020A
f.write(brk) # 0x020C <-- Stop here until we check the PC (which should be here, not at 0x0206) and get a character over the pipe.
f.write(skp) # 0x020E <-- Now we should read the input pipe and check it for 'a'
f.write(brk) # 0x0210 <-- If it worked, we should break here.
f.write(brk) # 0x0212 <-- If it didn't work, we will break here... or at one of the following breakpoints.
f.write(brk) # 0x0214
f.write(brk) # 0x0216
|
nop = b'\x00\x00'
brk = b'\x00\xa0'
lde = b'c\x07'
skp = b'\xe3\xa1'
with open('sknpvxtest.bin', 'wb') as f:
f.write(lde)
f.write(brk)
f.write(skp)
f.write(brk)
f.write(nop)
f.write(nop)
f.write(brk)
f.write(skp)
f.write(brk)
f.write(brk)
f.write(brk)
f.write(brk)
|
load("@bazel_skylib//rules:run_binary.bzl", "run_binary")
load("@rules_cc//cc:defs.bzl", "cc_library")
def rust_cxx_bridge(name, src, deps = []):
native.alias(
name = "%s/header" % name,
actual = src + ".h",
)
native.alias(
name = "%s/source" % name,
actual = src + ".cc",
)
run_binary(
name = "%s/generated" % name,
srcs = [src],
outs = [
src + ".h",
src + ".cc",
],
args = [
"$(location %s)" % src,
"-o",
"$(location %s.h)" % src,
"-o",
"$(location %s.cc)" % src,
],
tool = "//:codegen",
)
cc_library(
name = name,
srcs = [src + ".cc"],
deps = deps + [":%s/include" % name],
)
cc_library(
name = "%s/include" % name,
hdrs = [src + ".h"],
)
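# A hypothetical BUILD-file usage sketch (target and file names are assumed,
# not taken from the original macro):
#
#     load("//:rust_cxx_bridge.bzl", "rust_cxx_bridge")
#
#     rust_cxx_bridge(
#         name = "demo_bridge",
#         src = "demo.rs",
#         deps = [":demo_cc_deps"],
#     )
#
# This declares ":demo_bridge" (the compiled C++ side of the bridge) plus the
# ":demo_bridge/header", ":demo_bridge/source", and ":demo_bridge/include"
# helper targets generated above.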
|
load('@bazel_skylib//rules:run_binary.bzl', 'run_binary')
load('@rules_cc//cc:defs.bzl', 'cc_library')
def rust_cxx_bridge(name, src, deps=[]):
native.alias(name='%s/header' % name, actual=src + '.h')
native.alias(name='%s/source' % name, actual=src + '.cc')
run_binary(name='%s/generated' % name, srcs=[src], outs=[src + '.h', src + '.cc'], args=['$(location %s)' % src, '-o', '$(location %s.h)' % src, '-o', '$(location %s.cc)' % src], tool='//:codegen')
cc_library(name=name, srcs=[src + '.cc'], deps=deps + [':%s/include' % name])
cc_library(name='%s/include' % name, hdrs=[src + '.h'])
|
# Miho Damage Skin
success = sm.addDamageSkin(2436044)
if success:
sm.chat("The Miho Damage Skin has been added to your account's damage skin collection.")
sm.consumeItem(2436044)
|
success = sm.addDamageSkin(2436044)
if success:
sm.chat("The Miho Damage Skin has been added to your account's damage skin collection.")
sm.consumeItem(2436044)
|
#!/usr/bin/python3
def lie_bracket(f, g, q):
"""Take the Lie bracket of two vector fields.
[f, g] = (d/dq)f * g - (d/dq)g * f
Args:
f (sympy.matrix): an N x 1 symbolic vector
g (sympy.matrix): an N x 1 symbolic vector
q (sympy.matrix or List): a length N array-like object of coordinates with respect to which partial derivatives are taken.
Returns:
[f, g] (sympy.matrix): the Lie bracket of f and g, an N x 1 symbolic vector
"""
assert f.shape == g.shape, "The f and g vectors must be the same dimension."
assert len(f) == len(q), "The vector field must represent all the coordinates."
return f.jacobian(q) @ g - g.jacobian(q) @ f
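# A quick usage sketch (the example vector fields are assumed, not from the
# original file):
if __name__ == '__main__':
    import sympy
    x1, x2 = sympy.symbols('x1 x2')
    q = sympy.Matrix([x1, x2])
    f = sympy.Matrix([x2, 0])
    g = sympy.Matrix([0, x1])
    print(lie_bracket(f, g, q))  # Matrix([[x1], [-x2]])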
|
def lie_bracket(f, g, q):
"""Take the Lie bracket of two vector fields.
[f, g] = (d/dq)f * g - (d/dq)g * f
Args:
f (sympy.matrix): an N x 1 symbolic vector
g (sympy.matrix): an N x 1 symbolic vector
q (sympy.matrix or List): a length N array-like object of coordinates with respect to which partial derivatives are taken.
Returns:
[f, g] (sympy.matrix): the Lie bracket of f and g, an N x 1 symbolic vector
"""
assert f.shape == g.shape, 'The f and g vectors must be the same dimension.'
assert len(f) == len(q), 'The vector field must represent all the coordinates.'
return f.jacobian(q) @ g - g.jacobian(q) @ f
|
"""
873. Length of Longest Fibonacci Subsequence
A sequence X_1, X_2, ..., X_n is fibonacci-like if:
n >= 3
X_i + X_{i+1} = X_{i+2} for all i + 2 <= n
Given a strictly increasing array A of positive integers forming a sequence, find the length of the longest fibonacci-like subsequence of A. If one does not exist, return 0.
(Recall that a subsequence is derived from another sequence A by deleting any number of elements (including none) from A, without changing the order of the remaining elements. For example, [3, 5, 8] is a subsequence of [3, 4, 5, 6, 7, 8].)
Example 1:
Input: [1,2,3,4,5,6,7,8]
Output: 5
Explanation:
The longest subsequence that is fibonacci-like: [1,2,3,5,8].
Example 2:
Input: [1,3,7,11,12,14,18]
Output: 3
Explanation:
The longest subsequence that is fibonacci-like:
[1,11,12], [3,11,14] or [7,11,18].
Note:
3 <= A.length <= 1000
1 <= A[0] < A[1] < ... < A[A.length - 1] <= 10^9
(The time limit has been reduced by 50% for submissions in Java, C, and C++.)
"""
class Solution(object):
def lenLongestFibSubseq(self, A):
s = set(A)
n = len(A)
result = 0
for i in range(n-1):
for j in range(i+1, n):
a, b = A[i], A[j]
count = 2
while a+b in s:
a, b = b, a+b
count += 1
result = max(result, count)
return result if result > 2 else 0
|
"""
873. Length of Longest Fibonacci Subsequence
A sequence X_1, X_2, ..., X_n is fibonacci-like if:
n >= 3
X_i + X_{i+1} = X_{i+2} for all i + 2 <= n
Given a strictly increasing array A of positive integers forming a sequence, find the length of the longest fibonacci-like subsequence of A. If one does not exist, return 0.
(Recall that a subsequence is derived from another sequence A by deleting any number of elements (including none) from A, without changing the order of the remaining elements. For example, [3, 5, 8] is a subsequence of [3, 4, 5, 6, 7, 8].)
Example 1:
Input: [1,2,3,4,5,6,7,8]
Output: 5
Explanation:
The longest subsequence that is fibonacci-like: [1,2,3,5,8].
Example 2:
Input: [1,3,7,11,12,14,18]
Output: 3
Explanation:
The longest subsequence that is fibonacci-like:
[1,11,12], [3,11,14] or [7,11,18].
Note:
3 <= A.length <= 1000
1 <= A[0] < A[1] < ... < A[A.length - 1] <= 10^9
(The time limit has been reduced by 50% for submissions in Java, C, and C++.)
"""
class Solution(object):
def len_longest_fib_subseq(self, A):
s = set(A)
n = len(A)
result = 0
for i in range(n - 1):
for j in range(i + 1, n):
(a, b) = (A[i], A[j])
count = 2
while a + b in s:
(a, b) = (b, a + b)
count += 1
result = max(result, count)
return result if result > 2 else 0
|
DOCKER_IMAGES = {
"cpu": {
"tensorflow": "ritazh/azk8sml-tensorflow:latest",
},
"gpu": {
"tensorflow": "ritazh/azk8sml-tensorflow:latest-gpu",
}
}
DEFAULT_DOCKER_IMAGE = "ritazh/azk8sml-tensorflow:latest"
DEFAULT_ARCH = "cpu"
|
docker_images = {'cpu': {'tensorflow': 'ritazh/azk8sml-tensorflow:latest'}, 'gpu': {'tensorflow': 'ritazh/azk8sml-tensorflow:latest-gpu'}}
default_docker_image = 'ritazh/azk8sml-tensorflow:latest'
default_arch = 'cpu'
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Test remap_stats plugin
'''
# Skip if plugins not present.
Test.SkipUnless(Condition.PluginExists('remap_stats.so'))
Test.SkipIf(Condition.true("Test cannot deterministically wait until the stats appear"))
server = Test.MakeOriginServer("server")
request_header = {
"headers": "GET /argh HTTP/1.1\r\nHost: one\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n",
"timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts = Test.MakeATSProcess("ts", command="traffic_manager", select_ports=True)
ts.Disk.plugin_config.AddLine('remap_stats.so')
ts.Disk.remap_config.AddLine(
"map http://one http://127.0.0.1:{0}".format(server.Variables.Port)
)
ts.Disk.remap_config.AddLine(
"map http://two http://127.0.0.1:{0}".format(server.Variables.Port)
)
ts.Disk.records_config.update({
'proxy.config.http.transaction_active_timeout_out': 2,
'proxy.config.http.transaction_no_activity_timeout_out': 2,
'proxy.config.http.connect_attempts_timeout': 2,
})
# 0 Test - Curl host One
tr = Test.AddTestRun("curl host one")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = 'curl -o /dev/null -H "Host: one"' + ' http://127.0.0.1:{}/argh'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# 1 Test - Curl host Two
tr = Test.AddTestRun("curl host two")
tr.Processes.Default.Command = 'curl -o /dev/null -H "Host: two"' + ' http://127.0.0.1:{}/badpath'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
# 2 Test - Gather output
tr = Test.AddTestRun("analyze stats")
tr.Processes.Default.Command = r'traffic_ctl metric match \.\*remap_stats\*'
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.TimeOut = 5
tr.DelayStart = 15
tr.TimeOut = 5
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression(
"plugin.remap_stats.one.status_2xx 1", "expected 2xx on first remap")
tr.Processes.Default.Streams.stdout += Testers.ContainsExpression(
"plugin.remap_stats.two.status_4xx 1", "expected 4xx on second remap")
|
Test.Summary = '\nTest remap_stats plugin\n'
Test.SkipUnless(Condition.PluginExists('remap_stats.so'))
Test.SkipIf(Condition.true('Test cannot deterministically wait until the stats appear'))
server = Test.MakeOriginServer('server')
request_header = {'headers': 'GET /argh HTTP/1.1\r\nHost: one\r\n\r\n', 'timestamp': '1469733493.993', 'body': ''}
response_header = {'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n', 'timestamp': '1469733493.993', 'body': ''}
server.addResponse('sessionlog.json', request_header, response_header)
ts = Test.MakeATSProcess('ts', command='traffic_manager', select_ports=True)
ts.Disk.plugin_config.AddLine('remap_stats.so')
ts.Disk.remap_config.AddLine('map http://one http://127.0.0.1:{0}'.format(server.Variables.Port))
ts.Disk.remap_config.AddLine('map http://two http://127.0.0.1:{0}'.format(server.Variables.Port))
ts.Disk.records_config.update({'proxy.config.http.transaction_active_timeout_out': 2, 'proxy.config.http.transaction_no_activity_timeout_out': 2, 'proxy.config.http.connect_attempts_timeout': 2})
tr = Test.AddTestRun('curl host one')
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts)
tr.Processes.Default.Command = 'curl -o /dev/null -H "Host: one"' + ' http://127.0.0.1:{}/argh'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr = Test.AddTestRun('curl host two')
tr.Processes.Default.Command = 'curl -o /dev/null -H "Host: two"' + ' http://127.0.0.1:{}/badpath'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr = Test.AddTestRun('analyze stats')
tr.Processes.Default.Command = 'traffic_ctl metric match \\.\\*remap_stats\\*'
tr.Processes.Default.Env = ts.Env
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.TimeOut = 5
tr.DelayStart = 15
tr.TimeOut = 5
tr.Processes.Default.Streams.stdout = Testers.ContainsExpression('plugin.remap_stats.one.status_2xx 1', 'expected 2xx on first remap')
tr.Processes.Default.Streams.stdout += Testers.ContainsExpression('plugin.remap_stats.two.status_4xx 1', 'expected 4xx on second remap')
|
# This file has automatically been generated
# biogeme 2.6a [Mon May 14 17:32:05 EDT 2018]
# Michel Bierlaire, Transport and Mobility Laboratory, Ecole Polytechnique Fédérale de Lausanne (EPFL)
# Mon Jun  4 13:57:03 2018
#
ASC_CAR = Beta('ASC_CAR',-1.64275,-10,10,0,'Car cte.' )
B_COST = Beta('B_COST',-0.180929,-10,10,0,'Travel cost' )
B_TIME = Beta('B_TIME',-0.0232704,-10,10,0,'Travel time' )
B_RELIB = Beta('B_RELIB',0.0860714,-10,10,0,'Travel reliability' )
ASC_CARRENTAL = Beta('ASC_CARRENTAL',-3.43973,-10,10,0,'Car Rental cte.' )
ASC_BUS = Beta('ASC_BUS',-2.875,-10,10,0,'Bus cte.' )
ASC_PLANE = Beta('ASC_PLANE',-2.36068,-10,10,0,'Plane cte.' )
ASC_TRAIN = Beta('ASC_TRAIN',-2.0106,-10,10,0,'Train cte.' )
ASC_TRH = Beta('ASC_TRH',0,-10,10,1,'TrH cte.' )
## Code for the sensitivity analysis
names = ['ASC_BUS','ASC_CAR','ASC_CARRENTAL','ASC_PLANE','ASC_TRAIN','B_COST','B_RELIB','B_TIME']
values = [[0.0222299,0.00778935,0.00263632,-0.000888164,0.00902636,0.0024049,0.0105436,-4.32583e-05],[0.00778935,0.0199112,0.0178978,0.0215904,0.0071847,-7.05174e-05,0.00508875,0.000675359],[0.00263632,0.0178978,0.0491379,0.0305725,0.003541,-0.00235489,0.00445526,0.000886736],[-0.000888164,0.0215904,0.0305725,0.0530497,0.00189364,-0.0043733,0.00427259,0.0012203],[0.00902636,0.0071847,0.003541,0.00189364,0.0169265,0.00171577,0.00605909,5.53087e-05],[0.0024049,-7.05174e-05,-0.00235489,-0.0043733,0.00171577,0.00113907,-0.000122535,-9.52213e-05],[0.0105436,0.00508875,0.00445526,0.00427259,0.00605909,-0.000122535,0.0223307,-7.38437e-05],[-4.32583e-05,0.000675359,0.000886736,0.0012203,5.53087e-05,-9.52213e-05,-7.38437e-05,3.91556e-05]]
vc = bioMatrix(8,names,values)
BIOGEME_OBJECT.VARCOVAR = vc
|
asc_car = Beta('ASC_CAR', -1.64275, -10, 10, 0, 'Car cte.')
b_cost = Beta('B_COST', -0.180929, -10, 10, 0, 'Travel cost')
b_time = Beta('B_TIME', -0.0232704, -10, 10, 0, 'Travel time')
b_relib = Beta('B_RELIB', 0.0860714, -10, 10, 0, 'Travel reliability')
asc_carrental = Beta('ASC_CARRENTAL', -3.43973, -10, 10, 0, 'Car Rental cte.')
asc_bus = Beta('ASC_BUS', -2.875, -10, 10, 0, 'Bus cte.')
asc_plane = Beta('ASC_PLANE', -2.36068, -10, 10, 0, 'Plane cte.')
asc_train = Beta('ASC_TRAIN', -2.0106, -10, 10, 0, 'Train cte.')
asc_trh = Beta('ASC_TRH', 0, -10, 10, 1, 'TrH cte.')
names = ['ASC_BUS', 'ASC_CAR', 'ASC_CARRENTAL', 'ASC_PLANE', 'ASC_TRAIN', 'B_COST', 'B_RELIB', 'B_TIME']
values = [[0.0222299, 0.00778935, 0.00263632, -0.000888164, 0.00902636, 0.0024049, 0.0105436, -4.32583e-05], [0.00778935, 0.0199112, 0.0178978, 0.0215904, 0.0071847, -7.05174e-05, 0.00508875, 0.000675359], [0.00263632, 0.0178978, 0.0491379, 0.0305725, 0.003541, -0.00235489, 0.00445526, 0.000886736], [-0.000888164, 0.0215904, 0.0305725, 0.0530497, 0.00189364, -0.0043733, 0.00427259, 0.0012203], [0.00902636, 0.0071847, 0.003541, 0.00189364, 0.0169265, 0.00171577, 0.00605909, 5.53087e-05], [0.0024049, -7.05174e-05, -0.00235489, -0.0043733, 0.00171577, 0.00113907, -0.000122535, -9.52213e-05], [0.0105436, 0.00508875, 0.00445526, 0.00427259, 0.00605909, -0.000122535, 0.0223307, -7.38437e-05], [-4.32583e-05, 0.000675359, 0.000886736, 0.0012203, 5.53087e-05, -9.52213e-05, -7.38437e-05, 3.91556e-05]]
vc = bioMatrix(8, names, values)
BIOGEME_OBJECT.VARCOVAR = vc
|
def longest_palindromic_substring_DP(s):
S = [[False for i in range(len(s))] for j in range(len(s))]
max_palindrome = ""
for i in range(len(s))[::-1]:
for j in range(i, len(s)):
S[i][j] = s[i] == s[j] and (j - i < 3 or S[i+1][j-1])
if S[i][j] and j - i + 1 > len(max_palindrome):
max_palindrome = s[i:j+1]
return max_palindrome
def longest_palindromic_substring_expansion(s):
max_palindrome = ""
for i in range(len(s) * 2 - 1):
if i % 2 == 0:
o = 0
ind = i // 2
while ind + o < len(s) and ind - o >= 0:
if(s[ind + o] != s[ind - o]):
break
if ind + o - (ind - o) + 1 > len(max_palindrome):
max_palindrome = s[ind-o:ind+o + 1]
o += 1
else:
o = 0
sind = i // 2
eind = i // 2 + 1
while sind - o >= 0 and eind + o < len(s):
if(s[sind - o] != s[eind + o]):
break
if eind + o - (sind - o) + 1 > len(max_palindrome):
max_palindrome = s[sind - o:eind + o + 1]
o += 1
return max_palindrome
input_string = "abbbacdcaacdca"
ans_DP = longest_palindromic_substring_DP(input_string)
ans_expansion = longest_palindromic_substring_expansion(input_string)
print("DP Solution: {}, Expansion Solution: {}".format(ans_DP, ans_expansion))
|
def longest_palindromic_substring_dp(s):
    dp = [[False for i in range(len(s))] for j in range(len(s))]
    max_palindrome = ''
    for i in range(len(s))[::-1]:
        for j in range(i, len(s)):
            dp[i][j] = s[i] == s[j] and (j - i < 3 or dp[i + 1][j - 1])
            if dp[i][j] and j - i + 1 > len(max_palindrome):
max_palindrome = s[i:j + 1]
return max_palindrome
def longest_palindromic_substring_expansion(s):
max_palindrome = ''
for i in range(len(s) * 2 - 1):
if i % 2 == 0:
o = 0
ind = i // 2
while ind + o < len(s) and ind - o >= 0:
if s[ind + o] != s[ind - o]:
break
if ind + o - (ind - o) + 1 > len(max_palindrome):
max_palindrome = s[ind - o:ind + o + 1]
o += 1
else:
o = 0
sind = i // 2
eind = i // 2 + 1
while sind - o >= 0 and eind + o < len(s):
if s[sind - o] != s[eind + o]:
break
if eind + o - (sind - o) + 1 > len(max_palindrome):
max_palindrome = s[sind - o:eind + o + 1]
o += 1
return max_palindrome
input_string = 'abbbacdcaacdca'
ans_dp = longest_palindromic_substring_dp(input_string)
ans_expansion = longest_palindromic_substring_expansion(input_string)
print('DP Solution: {}, Expansion Solution: {}'.format(ans_dp, ans_expansion))
|
class Solution:
def nthUglyNumber(self, n):
ugly = [1]
i2 = i3 = i5 = 0
while len(ugly) < n:
while ugly[i2] * 2 <= ugly[-1]: i2 += 1
while ugly[i3] * 3 <= ugly[-1]: i3 += 1
while ugly[i5] * 5 <= ugly[-1]: i5 += 1
ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5))
return ugly[-1]
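# Hypothetical usage sketch: the first ugly numbers are
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th is 12.
print(Solution().nthUglyNumber(10))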
|
class Solution:
def nth_ugly_number(self, n):
ugly = [1]
i2 = i3 = i5 = 0
while len(ugly) < n:
while ugly[i2] * 2 <= ugly[-1]:
i2 += 1
while ugly[i3] * 3 <= ugly[-1]:
i3 += 1
while ugly[i5] * 5 <= ugly[-1]:
i5 += 1
ugly.append(min(ugly[i2] * 2, ugly[i3] * 3, ugly[i5] * 5))
return ugly[-1]
|
def dividableNumberGenerator(limit, number):
dividableNumbers = []
for i in range(0, limit, number):
dividableNumbers.append(i)
return dividableNumbers
def sumDividableNumbers(dividableNumbers):
sum = 0
for i in range(0, len(dividableNumbers)):
sum += dividableNumbers[i]
return sum
print(sumDividableNumbers(dividableNumberGenerator(10000, 7) + dividableNumberGenerator(10000, 9)))
|
def dividable_number_generator(limit, number):
    dividable_numbers = []
    for i in range(0, limit, number):
        dividable_numbers.append(i)
    return dividable_numbers
def sum_dividable_numbers(dividable_numbers):
    sum = 0
    for i in range(0, len(dividable_numbers)):
        sum += dividable_numbers[i]
    return sum
print(sum_dividable_numbers(dividable_number_generator(10000, 7) + dividable_number_generator(10000, 9)))
|
(
((1,), (), ()),
((2,), (), ()),
((1, 2), (), ()),
((), (0,), ()),
((), (0,), (0,)),
((), (), ()),
((), (), ()),
((), (), ()),
((), (), ()),
((), (0,), (0,)),
)
|
(((1,), (), ()), ((2,), (), ()), ((1, 2), (), ()), ((), (0,), ()), ((), (0,), (0,)), ((), (), ()), ((), (), ()), ((), (), ()), ((), (), ()), ((), (0,), (0,)))
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 19:43:28 2020
@author: Ravi
"""
def minimumDistance(arr,n):
a = set(arr)
if len(a) == len(arr):
return -1
li = {}
for i in range(n):
for j in range(n):
if arr[i]==arr[j]:
if i!=j:
if arr[j] not in li:
li[arr[j]] = j
dist = []
for i in li:
x = arr.index(i)
res = abs(x - li[i])
dist.append(res)
return min(dist)
n = int(input())
arr=list(map(int,input().split(" ")))
print(minimumDistance(arr,n))
|
"""
Created on Mon Mar 16 19:43:28 2020
@author: Ravi
"""
def minimum_distance(arr, n):
a = set(arr)
if len(a) == len(arr):
return -1
li = {}
for i in range(n):
for j in range(n):
if arr[i] == arr[j]:
if i != j:
if arr[j] not in li:
li[arr[j]] = j
dist = []
for i in li:
x = arr.index(i)
res = abs(x - li[i])
dist.append(res)
return min(dist)
n = int(input())
arr = list(map(int, input().split(' ')))
print(minimum_distance(arr, n))
|
from random import randrange
from typing import List
class Solution:
def __init__(self, w: List[int]):
self.total = sum(w)
for i in range(1, len(w)):
w[i] += w[i-1]
self.w = w
def pickIndex(self) -> int:
ans = 0
stop = randrange(self.total)
l,r = 0, len(self.w)-1
while l <= r:
mid = (l+r)//2
if self.w[mid] > stop:
ans = mid
r = mid-1
else:
l = mid+1
return ans
# Your Solution object will be instantiated and called as such:
# obj = Solution(w)
# param_1 = obj.pickIndex()
|
from random import randrange
from typing import List
class Solution:
def __init__(self, w: List[int]):
self.total = sum(w)
for i in range(1, len(w)):
w[i] += w[i - 1]
self.w = w
def pick_index(self) -> int:
ans = 0
stop = randrange(self.total)
(l, r) = (0, len(self.w) - 1)
while l <= r:
mid = (l + r) // 2
if self.w[mid] > stop:
ans = mid
r = mid - 1
else:
l = mid + 1
return ans
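# Design note: __init__ rewrites w in place as its prefix-sum array, and
# pick_index draws stop uniformly from [0, total) and binary-searches for the
# first prefix sum exceeding stop, so index i is returned with probability
# w[i] / total.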
|
select_atom ={
1: "H",
3: "Li",
6: "C",
7: "N",
8: "O",
9: "F",
}
select_weight ={
1: 1.00794,
6: 12,
7: 15,
8: 16,
9: 18.998403,
}
|
select_atom = {1: 'H', 3: 'Li', 6: 'C', 7: 'N', 8: 'O', 9: 'F'}
select_weight = {1: 1.00794, 6: 12, 7: 15, 8: 16, 9: 18.998403}
|
config = dict({
"LunarLander-v2": {
"DQN": {
"eff_batch_size" : 128,
"eps_decay" : 0.99,
"gamma" : 0.99,
"tau" : 0.005,
"lr" : 0.0005
},
"EnsembleDQN": {
"eff_batch_size" : 64,
"eps_decay" : 0.99,
"gamma" : 0.99,
"tau" : 0.005,
"lr" : 0.0005
},
"BootstrapDQN":{
"eff_batch_size" : 64,
"eps_decay" : 0.99,
"gamma" : 0.99,
"tau" : 0.005,
"lr" : 0.0005,
"mask" : "bernoulli",
"mask_prob" : 0.9,
"prior_scale" : 10
},
"ProbDQN":{
"eff_batch_size" : 256,
"eps_decay" : 0.991,
"gamma" : 0.99,
"tau" : 0.001,
"lr" : 0.0005,
"loss_att_weight" : 2
},
"IV_EnsembleDQN": {
"eff_batch_size" : 64,
"eps_decay" : 0.99,
"gamma" : 0.99,
"tau" : 0.005,
"lr" : 0.0005,
"dynamic_eps" : True,
"minimal_eff_bs" : 48,
},
"IV_BootstrapDQN":{
"eff_batch_size" : 64,
"eps_decay" : 0.99,
"gamma" : 0.99,
"tau" : 0.005,
"lr" : 0.0005,
"dynamic_eps" : True,
"mask" : "bernoulli",
"mask_prob" : 0.5,
"minimal_eff_bs" : 48,
"prior_scale" : 0.1
},
"IV_ProbEnsembleDQN":{
"eff_batch_size" : 64,
"eps_decay" : 0.99,
"gamma" : 0.99,
"tau" : 0.005,
"lr" : 0.001,
"eps" : 10,
"loss_att_weight" : 3
},
"IV_ProbDQN":{
"eff_batch_size" : 256,
"eps_decay" : 0.991,
"gamma" : 0.99,
"tau" : 0.001,
"lr" : 0.0005,
"loss_att_weight" : 2,
"dynamic_eps" : True,
"minimal_eff_bs" : 208
}
},
"MountainCar-v0":{
"DQN":{
"eff_batch_size" : 256,
"lr" : 0.001,
"eps_decay" : 0.98,
"tau" : 0.01
},
"BootstrapDQN":{
"eff_batch_size" : 256,
"lr" : 0.001,
"eps_decay" : 0.98,
"tau" : 0.05,
"mask_prob" : 0.5,
"prior_scale" : 10
},
"SunriseDQN":{
"eff_batch_size" : 256,
"lr" : 0.001,
"eps_decay" : 0.98,
"tau" : 0.05,
"mask_prob" : 0.5,
"prior_scale" : 10,
"sunrise_temp" : 50
},
"IV_DQN":{
"eff_batch_size" : 256,
"lr" : 0.001,
"eps_decay" : 0.98,
"tau" : 0.05,
"mask_prob" : 0.5,
"prior_scale" : 10,
"eps" : 1000
},
"IV_ProbEnsembleDQN":{
"eff_batch_size" : 256,
"lr" : 0.001,
"eps_decay" : 0.98,
"tau" : 0.05,
"mask_prob" : 0.5,
"prior_scale" : 10,
"eps" : 1000
},
},
"gym_cheetah":{
"EnsembleSAC":{
"eff_batch_size" : 1024,
"mask_prob" : 0.9,
"ucb_lambda" : 0
},
"IV_EnsembleSAC":{
"eff_batch_size" : 1024,
"mask_prob" : 0.9,
"ucb_lambda" : 10,
"minimal_eff_bs_ratio" : 0.99,
"dynamic_eps" : True
},
"IV_ProbEnsembleSAC":{
"eff_batch_size" : 1024,
"mask_prob" : 1,
"ucb_lambda" : 0,
"minimal_eff_bs_ratio" : 0.99,
"dynamic_eps" : True,
"loss_att_weight" : 2
},
"IV_SAC":{
"eff_batch_size" : 1024,
"mask_prob" : 1,
"ucb_lambda" : 0,
"minimal_eff_bs_ratio" : 0.99,
"dynamic_eps" : True,
"loss_att_weight" : 2
},
"IV_ProbSAC":{
"loss_att_weight" : 5,
"minimal_eff_bs_ratio" : 0.5
}
},
"gym_walker2d":{
"EnsembleSAC":{
"eff_batch_size" : 512,
"mask_prob" : 1,
"ucb_lambda" : 1
},
"IV_EnsembleSAC":{
"eff_batch_size" : 1024,
"mask_prob" : 0.9,
"ucb_lambda" : 10,
"minimal_eff_bs_ratio" : 0.8,
"dynamic_eps" : True
},
"IV_ProbEnsembleSAC":{
"eff_batch_size" : 1024,
"mask_prob" : 0.9,
"ucb_lambda" : 10,
"minimal_eff_bs_ratio" : 0.8,
"dynamic_eps" : True,
"loss_att_weight" : 5
},
"IV_SAC":{
"eff_batch_size" : 1024,
"mask_prob" : 0.9,
"ucb_lambda" : 10,
"minimal_eff_bs_ratio" : 0.8,
"dynamic_eps" : True,
"loss_att_weight" : 5
},
},
"gym_hopper":{
"EnsembleSAC":{
"eff_batch_size" : 512,
"mask_prob" : 1,
"ucb_lambda" : 10
},
"IV_ProbEnsembleSAC":{
"eff_batch_size" : 1024,
"mask_prob" : 0.7,
"ucb_lambda" : 10,
"minimal_eff_bs_ratio" : 0.8,
"dynamic_eps" : True,
"loss_att_weight" : 10
},
"IV_SAC":{
"eff_batch_size" : 1024,
"mask_prob" : 0.7,
"ucb_lambda" : 10,
"minimal_eff_bs_ratio" : 0.8,
"dynamic_eps" : True,
"loss_att_weight" : 10
},
},
"gym_ant":{
"EnsembleSAC":{
"eff_batch_size" : 512,
"mask_prob" : 0.9,
"ucb_lambda" : 10
},
"IV_ProbEnsembleSAC":{
"eff_batch_size" : 1024,
"mask_prob" : 1,
"ucb_lambda" : 1,
"minimal_eff_bs_ratio" : 0.9,
"dynamic_eps" : True,
"loss_att_weight" : 5
},
"IV_SAC":{
"eff_batch_size" : 1024,
"mask_prob" : 1,
"ucb_lambda" : 1,
"minimal_eff_bs_ratio" : 0.9,
"dynamic_eps" : True,
"loss_att_weight" : 5
},
},
"cartpole":{
"BootstrapDQN":{
"batch_size" : 128,
"mask_prob" : 5
},
"IV_BootstrapDQN":{
"batch_size" : 128,
"mask_prob" : 0.5,
"minimal_eff_bs_ratio" : 0.99
},
"IV_ProbEnsembleDQN":{
"batch_size" : 128,
"mask_prob" : 0.5,
"minimal_eff_bs_ratio" : 0.99,
"loss_att_weight" : 10
},
"IV_BootstrapDQN":{
"batch_size" : 128,
"mask_prob" : 0.5,
"minimal_eff_bs_ratio" : 0.99,
},
"IV_ProbDQN": {
"loss_att_weight" : 0.1,
"minimal_eff_bs_ratio" : 0.7
},
"ProbEnsembleDQN":{
"batch_size" : 128,
"loss_att_weight" : 10,
"mask_prob" : 0.5
}
}
})
|
config = dict({'LunarLander-v2': {'DQN': {'eff_batch_size': 128, 'eps_decay': 0.99, 'gamma': 0.99, 'tau': 0.005, 'lr': 0.0005}, 'EnsembleDQN': {'eff_batch_size': 64, 'eps_decay': 0.99, 'gamma': 0.99, 'tau': 0.005, 'lr': 0.0005}, 'BootstrapDQN': {'eff_batch_size': 64, 'eps_decay': 0.99, 'gamma': 0.99, 'tau': 0.005, 'lr': 0.0005, 'mask': 'bernoulli', 'mask_prob': 0.9, 'prior_scale': 10}, 'ProbDQN': {'eff_batch_size': 256, 'eps_decay': 0.991, 'gamma': 0.99, 'tau': 0.001, 'lr': 0.0005, 'loss_att_weight': 2}, 'IV_EnsembleDQN': {'eff_batch_size': 64, 'eps_decay': 0.99, 'gamma': 0.99, 'tau': 0.005, 'lr': 0.0005, 'dynamic_eps': True, 'minimal_eff_bs': 48}, 'IV_BootstrapDQN': {'eff_batch_size': 64, 'eps_decay': 0.99, 'gamma': 0.99, 'tau': 0.005, 'lr': 0.0005, 'dynamic_eps': True, 'mask': 'bernoulli', 'mask_prob': 0.5, 'minimal_eff_bs': 48, 'prior_scale': 0.1}, 'IV_ProbEnsembleDQN': {'eff_batch_size': 64, 'eps_decay': 0.99, 'gamma': 0.99, 'tau': 0.005, 'lr': 0.001, 'eps': 10, 'loss_att_weight': 3}, 'IV_ProbDQN': {'eff_batch_size': 256, 'eps_decay': 0.991, 'gamma': 0.99, 'tau': 0.001, 'lr': 0.0005, 'loss_att_weight': 2, 'dynamic_eps': True, 'minimal_eff_bs': 208}}, 'MountainCar-v0': {'DQN': {'eff_batch_size': 256, 'lr': 0.001, 'eps_decay': 0.98, 'tau': 0.01}, 'BootstrapDQN': {'eff_batch_size': 256, 'lr': 0.001, 'eps_decay': 0.98, 'tau': 0.05, 'mask_prob': 0.5, 'prior_scale': 10}, 'SunriseDQN': {'eff_batch_size': 256, 'lr': 0.001, 'eps_decay': 0.98, 'tau': 0.05, 'mask_prob': 0.5, 'prior_scale': 10, 'sunrise_temp': 50}, 'IV_DQN': {'eff_batch_size': 256, 'lr': 0.001, 'eps_decay': 0.98, 'tau': 0.05, 'mask_prob': 0.5, 'prior_scale': 10, 'eps': 1000}, 'IV_ProbEnsembleDQN': {'eff_batch_size': 256, 'lr': 0.001, 'eps_decay': 0.98, 'tau': 0.05, 'mask_prob': 0.5, 'prior_scale': 10, 'eps': 1000}}, 'gym_cheetah': {'EnsembleSAC': {'eff_batch_size': 1024, 'mask_prob': 0.9, 'ucb_lambda': 0}, 'IV_EnsembleSAC': {'eff_batch_size': 1024, 'mask_prob': 0.9, 'ucb_lambda': 10, 'minimal_eff_bs_ratio': 0.99, 'dynamic_eps': True}, 'IV_ProbEnsembleSAC': {'eff_batch_size': 1024, 'mask_prob': 1, 'ucb_lambda': 0, 'minimal_eff_bs_ratio': 0.99, 'dynamic_eps': True, 'loss_att_weight': 2}, 'IV_SAC': {'eff_batch_size': 1024, 'mask_prob': 1, 'ucb_lambda': 0, 'minimal_eff_bs_ratio': 0.99, 'dynamic_eps': True, 'loss_att_weight': 2}, 'IV_ProbSAC': {'loss_att_weight': 5, 'minimal_eff_bs_ratio': 0.5}}, 'gym_walker2d': {'EnsembleSAC': {'eff_batch_size': 512, 'mask_prob': 1, 'ucb_lambda': 1}, 'IV_EnsembleSAC': {'eff_batch_size': 1024, 'mask_prob': 0.9, 'ucb_lambda': 10, 'minimal_eff_bs_ratio': 0.8, 'dynamic_eps': True}, 'IV_ProbEnsembleSAC': {'eff_batch_size': 1024, 'mask_prob': 0.9, 'ucb_lambda': 10, 'minimal_eff_bs_ratio': 0.8, 'dynamic_eps': True, 'loss_att_weight': 5}, 'IV_SAC': {'eff_batch_size': 1024, 'mask_prob': 0.9, 'ucb_lambda': 10, 'minimal_eff_bs_ratio': 0.8, 'dynamic_eps': True, 'loss_att_weight': 5}}, 'gym_hopper': {'EnsembleSAC': {'eff_batch_size': 512, 'mask_prob': 1, 'ucb_lambda': 10}, 'IV_ProbEnsembleSAC': {'eff_batch_size': 1024, 'mask_prob': 0.7, 'ucb_lambda': 10, 'minimal_eff_bs_ratio': 0.8, 'dynamic_eps': True, 'loss_att_weight': 10}, 'IV_SAC': {'eff_batch_size': 1024, 'mask_prob': 0.7, 'ucb_lambda': 10, 'minimal_eff_bs_ratio': 0.8, 'dynamic_eps': True, 'loss_att_weight': 10}}, 'gym_ant': {'EnsembleSAC': {'eff_batch_size': 512, 'mask_prob': 0.9, 'ucb_lambda': 10}, 'IV_ProbEnsembleSAC': {'eff_batch_size': 1024, 'mask_prob': 1, 'ucb_lambda': 1, 'minimal_eff_bs_ratio': 0.9, 'dynamic_eps': True, 'loss_att_weight': 5}, 
'IV_SAC': {'eff_batch_size': 1024, 'mask_prob': 1, 'ucb_lambda': 1, 'minimal_eff_bs_ratio': 0.9, 'dynamic_eps': True, 'loss_att_weight': 5}}, 'cartpole': {'BootstrapDQN': {'batch_size': 128, 'mask_prob': 5}, 'IV_BootstrapDQN': {'batch_size': 128, 'mask_prob': 0.5, 'minimal_eff_bs_ratio': 0.99}, 'IV_ProbEnsembleDQN': {'batch_size': 128, 'mask_prob': 0.5, 'minimal_eff_bs_ratio': 0.99, 'loss_att_weight': 10}, 'IV_BootstrapDQN': {'batch_size': 128, 'mask_prob': 0.5, 'minimal_eff_bs_ratio': 0.99}, 'IV_ProbDQN': {'loss_att_weight': 0.1, 'minimal_eff_bs_ratio': 0.7}, 'ProbEnsembleDQN': {'batch_size': 128, 'loss_att_weight': 10, 'mask_prob': 0.5}}})
|
season = str(input())
gender = str(input())
people = int(input())
time = int(input())
winter = False
spring = False
summer = False
girls = False
boys = False
mixed = False
tax = 0
sport = str()
total_price = 0
discount = 0
if season == "Winter":
winter = True
elif season == "Spring":
spring = True
elif season == "Summer":
summer = True
if gender == "boys":
boys = True
elif gender == "girls":
girls = True
elif gender == "mixed":
mixed = True
if people >= 50:
discount = 0.50
elif 20 <= people < 50:
discount = 0.85
elif 10 <= people < 20:
discount = 0.95
else:
discount = 1
if winter:
if boys or girls:
tax = 9.60
total_price = people * tax
if boys:
sport = "Judo"
if girls:
sport = "Gymnastics"
elif mixed:
tax = 10
total_price = people * tax
sport = "Ski"
elif spring:
if boys or girls:
tax = 7.20
total_price = people * tax
if boys:
sport = "Tennis"
if girls:
sport = "Athletics"
elif mixed:
tax = 9.50
total_price = people * tax
sport = "Cycling"
elif summer:
if boys or girls:
tax = 15
total_price = people * tax
if boys:
sport = "Football"
if girls:
sport = "Volleyball"
elif mixed:
tax = 20
total_price = people * tax
sport = "Swimming"
print(f"{sport} {(total_price * time) * discount:.2f} lv. ")
|
season = str(input())
gender = str(input())
people = int(input())
time = int(input())
winter = False
spring = False
summer = False
girls = False
boys = False
mixed = False
tax = 0
sport = str()
total_price = 0
discount = 0
if season == 'Winter':
winter = True
elif season == 'Spring':
spring = True
elif season == 'Summer':
summer = True
if gender == 'boys':
boys = True
elif gender == 'girls':
girls = True
elif gender == 'mixed':
mixed = True
if people >= 50:
discount = 0.5
elif 20 <= people < 50:
discount = 0.85
elif 10 <= people < 20:
discount = 0.95
else:
discount = 1
if winter:
if boys or girls:
tax = 9.6
total_price = people * tax
if boys:
sport = 'Judo'
if girls:
sport = 'Gymnastics'
elif mixed:
tax = 10
total_price = people * tax
sport = 'Ski'
elif spring:
if boys or girls:
tax = 7.2
total_price = people * tax
if boys:
sport = 'Tennis'
if girls:
sport = 'Athletics'
elif mixed:
tax = 9.5
total_price = people * tax
sport = 'Cycling'
elif summer:
if boys or girls:
tax = 15
total_price = people * tax
if boys:
sport = 'Football'
if girls:
sport = 'Volleyball'
elif mixed:
tax = 20
total_price = people * tax
sport = 'Swimming'
print(f'{sport} {total_price * time * discount:.2f} lv. ')
|
work_hours = [('Abby',100),('Billy',400),('Cassie',800)]
def employee_check(work_hours):
current_max = 0
# Set some empty value before the loop
employee_of_month = ''
for employee,hours in work_hours:
if hours > current_max:
current_max = hours
employee_of_month = employee
else:
pass
# Notice the indentation here
return (employee_of_month,current_max)
print(employee_check(work_hours))
|
work_hours = [('Abby', 100), ('Billy', 400), ('Cassie', 800)]
def employee_check(work_hours):
current_max = 0
employee_of_month = ''
for (employee, hours) in work_hours:
if hours > current_max:
current_max = hours
employee_of_month = employee
else:
pass
return (employee_of_month, current_max)
print(employee_check(work_hours))
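# For the sample work_hours above, this prints ('Cassie', 800).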
|
def row_sum_odd_numbers(n):
row_first_odd = int(0.5*(n-1)*n)
odd_list = range(1, (row_first_odd+n)*2, 2)
odd_row = odd_list[row_first_odd:]
return sum(odd_row)
# Example:
# 1
# 3 5
# 7 9 11
# 13 15 17 19
# 21 23 25 27 29
# row_sum_odd_numbers(1); # 1
# row_sum_odd_numbers(2); # 3 + 5 = 8
# print row_sum_odd_numbers(1)
# print row_sum_odd_numbers(2)
# print row_sum_odd_numbers(3)
# print row_sum_odd_numbers(4)
|
def row_sum_odd_numbers(n):
row_first_odd = int(0.5 * (n - 1) * n)
odd_list = range(1, (row_first_odd + n) * 2, 2)
odd_row = odd_list[row_first_odd:]
return sum(odd_row)
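# Hypothetical usage, matching the triangle in the original example:
# row 1 sums to 1, row 2 to 3 + 5 = 8.
print(row_sum_odd_numbers(1), row_sum_odd_numbers(2))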
|
"""
Tuples. Immutable lists i.e. contents cannot be changed. These are ordered so indexes and duplicates are allowed.
tuples vs lists
fixed length vs variable length
tuples ()
lists []
tuples - immutable
lists - mutable
"""
my_tuple = ()
print(f"Type is : {type(my_tuple)}")
# Single element in a tuple needs a trick; a trailing comma
my_tuple = (9)
print(f"Contents: {my_tuple}")
print(f"Type is : {type(my_tuple)}")
my_tuple = (9,)
print(f"Contents: {my_tuple}")
print(f"Type is : {type(my_tuple)}")
# Access element
my_tuple = (1, 2, 3, 4, 5)
# 1st element
print(f"First element: {my_tuple[0]}")
# 2nd element
print(f"Second element: {my_tuple[1]}")
# Last element
print(f"Last element: {my_tuple[-1]}")
# 2nd to last element
print(f"Second to last element: {my_tuple[-2]}")
# Tuple assignment is interesting - assign a tuple of variables to a tuple of values & map them
# Sometimes called tuples packing and unpacking. Both tuples need the same number of elements or else there will be a value error
tuple1 = ("Cisco", "2600", "12.4")
# Assign variables to tuple1
(vendor, model, ios) = tuple1
print(f"Vendor: {vendor}\n")
print(f"Model: {model}\n")
print(f"IOS: {ios}")
# Assign variables in one tuple to values in another in one statement
(a, b, c) = (10, 20, 30)
print(f"a: {a}")
print(f"b: {b}")
print(f"c: {c}")
# Find out what methods/functions are available
a = ()
b = []
print(f"Type of a: {type(a)}")
print(f"Type of b: {type(b)}")
print(f"Methods/functions available to a: {dir(a)}")
print(f"Methods/functions available to b: {dir(b)}")
# Find out the number of elements inside a tuple
tuple2 = (1, 2, 3, 4)
print(f"Length of tuple2: {len(tuple2)}")
# Lowest and greatest value in a tuple
print(f"Lowest value in tuple2: {min(tuple2)}")
print(f"Highest value in tuple2: {max(tuple2)}")
# Concatenate and multiply tuple
print(f"Concatenate tuples: {tuple2 + (5, 6, 7)}")
print(f"Multiply tuples: {tuple2 * 2}")
# Slicing is also possible with tuples
print(f"1st 2 elements inside the tuple: {tuple2[0:2]}")
print(f"[:2] returns the same: {tuple2[:2]}")
print(f"Slice from element 1 to the end of the tuple: {tuple2[1:]}")
print(f"Entire tuple: {tuple2[:]}")
print(f"Without the last 2 elements in the tuple: {tuple2[:-2]}")
print(f"Without the first 2 elements in the tuple: {tuple2[-2:]}")
print(f"Insert a step in the slice to show in reverse order: {tuple2[::-1]}")
# Check if an element is a member of a tuple or not
print(f"Is 3 in tuple2: {3 in tuple2}")
print(f"Is 3 not in tuple2: {3 not in tuple2}")
print(f"Is 5 in tuple2: {5 in tuple2}")
# To delete the entire tuple
del tuple2 # This will throw an error if you try to print the tuple as it not defined anymore
|
"""
Tuples. Immutable lists i.e. contents cannot be changed. These are ordered so indexes and duplicates are allowed.
tuples vs lists
fixed length vs variable length
tuples ()
lists []
tuples - immutable
lists - mutable
"""
my_tuple = ()
print(f'Type is : {type(my_tuple)}')
my_tuple = 9
print(f'Contents: {my_tuple}')
print(f'Type is : {type(my_tuple)}')
my_tuple = (9,)
print(f'Contents: {my_tuple}')
print(f'Type is : {type(my_tuple)}')
my_tuple = (1, 2, 3, 4, 5)
print(f'First element: {my_tuple[0]}')
print(f'Second element: {my_tuple[1]}')
print(f'Last element: {my_tuple[-1]}')
print(f'Second to last element: {my_tuple[-2]}')
tuple1 = ('Cisco', '2600', '12.4')
(vendor, model, ios) = tuple1
print(f'Vendor: {vendor}\n')
print(f'Model: {model}\n')
print(f'IOS: {ios}')
(a, b, c) = (10, 20, 30)
print(f'a: {a}')
print(f'b: {b}')
print(f'c: {c}')
a = ()
b = []
print(f'Type of a: {type(a)}')
print(f'Type of b: {type(b)}')
print(f'Methods/functions available to a: {dir(a)}')
print(f'Methods/functions available to b: {dir(b)}')
tuple2 = (1, 2, 3, 4)
print(f'Length of tuple2: {len(tuple2)}')
print(f'Lowest value in tuple2: {min(tuple2)}')
print(f'Highest value in tuple2: {max(tuple2)}')
print(f'Concatenate tuples: {tuple2 + (5, 6, 7)}')
print(f'Multiply tuples: {tuple2 * 2}')
print(f'1st 2 elements inside the tuple: {tuple2[0:2]}')
print(f'[:2] returns the same: {tuple2[:2]}')
print(f'Slice from element 1 to the end of the tuple: {tuple2[1:]}')
print(f'Entire tuple: {tuple2[:]}')
print(f'Without the last 2 elements in the tuple: {tuple2[:-2]}')
print(f'Without the first 2 elements in the tuple: {tuple2[-2:]}')
print(f'Insert a step in the slice to show in reverse order: {tuple2[::-1]}')
print(f'Is 3 in tuple2: {3 in tuple2}')
print(f'Is 3 not in tuple2: {3 not in tuple2}')
print(f'Is 5 in tuple2: {5 in tuple2}')
del tuple2
|
# -*- coding: utf-8 -*-
# Scrapy settings for Downloader project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
#
BOT_NAME = 'CNSpider'
SPIDER_MODULES = ['CNSpider.spiders']
NEWSPIDER_MODULE = 'CNSpider.spiders'
RANDOMIZE_DOWNLOAD_DELAY = True
#DOWNLOAD_DELAY = 1
COOKIES_ENABLED = False
RETRY_ENABLED = False
DOWNLOADER_MIDDLEWARES = {
'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware' : None,
'CNSpider.rotate_useragent.RotateUserAgentMiddleware' :400
}
SPIDER_MIDDLEWARES = {
'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
}
|
bot_name = 'CNSpider'
spider_modules = ['CNSpider.spiders']
newspider_module = 'CNSpider.spiders'
randomize_download_delay = True
cookies_enabled = False
retry_enabled = False
downloader_middlewares = {'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': None, 'CNSpider.rotate_useragent.RotateUserAgentMiddleware': 400}
spider_middlewares = {'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500}
|
#
# Copyright (c) 2019 MagicStack Inc.
# All rights reserved.
#
# See LICENSE for details.
##
GET_USER = """
SELECT User {
id,
name,
image,
latest_reviews := (
WITH UserReviews := User.<author[IS Review]
SELECT UserReviews {
id,
body,
rating,
movie: {
id,
image,
title,
avg_rating
}
}
ORDER BY .creation_time DESC
LIMIT 10
)
}
FILTER .id = <uuid>$id
"""
GET_MOVIE = """
SELECT Movie {
id,
image,
title,
year,
description,
avg_rating,
directors: {
id,
full_name,
image,
}
ORDER BY @list_order EMPTY LAST
THEN .last_name,
cast: {
id,
full_name,
image,
}
ORDER BY @list_order EMPTY LAST
THEN .last_name,
reviews := (
SELECT Movie.<movie[IS Review] {
id,
body,
rating,
author: {
id,
name,
image,
}
}
ORDER BY .creation_time DESC
),
}
FILTER .id = <uuid>$id
"""
GET_PERSON = """
SELECT Person {
id,
full_name,
image,
bio,
acted_in := (
WITH M := Person.<cast[IS Movie]
SELECT M {
id,
image,
title,
year,
avg_rating
}
ORDER BY .year ASC THEN .title ASC
),
directed := (
WITH M := Person.<directors[IS Movie]
SELECT M {
id,
image,
title,
year,
avg_rating
}
ORDER BY .year ASC THEN .title ASC
),
}
FILTER .id = <uuid>$id
"""
UPDATE_MOVIE = """
SELECT (
UPDATE Movie
FILTER .id = <uuid>$id
SET {
title := .title ++ '---' ++ <str>$suffix
}
) {
id,
title
}
"""
INSERT_USER = """
SELECT (
INSERT User {
name := <str>$name,
image := <str>$image,
}
) {
id,
name,
image,
}
"""
INSERT_MOVIE = """
SELECT (
INSERT Movie {
title := <str>$title,
image := <str>$image,
description := <str>$description,
year := <int64>$year,
directors := (
SELECT Person
FILTER .id = (<uuid>$d_id)
),
cast := (
SELECT Person
FILTER .id IN array_unpack(<array<uuid>>$cast)
),
}
) {
id,
title,
image,
description,
year,
directors: {
id,
full_name,
image,
}
ORDER BY .last_name,
cast: {
id,
full_name,
image,
}
ORDER BY .last_name,
}
"""
INSERT_MOVIE_PLUS = """
SELECT (
INSERT Movie {
title := <str>$title,
image := <str>$image,
description := <str>$description,
year := <int64>$year,
directors := (
INSERT Person {
first_name := <str>$dfn,
last_name := <str>$dln,
image := <str>$dimg,
}
),
cast := {(
INSERT Person {
first_name := <str>$cfn0,
last_name := <str>$cln0,
image := <str>$cimg0,
}
), (
INSERT Person {
first_name := <str>$cfn1,
last_name := <str>$cln1,
image := <str>$cimg1,
}
)},
}
) {
id,
title,
image,
description,
year,
directors: {
id,
full_name,
image,
}
ORDER BY .last_name,
cast: {
id,
full_name,
image,
}
ORDER BY .last_name,
}
"""
|
get_user = '\n SELECT User {\n id,\n name,\n image,\n latest_reviews := (\n WITH UserReviews := User.<author[IS Review]\n SELECT UserReviews {\n id,\n body,\n rating,\n movie: {\n id,\n image,\n title,\n avg_rating\n }\n }\n ORDER BY .creation_time DESC\n LIMIT 10\n )\n }\n FILTER .id = <uuid>$id\n'
get_movie = '\n SELECT Movie {\n id,\n image,\n title,\n year,\n description,\n avg_rating,\n\n directors: {\n id,\n full_name,\n image,\n }\n ORDER BY @list_order EMPTY LAST\n THEN .last_name,\n\n cast: {\n id,\n full_name,\n image,\n }\n ORDER BY @list_order EMPTY LAST\n THEN .last_name,\n\n reviews := (\n SELECT Movie.<movie[IS Review] {\n id,\n body,\n rating,\n author: {\n id,\n name,\n image,\n }\n }\n ORDER BY .creation_time DESC\n ),\n }\n FILTER .id = <uuid>$id\n'
get_person = '\n SELECT Person {\n id,\n full_name,\n image,\n bio,\n\n acted_in := (\n WITH M := Person.<cast[IS Movie]\n SELECT M {\n id,\n image,\n title,\n year,\n avg_rating\n }\n ORDER BY .year ASC THEN .title ASC\n ),\n\n directed := (\n WITH M := Person.<directors[IS Movie]\n SELECT M {\n id,\n image,\n title,\n year,\n avg_rating\n }\n ORDER BY .year ASC THEN .title ASC\n ),\n }\n FILTER .id = <uuid>$id\n'
update_movie = "\n SELECT (\n UPDATE Movie\n FILTER .id = <uuid>$id\n SET {\n title := .title ++ '---' ++ <str>$suffix\n }\n ) {\n id,\n title\n }\n"
insert_user = '\n SELECT (\n INSERT User {\n name := <str>$name,\n image := <str>$image,\n }\n ) {\n id,\n name,\n image,\n }\n'
insert_movie = '\n SELECT (\n INSERT Movie {\n title := <str>$title,\n image := <str>$image,\n description := <str>$description,\n year := <int64>$year,\n directors := (\n SELECT Person\n FILTER .id = (<uuid>$d_id)\n ),\n cast := (\n SELECT Person\n FILTER .id IN array_unpack(<array<uuid>>$cast)\n ),\n }\n ) {\n id,\n title,\n image,\n description,\n year,\n directors: {\n id,\n full_name,\n image,\n }\n ORDER BY .last_name,\n\n cast: {\n id,\n full_name,\n image,\n }\n ORDER BY .last_name,\n }\n'
insert_movie_plus = '\n SELECT (\n INSERT Movie {\n title := <str>$title,\n image := <str>$image,\n description := <str>$description,\n year := <int64>$year,\n directors := (\n INSERT Person {\n first_name := <str>$dfn,\n last_name := <str>$dln,\n image := <str>$dimg,\n }\n ),\n cast := {(\n INSERT Person {\n first_name := <str>$cfn0,\n last_name := <str>$cln0,\n image := <str>$cimg0,\n }\n ), (\n INSERT Person {\n first_name := <str>$cfn1,\n last_name := <str>$cln1,\n image := <str>$cimg1,\n }\n )},\n }\n ) {\n id,\n title,\n image,\n description,\n year,\n directors: {\n id,\n full_name,\n image,\n }\n ORDER BY .last_name,\n\n cast: {\n id,\n full_name,\n image,\n }\n ORDER BY .last_name,\n }\n'
|
sequence = [1]
n = 0
while n < 40:
sequence.append(sequence[n] + sequence[n-1])
n += 1
print('The Fibonacci sequence, up to the 41st term, is \n' + str(sequence) + '.')
pisano = int(input("Please pick a number. "))
psequence = []
for item in sequence:
psequence.append(item % pisano)
print(psequence)
# for item in sequence:
# if item%pisano != 0:
# psequence.append(item%pisano)
# else:
# print('The Pisano period of ' + str(pisano) + ' for the Fibonacci sequence is ' + str(psequence) + '.')
# break
|
sequence = [1]
n = 0
while n < 40:
sequence.append(sequence[n] + sequence[n - 1])
n += 1
print('The Fibonacci sequence, up to the 41st term, is \n' + str(sequence) + '.')
pisano = int(input('Please pick a number. '))
psequence = []
for item in sequence:
psequence.append(item % pisano)
print(psequence)
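# Background note: the residues above are eventually periodic; the length of
# that cycle is the Pisano period. For example, Fibonacci residues mod 2
# repeat as 1, 1, 0 with period 3.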
|
class OperationHolderMixin:
def __and__(self, other):
return OperandHolder(AND, self, other)
def __or__(self, other):
return OperandHolder(OR, self, other)
def __rand__(self, other):
return OperandHolder(AND, other, self)
def __ror__(self, other):
return OperandHolder(OR, other, self)
def __invert__(self):
return SingleOperandHolder(NOT, self)
class SingleOperandHolder(OperationHolderMixin):
def __init__(self, operator_class, op1_class):
self.operator_class = operator_class
self.op1_class = op1_class
def __call__(self, *args, **kwargs):
op1 = self.op1_class(*args, **kwargs)
return self.operator_class(op1)
class OperandHolder(OperationHolderMixin):
def __init__(self, operator_class, op1_class, op2_class):
self.operator_class = operator_class
self.op1_class = op1_class
self.op2_class = op2_class
def __call__(self, *args, **kwargs):
op1 = self.op1_class(*args, **kwargs)
op2 = self.op2_class(*args, **kwargs)
return self.operator_class(op1, op2)
class AND:
def __init__(self, op1, op2):
self.op1 = op1
self.op2 = op2
def has_permission(self, context):
return (
self.op1.has_permission(context) and
self.op2.has_permission(context)
)
def has_object_permission(self, context, obj):
return (
self.op1.has_object_permission(context, obj) and
self.op2.has_object_permission(context, obj)
)
class OR:
def __init__(self, op1, op2):
self.op1 = op1
self.op2 = op2
def has_permission(self, context):
return (
self.op1.has_permission(context) or
self.op2.has_permission(context)
)
def has_object_permission(self, context, obj):
return (
self.op1.has_object_permission(context, obj) or
self.op2.has_object_permission(context, obj)
)
class NOT:
def __init__(self, op1):
self.op1 = op1
def has_permission(self, context):
return not self.op1.has_permission(context)
def has_object_permission(self, context, obj):
return not self.op1.has_object_permission(context, obj)
class BasePermissionMetaclass(OperationHolderMixin, type):
pass
|
class Operationholdermixin:
    def __and__(self, other):
        return Operandholder(And, self, other)
    def __or__(self, other):
        return Operandholder(Or, self, other)
    def __rand__(self, other):
        return Operandholder(And, other, self)
    def __ror__(self, other):
        return Operandholder(Or, other, self)
    def __invert__(self):
        return Singleoperandholder(Not, self)
class Singleoperandholder(Operationholdermixin):
def __init__(self, operator_class, op1_class):
self.operator_class = operator_class
self.op1_class = op1_class
def __call__(self, *args, **kwargs):
op1 = self.op1_class(*args, **kwargs)
return self.operator_class(op1)
class Operandholder(Operationholdermixin):
def __init__(self, operator_class, op1_class, op2_class):
self.operator_class = operator_class
self.op1_class = op1_class
self.op2_class = op2_class
def __call__(self, *args, **kwargs):
op1 = self.op1_class(*args, **kwargs)
op2 = self.op2_class(*args, **kwargs)
return self.operator_class(op1, op2)
class And:
def __init__(self, op1, op2):
self.op1 = op1
self.op2 = op2
def has_permission(self, context):
return self.op1.has_permission(context) and self.op2.has_permission(context)
def has_object_permission(self, context, obj):
return self.op1.has_object_permission(context, obj) and self.op2.has_object_permission(context, obj)
class Or:
def __init__(self, op1, op2):
self.op1 = op1
self.op2 = op2
def has_permission(self, context):
return self.op1.has_permission(context) or self.op2.has_permission(context)
def has_object_permission(self, context, obj):
return self.op1.has_object_permission(context, obj) or self.op2.has_object_permission(context, obj)
class Not:
def __init__(self, op1):
self.op1 = op1
def has_permission(self, context):
return not self.op1.has_permission(context)
def has_object_permission(self, context, obj):
return not self.op1.has_object_permission(context, obj)
class Basepermissionmetaclass(Operationholdermixin, type):
pass
|
class InvalidBackend(Exception):
pass
class NodeDoesNotExist(Exception):
pass
class PersistenceError(Exception):
pass
|
class Invalidbackend(Exception):
pass
class Nodedoesnotexist(Exception):
pass
class Persistenceerror(Exception):
pass
|
# -*- coding: utf-8 -*-
class WebsocketError:
CodeInvalidSession = 9001
CodeConnCloseErr = 9005
class AuthenticationFailedError(RuntimeError):
def __init__(self, msg):
self.msgs = msg
def __str__(self):
return self.msgs
class NotFoundError(RuntimeError):
def __init__(self, msg):
self.msgs = msg
def __str__(self):
return self.msgs
class MethodNotAllowedError(RuntimeError):
def __init__(self, msg):
self.msgs = msg
def __str__(self):
return self.msgs
class SequenceNumberError(RuntimeError):
def __init__(self, msg):
self.msgs = msg
def __str__(self):
return self.msgs
class ServerError(RuntimeError):
def __init__(self, msg):
self.msgs = msg
def __str__(self):
return self.msgs
|
class Websocketerror:
code_invalid_session = 9001
code_conn_close_err = 9005
class Authenticationfailederror(RuntimeError):
def __init__(self, msg):
self.msgs = msg
def __str__(self):
return self.msgs
class Notfounderror(RuntimeError):
def __init__(self, msg):
self.msgs = msg
def __str__(self):
return self.msgs
class Methodnotallowederror(RuntimeError):
def __init__(self, msg):
self.msgs = msg
def __str__(self):
return self.msgs
class Sequencenumbererror(RuntimeError):
def __init__(self, msg):
self.msgs = msg
def __str__(self):
return self.msgs
class Servererror(RuntimeError):
def __init__(self, msg):
self.msgs = msg
def __str__(self):
return self.msgs
|
# Problem: https://www.hackerrank.com/challenges/repeated-string/problem
# Score: 20
def repeated_string(s, n):
return n // len(s) * s.count('a') + s[0: n % len(s)].count('a')
s = input()
n = int(input())
print(repeated_string(s, n))
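# Quick check (hypothetical): the first 10 characters of 'aba' repeated are
# 'abaabaabaa', which contain 7 occurrences of 'a'.
# repeated_string('aba', 10) -> 7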
|
def repeated_string(s, n):
return n // len(s) * s.count('a') + s[0:n % len(s)].count('a')
s = input()
n = int(input())
print(repeated_string(s, n))
|
num1 = int(input())
num2 = int(input())
if num1>=num2:
print(num1)
else:
print(num2)
|
num1 = int(input())
num2 = int(input())
if num1 >= num2:
print(num1)
else:
print(num2)
|
class EnergyAnalysisSurface(Element, IDisposable):
"""
Analytical surface.
The collection of analytic openings belonging to this analytical parent surface
"""
def Dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def GetAdjacentAnalyticalSpace(self):
"""
GetAdjacentAnalyticalSpace(self: EnergyAnalysisSurface) -> EnergyAnalysisSpace
Gets the secondary adjacent analytical space this surface is associated with.
Returns: The secondary analytical space.
"""
pass
def GetAnalyticalOpenings(self):
"""
GetAnalyticalOpenings(self: EnergyAnalysisSurface) -> IList[EnergyAnalysisOpening]
Returns the analytical openings of the analytical surface.
Returns: The collection of analytical openings.
"""
pass
def GetAnalyticalSpace(self):
"""
GetAnalyticalSpace(self: EnergyAnalysisSurface) -> EnergyAnalysisSpace
Gets the primary analytical space this surface is associated with.
Returns: The primary analytical space.
"""
pass
def getBoundingBox(self, *args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def GetPolyloop(self):
"""
GetPolyloop(self: EnergyAnalysisSurface) -> Polyloop
Gets the planar polygon describing the opening geometry.
Returns: The planar polygon describing the opening geometry.
"""
pass
def ReleaseUnmanagedResources(self, *args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def setElementType(self, *args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
Azimuth = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The azimuth angle for this surface.
Get: Azimuth(self: EnergyAnalysisSurface) -> float
"""
CADLinkUniqueId = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The unique id of the originating CAD object's link (linked document) associated with this surface.
Get: CADLinkUniqueId(self: EnergyAnalysisSurface) -> str
"""
CADObjectUniqueId = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The unique id of the originating CAD object (model element) associated with this surface.
Get: CADObjectUniqueId(self: EnergyAnalysisSurface) -> str
"""
Corner = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The lower-left coordinate for the analytical rectangular geometry viewed from outside.
Get: Corner(self: EnergyAnalysisSurface) -> XYZ
"""
Height = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The height of the analytical rectangular geometry.
Get: Height(self: EnergyAnalysisSurface) -> float
"""
Normal = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The outward normal for this surface.
Get: Normal(self: EnergyAnalysisSurface) -> XYZ
"""
OriginatingElementDescription = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The description for the originating Revit element.
Get: OriginatingElementDescription(self: EnergyAnalysisSurface) -> str
"""
SurfaceId = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The unique identifier for the surface.
Get: SurfaceId(self: EnergyAnalysisSurface) -> str
"""
SurfaceName = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The unique name identifier for this surface.
Get: SurfaceName(self: EnergyAnalysisSurface) -> str
"""
SurfaceType = property(
lambda self: object(), lambda self, v: None, lambda self: None
)
"""The analytical surface type.
Get: SurfaceType(self: EnergyAnalysisSurface) -> EnergyAnalysisSurfaceType
"""
Tilt = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The tilt angle for this surface.
Get: Tilt(self: EnergyAnalysisSurface) -> float
"""
Type = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The gbXML surface type attribute.
Get: Type(self: EnergyAnalysisSurface) -> gbXMLSurfaceType
"""
Width = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""The width of the analytical rectangular geometry.
Get: Width(self: EnergyAnalysisSurface) -> float
"""
|
class Energyanalysissurface(Element, IDisposable):
"""
Analytical surface.
The collection of analytic openings belonging to this analytical parent surface
"""
def dispose(self):
""" Dispose(self: Element,A_0: bool) """
pass
def get_adjacent_analytical_space(self):
"""
GetAdjacentAnalyticalSpace(self: EnergyAnalysisSurface) -> EnergyAnalysisSpace
Gets the secondary adjacent analytical space this surface is associated with.
Returns: The secondary analytical space.
"""
pass
def get_analytical_openings(self):
"""
GetAnalyticalOpenings(self: EnergyAnalysisSurface) -> IList[EnergyAnalysisOpening]
Returns the analytical openings of the analytical surface.
Returns: The collection of analytical openings.
"""
pass
def get_analytical_space(self):
"""
GetAnalyticalSpace(self: EnergyAnalysisSurface) -> EnergyAnalysisSpace
Gets the primary analytical space this surface is associated with.
Returns: The primary analytical space.
"""
pass
def get_bounding_box(self, *args):
""" getBoundingBox(self: Element,view: View) -> BoundingBoxXYZ """
pass
def get_polyloop(self):
"""
GetPolyloop(self: EnergyAnalysisSurface) -> Polyloop
Gets the planar polygon describing the opening geometry.
Returns: The planar polygon describing the opening geometry.
"""
pass
def release_unmanaged_resources(self, *args):
""" ReleaseUnmanagedResources(self: Element,disposing: bool) """
pass
def set_element_type(self, *args):
""" setElementType(self: Element,type: ElementType,incompatibleExceptionMessage: str) """
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
azimuth = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The azimuth angle for this surface.\n\n\n\nGet: Azimuth(self: EnergyAnalysisSurface) -> float\n\n\n\n'
cad_link_unique_id = property(lambda self: object(), lambda self, v: None, lambda self: None)
"The unique id of the originating CAD object's link (linked document) associated with this surface.\n\n\n\nGet: CADLinkUniqueId(self: EnergyAnalysisSurface) -> str\n\n\n\n"
cad_object_unique_id = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The unique id of the originating CAD object (model element) associated with this surface.\n\n\n\nGet: CADObjectUniqueId(self: EnergyAnalysisSurface) -> str\n\n\n\n'
corner = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The lower-left coordinate for the analytical rectangular geometry viewed from outside.\n\n\n\nGet: Corner(self: EnergyAnalysisSurface) -> XYZ\n\n\n\n'
height = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The height of the analytical rectangular geometry.\n\n\n\nGet: Height(self: EnergyAnalysisSurface) -> float\n\n\n\n'
normal = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The outward normal for this surface.\n\n\n\nGet: Normal(self: EnergyAnalysisSurface) -> XYZ\n\n\n\n'
originating_element_description = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The description for the originating Revit element.\n\n\n\nGet: OriginatingElementDescription(self: EnergyAnalysisSurface) -> str\n\n\n\n'
surface_id = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The unique identifier for the surface.\n\n\n\nGet: SurfaceId(self: EnergyAnalysisSurface) -> str\n\n\n\n'
surface_name = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The unique name identifier for this surface.\n\n\n\nGet: SurfaceName(self: EnergyAnalysisSurface) -> str\n\n\n\n'
surface_type = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The analytical surface type.\n\n\n\nGet: SurfaceType(self: EnergyAnalysisSurface) -> EnergyAnalysisSurfaceType\n\n\n\n'
tilt = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The tilt angle for this surface.\n\n\n\nGet: Tilt(self: EnergyAnalysisSurface) -> float\n\n\n\n'
type = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The gbXML surface type attribute.\n\n\n\nGet: Type(self: EnergyAnalysisSurface) -> gbXMLSurfaceType\n\n\n\n'
width = property(lambda self: object(), lambda self, v: None, lambda self: None)
'The width of the analytical rectangular geometry.\n\n\n\nGet: Width(self: EnergyAnalysisSurface) -> float\n\n\n\n'
|
__doc__ = """An object oriented solution to the problem."""
class Person:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
def __str__(self):
return self.name
class FamilyMember(Person):
def __init__(self, name):
self.name = name
def is_attendee_a_family_member(attendee):
return isinstance(attendee, FamilyMember)
def get_solution(attendees, family_members):
# head is always an attendee
attendees_who_are_family_members = [family_members[0]]
attendees_who_are_guests = []
for attendee in attendees:
if is_attendee_a_family_member(attendee):
attendees_who_are_family_members.append(attendee)
else:
attendees_who_are_guests.append(attendee)
# family members who are not attendees
family_members_who_are_not_attendees = []
for member in family_members:
# the below if condition is a bit complicated and
# relies again on family members' names being unique
# check if the "name" of the family member is present in the "names" of the attendees who are family members
# hence the "member.name" and "x.name" in the below line
if member.name not in [x.name for x in attendees_who_are_family_members]:
family_members_who_are_not_attendees.append(member)
return (
attendees_who_are_family_members,
family_members_who_are_not_attendees,
attendees_who_are_guests,
)
if __name__ == "__main__":
# # Example 1:
# family_members = [
# "head",
# "member 1",
# "member 2",
# "member 3",
# "member 4",
# "member 5",
# ]
# attendees = ["member 1", "guest 1", "member 2", "member 5", "guest 5"]
# # here the family members and attendees are still "STRING" objects.
# # let's change that with relevant object type
# family_members = [FamilyMember(x) for x in family_members]
# # print(family_members)
# # print([x.name for x in family_members])
# attendees = [
# FamilyMember("member 1"),
# Person("guest 1"),
# FamilyMember("member 2"),
# FamilyMember("member 5"),
# Person("guest 5"),
# ]
# # print(attendees)
# # print([(type(x).__name__, x.name) for x in attendees])
# Example 2"
family_members = ["Judy", "Jack", "Jessy", "David", "Gloria"]
attendees = [
"Jack",
"David",
"Judy",
]
family_members = [FamilyMember(x) for x in family_members]
# this part is somewhat ugly, have to manually create the "types" from names here
attendees = [FamilyMember("Jack"), FamilyMember("David"), Person("Judy")]
a, b, c = get_solution(attendees, family_members)
print("i", a)
print("ii", b)
print("iii", c)
|
__doc__ = 'An object oriented solution to the problem.'
class Person:
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
def __str__(self):
return self.name
class Familymember(Person):
def __init__(self, name):
self.name = name
def is_attendee_a_family_member(attendee):
    return isinstance(attendee, Familymember)
def get_solution(attendees, family_members):
attendees_who_are_family_members = [family_members[0]]
attendees_who_are_guests = []
for attendee in attendees:
if is_attendee_a_family_member(attendee):
attendees_who_are_family_members.append(attendee)
else:
attendees_who_are_guests.append(attendee)
family_members_who_are_not_attendees = []
for member in family_members:
if member.name not in [x.name for x in attendees_who_are_family_members]:
family_members_who_are_not_attendees.append(member)
return (attendees_who_are_family_members, family_members_who_are_not_attendees, attendees_who_are_guests)
if __name__ == '__main__':
family_members = ['Judy', 'Jack', 'Jessy', 'David', 'Gloria']
attendees = ['Jack', 'David', 'Judy']
    family_members = [Familymember(x) for x in family_members]
    attendees = [Familymember('Jack'), Familymember('David'), Person('Judy')]
(a, b, c) = get_solution(attendees, family_members)
print('i', a)
print('ii', b)
print('iii', c)
|
# -- Project information -----------------------------------------------------
project = 'LUNA'
copyright = '2020 Great Scott Gadgets'
author = 'Katherine J. Temkin'
# -- General configuration ---------------------------------------------------
master_doc = 'index'
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon'
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_css_files = ['status.css']
# -- Options for automatic documentation -------------------------------------
# Skip documenting Tests.
def autodoc_skip_member_handler(app, what, name, obj, skip, options):
return \
name.endswith("Test") or \
name.startswith('_') or \
(name == "elaborate")
def setup(app):
app.connect('autodoc-skip-member', autodoc_skip_member_handler)
|
project = 'LUNA'
copyright = '2020 Great Scott Gadgets'
author = 'Katherine J. Temkin'
master_doc = 'index'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_css_files = ['status.css']
def autodoc_skip_member_handler(app, what, name, obj, skip, options):
return name.endswith('Test') or name.startswith('_') or name == 'elaborate'
def setup(app):
app.connect('autodoc-skip-member', autodoc_skip_member_handler)
|
def sample_single_dim(action_space_list_each, is_act_continuous):
each = []
if is_act_continuous:
each = action_space_list_each.sample()
else:
if action_space_list_each.__class__.__name__ == "Discrete":
each = [0] * action_space_list_each.n
idx = action_space_list_each.sample()
each[idx] = 1
elif action_space_list_each.__class__.__name__ == "MultiDiscreteParticle":
each = []
nvec = action_space_list_each.high - action_space_list_each.low + 1
sample_indexes = action_space_list_each.sample()
for i in range(len(nvec)):
dim = nvec[i]
new_action = [0] * dim
index = sample_indexes[i]
new_action[index] = 1
each.extend(new_action)
return each
def sample(action_space_list_each, is_act_continuous):
player = []
if is_act_continuous:
for j in range(len(action_space_list_each)):
each = action_space_list_each[j].sample()
player.append(each)
else:
player = []
for j in range(len(action_space_list_each)):
# each = [0] * action_space_list_each[j]
# idx = np.random.randint(action_space_list_each[j])
if action_space_list_each[j].__class__.__name__ == "Discrete":
each = [0] * action_space_list_each[j].n
idx = action_space_list_each[j].sample()
each[idx] = 1
player.append(each)
elif action_space_list_each[j].__class__.__name__ == "MultiDiscreteParticle":
each = []
nvec = action_space_list_each[j].high
sample_indexes = action_space_list_each[j].sample()
for i in range(len(nvec)):
dim = nvec[i] + 1
new_action = [0] * dim
index = sample_indexes[i]
new_action[index] = 1
each.extend(new_action)
player.append(each)
return player
|
def sample_single_dim(action_space_list_each, is_act_continuous):
each = []
if is_act_continuous:
each = action_space_list_each.sample()
elif action_space_list_each.__class__.__name__ == 'Discrete':
each = [0] * action_space_list_each.n
idx = action_space_list_each.sample()
each[idx] = 1
elif action_space_list_each.__class__.__name__ == 'MultiDiscreteParticle':
each = []
nvec = action_space_list_each.high - action_space_list_each.low + 1
sample_indexes = action_space_list_each.sample()
for i in range(len(nvec)):
dim = nvec[i]
new_action = [0] * dim
index = sample_indexes[i]
new_action[index] = 1
each.extend(new_action)
return each
def sample(action_space_list_each, is_act_continuous):
player = []
if is_act_continuous:
for j in range(len(action_space_list_each)):
each = action_space_list_each[j].sample()
player.append(each)
else:
player = []
for j in range(len(action_space_list_each)):
if action_space_list_each[j].__class__.__name__ == 'Discrete':
each = [0] * action_space_list_each[j].n
idx = action_space_list_each[j].sample()
each[idx] = 1
player.append(each)
elif action_space_list_each[j].__class__.__name__ == 'MultiDiscreteParticle':
each = []
nvec = action_space_list_each[j].high
sample_indexes = action_space_list_each[j].sample()
for i in range(len(nvec)):
dim = nvec[i] + 1
new_action = [0] * dim
index = sample_indexes[i]
new_action[index] = 1
each.extend(new_action)
player.append(each)
return player
|
class Solution(object):
def match_note_to_magazine(self, ransom_note, magazine):
if ransom_note is None or magazine is None:
raise TypeError('ransom_note or magazine cannot be None')
seen_chars = {}
for char in magazine:
if char in seen_chars:
seen_chars[char] += 1
else:
seen_chars[char] = 1
for char in ransom_note:
try:
seen_chars[char] -= 1
except KeyError:
return False
if seen_chars[char] < 0:
return False
return True
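# Usage sketch (hypothetical strings): each magazine character may be used at
# most once, so 'aab' can be cut out of 'aabb' but 'aabbc' cannot.
if __name__ == '__main__':
    solution = Solution()
    print(solution.match_note_to_magazine('aab', 'aabb'))    # True
    print(solution.match_note_to_magazine('aabbc', 'aabb'))  # False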
|
class Solution(object):
def match_note_to_magazine(self, ransom_note, magazine):
if ransom_note is None or magazine is None:
            raise TypeError('ransom_note or magazine cannot be None')
seen_chars = {}
for char in magazine:
if char in seen_chars:
seen_chars[char] += 1
else:
seen_chars[char] = 1
for char in ransom_note:
try:
seen_chars[char] -= 1
except KeyError:
return False
if seen_chars[char] < 0:
return False
return True
|
def on_message_deleted(msg, server):
return "Deleted: {}".format(msg["previous_message"]["text"])
def on_message_changed(msg, server):
text = msg.get("message", {"text": ""}).get("text", "")
if text.startswith("!echo"):
return "Changed: {}".format(text)
def on_message(msg, server):
if msg["text"].startswith("!echo"):
return msg.get("text", "")
def on_channel_join(msg, server):
return "saw user {} join".format(msg['user'])
|
def on_message_deleted(msg, server):
return 'Deleted: {}'.format(msg['previous_message']['text'])
def on_message_changed(msg, server):
text = msg.get('message', {'text': ''}).get('text', '')
if text.startswith('!echo'):
return 'Changed: {}'.format(text)
def on_message(msg, server):
if msg['text'].startswith('!echo'):
return msg.get('text', '')
def on_channel_join(msg, server):
return 'saw user {} join'.format(msg['user'])
|
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/hunter/github_projects/ROS/catkin_ws/src/my_package/msg/Num.msg"
services_str = "/home/hunter/github_projects/ROS/catkin_ws/src/my_package/srv/AddTwoInts.srv"
pkg_name = "my_package"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "my_package;/home/hunter/github_projects/ROS/catkin_ws/src/my_package/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
|
messages_str = '/home/hunter/github_projects/ROS/catkin_ws/src/my_package/msg/Num.msg'
services_str = '/home/hunter/github_projects/ROS/catkin_ws/src/my_package/srv/AddTwoInts.srv'
pkg_name = 'my_package'
dependencies_str = 'std_msgs'
langs = 'gencpp;geneus;genlisp;gennodejs;genpy'
dep_include_paths_str = 'my_package;/home/hunter/github_projects/ROS/catkin_ws/src/my_package/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg'
python_executable = '/usr/bin/python'
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = '/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py'
|
class FunctionDifferentialRegistry(dict):
def __setitem__(self, k, v):
if not callable(k):
raise ValueError("key must be callable")
if not callable(v):
raise ValueError("value must be callable")
super().__setitem__(k, v)
global_registry = FunctionDifferentialRegistry()
def diff(grad_f):
def inner(f, registry=None):
register_diff(f, grad_f, registry=registry)
return f
return inner
def register_diff(f, grad_f, registry=None):
if registry is None:
registry = global_registry
registry[f] = grad_f
|
class FunctionDifferentialRegistry(dict):
    def __setitem__(self, k, v):
        if not callable(k):
            raise ValueError('key must be callable')
        if not callable(v):
            raise ValueError('value must be callable')
        super().__setitem__(k, v)
global_registry = FunctionDifferentialRegistry()
def diff(grad_f):
def inner(f, registry=None):
register_diff(f, grad_f, registry=registry)
return f
return inner
def register_diff(f, grad_f, registry=None):
if registry is None:
registry = global_registry
registry[f] = grad_f
|
"""
The dependencies for running the gen_rust_project binary.
"""
load("//util/import/raze:crates.bzl", "rules_rust_util_import_fetch_remote_crates")
def import_deps():
rules_rust_util_import_fetch_remote_crates()
# For legacy support
gen_rust_project_dependencies = import_deps
|
"""
The dependencies for running the gen_rust_project binary.
"""
load('//util/import/raze:crates.bzl', 'rules_rust_util_import_fetch_remote_crates')
def import_deps():
rules_rust_util_import_fetch_remote_crates()
gen_rust_project_dependencies = import_deps
|
def timeconverter(days):
years = days // 365
days = days % 365
months = days // 30
days = days % 30
print(f"{years} years, {months} months and {days} days")
days = input("Enter number of days: ")
days = int(days)
timeconverter(days)
|
def timeconverter(days):
years = days // 365
days = days % 365
months = days // 30
days = days % 30
print(f'{years} years, {months} months and {days} days')
days = input('Enter number of days: ')
days = int(days)
timeconverter(days)
|
print("hello world")
print("my name is mark zed bruyg")
print("555")
karachi_city = ["gulshan", "johar", "malir", "defence", "liyari"]
print(karachi_city[2])
names_of_student = ["maaz", "musab", "usman", "shuraim", "sudais", "ausaf"]
print(names_of_student)
age = 12
amount_to_increment = 3
age += amount_to_increment
print(age)
|
print('hello world')
print('my name is mark zed bruyg')
print('555')
karachi_city = ['gulshan', 'johar', 'malir', 'defence', 'liyari']
print(karachi_city[2])
names_of_student = ['maaz', 'musab', 'usman', 'shuraim', 'sudais', 'ausaf']
print(names_of_student)
age = 12
amount_to_increment = 3
age += amount_to_increment
print(age)
|
# (raw name, table column)
COLS=[
('cxid', 'cxid'),
('dts', 'dts'),
('Existing Meter Number', 'old_meter_number'),
('Existing Meter Reading', 'old_meter_reading'),
('New Meter Number', 'new_meter_number'),
('New Meter Reading', 'new_meter_reading'),
('Geo Tag', 'geo_tag'),
('GPS', 'gps'),
('SRV Man Initials', 'initials'),
('Meter Site Insp.', 'insp_state'),
('Meter Site Insp. Notes', 'insp_notes'),
# ('SO No', 'sono'),
# ('Map No', 'mapno'),
('Photo1', 'photo1'),
('Photo2', 'photo2'),
('Photo3', 'photo3')
]
DATA_FIELDS=[
'cxid',
'old_meter_number',
'old_meter_reading',
'new_meter_number',
'new_meter_reading',
'dts'
]
LOC_FIELDS=[
'id',
'gps_x',
'gps_y',
'geo_tag'
]
PHOTO_FIELDS=[
'id',
'photo1',
'photo2',
'photo3'
]
SITE_INSP_FIELDS=[
'id',
'initials',
'insp_state',
'insp_notes'
]
class RF_Changeout():
@staticmethod
def from_row(row):
self = RF_Changeout()
for key, col in COLS:
if key in row:
setattr(self, col, row[key])
self.fix_gps()
return self
def __getitem__(self, key):
return getattr(self, key, None)
def __setitem__(self, key, value):
return setattr(self, key, value)
def fix_gps(self):
gps = getattr(self, 'gps', None)
if gps and not 'None' in gps:
u, v = gps.replace(' ', '').split(',')
if u and v:
self.gps_y = float(u) # Latitude
self.gps_x = float(v) # Longitude
del self.gps
def get_data(self):
result=list()
for field in DATA_FIELDS:
result.append(self[field])
return result
def get_loc(self):
result=list()
for field in LOC_FIELDS:
result.append(self[field])
return result
def get_photos(self):
result=list()
for field in PHOTO_FIELDS:
result.append(self[field])
return result
def get_site_insp(self):
result=list()
for field in SITE_INSP_FIELDS:
result.append(self[field])
return result
def collect_rows(row, data):
oid = row['order_id']
key = row['data_point']
val = row['value']
if not oid in data:
data[oid] = dict()
data[oid]['dts'] = row['dts']
data[oid][key] = val
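# Minimal usage sketch with a hypothetical raw row: only the keys listed in
# COLS are copied onto the object, and fix_gps() splits a "lat, lon" string
# into float coordinates.
if __name__ == '__main__':
    raw = {'cxid': '42', 'dts': '2020-01-01', 'GPS': '35.1, -90.2'}
    rec = RF_Changeout.from_row(raw)
    print(rec.gps_x, rec.gps_y)  # -90.2 35.1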
|
cols = [('cxid', 'cxid'), ('dts', 'dts'), ('Existing Meter Number', 'old_meter_number'), ('Existing Meter Reading', 'old_meter_reading'), ('New Meter Number', 'new_meter_number'), ('New Meter Reading', 'new_meter_reading'), ('Geo Tag', 'geo_tag'), ('GPS', 'gps'), ('SRV Man Initials', 'initials'), ('Meter Site Insp.', 'insp_state'), ('Meter Site Insp. Notes', 'insp_notes'), ('Photo1', 'photo1'), ('Photo2', 'photo2'), ('Photo3', 'photo3')]
data_fields = ['cxid', 'old_meter_number', 'old_meter_reading', 'new_meter_number', 'new_meter_reading', 'dts']
loc_fields = ['id', 'gps_x', 'gps_y', 'geo_tag']
photo_fields = ['id', 'photo1', 'photo2', 'photo3']
site_insp_fields = ['id', 'initials', 'insp_state', 'insp_notes']
class Rf_Changeout:
@staticmethod
def from_row(row):
        self = Rf_Changeout()
        for (key, col) in cols:
if key in row:
setattr(self, col, row[key])
self.fix_gps()
return self
def __getitem__(self, key):
return getattr(self, key, None)
def __setitem__(self, key, value):
return setattr(self, key, value)
def fix_gps(self):
gps = getattr(self, 'gps', None)
if gps and (not 'None' in gps):
(u, v) = gps.replace(' ', '').split(',')
if u and v:
self.gps_y = float(u)
self.gps_x = float(v)
del self.gps
def get_data(self):
result = list()
        for field in data_fields:
result.append(self[field])
return result
def get_loc(self):
result = list()
        for field in loc_fields:
result.append(self[field])
return result
def get_photos(self):
result = list()
        for field in photo_fields:
result.append(self[field])
return result
def get_site_insp(self):
result = list()
        for field in site_insp_fields:
result.append(self[field])
return result
def collect_rows(row, data):
oid = row['order_id']
key = row['data_point']
val = row['value']
if not oid in data:
data[oid] = dict()
data[oid]['dts'] = row['dts']
data[oid][key] = val
|
class Solution:
def simplifyPath(self, path: str) -> str:
stk = []
for p in path.split('/'):
if p == '..':
if stk:
stk.pop()
elif p and p != '.':
stk.append(p)
return '/' + '/'.join(stk)
|
class Solution:
def simplify_path(self, path: str) -> str:
stk = []
for p in path.split('/'):
if p == '..':
if stk:
stk.pop()
elif p and p != '.':
stk.append(p)
return '/' + '/'.join(stk)
|
class Fraction(object):
def __init__(self, num, den):
self.__num = num
self.__den = den
self.reduce()
def __str__(self):
return "%d/%d" % (self.__num, self.__den)
def __invert__(self):
return Fraction(self.__den,self.__num)
def __neg__(self):
return Fraction(-(self.__num), self.__den)
def __pow__(self, pow):
return Fraction(self.__num **pow,self.__den**pow)
def __float__(self):
return float(self.__num/self.__den)
def __int__(self):
return int(self.__num/self.__den)
def reduce(self):
g = Fraction.gcd(self.__num, self.__den)
self.__num /= g
self.__den /= g
@staticmethod
def gcd(n, m):
if m == 0:
return n
else:
return Fraction.gcd(m, n % m)
|
class Fraction(object):
def __init__(self, num, den):
self.__num = num
self.__den = den
self.reduce()
def __str__(self):
return '%d/%d' % (self.__num, self.__den)
def __invert__(self):
return fraction(self.__den, self.__num)
def __neg__(self):
return fraction(-self.__num, self.__den)
def __pow__(self, pow):
return fraction(self.__num ** pow, self.__den ** pow)
def __float__(self):
return float(self.__num / self.__den)
def __int__(self):
return int(self.__num / self.__den)
def reduce(self):
g = Fraction.gcd(self.__num, self.__den)
self.__num /= g
self.__den /= g
@staticmethod
def gcd(n, m):
if m == 0:
return n
else:
return Fraction.gcd(m, n % m)
|
"""Configuration file for common models/experiments"""
MAIN_PARAMS = {
'sent140': {
'small': (10, 2, 2),
'medium': (16, 2, 2),
'large': (24, 2, 2)
},
'femnist': {
'small': (30, 10, 2),
'medium': (100, 10, 2),
'large': (400, 20, 2)
},
'uni-femnist': {
'small': (30, 10, 2),
'medium': (100, 10, 2),
'large': (400, 20, 2)
},
'shakespeare': {
'small': (6, 2, 2),
'medium': (8, 2, 2),
'large': (20, 1, 2)
},
'celeba': {
'small': (30, 10, 2),
'medium': (100, 10, 2),
'large': (400, 20, 2)
},
'synthetic': {
'small': (6, 2, 2),
'medium': (8, 2, 2),
'large': (20, 1, 2)
},
'reddit': {
'small': (6, 2, 2),
'medium': (8, 2, 2),
'large': (20, 1, 2)
},
}
"""dict: Specifies execution parameters (tot_num_rounds, eval_every_num_rounds, clients_per_round)"""
MODEL_PARAMS = {
'sent140.bag_dnn': (0.0003, 2), # lr, num_classes
'sent140.stacked_lstm': (0.0003, 25, 2, 100), # lr, seq_len, num_classes, num_hidden
'sent140.bag_log_reg': (0.0003, 2), # lr, num_classes
'femnist.cnn': (0.0003, 62), # lr, num_classes
'shakespeare.stacked_lstm': (0.0003, 80, 80, 256), # lr, seq_len, num_classes, num_hidden
'celeba.cnn': (0.1, 2), # lr, num_classes
'synthetic.log_reg': (0.0003, 5, 60), # lr, num_classes, input_dim
'reddit.stacked_lstm': (0.0003, 10, 256, 2), # lr, seq_len, num_hidden, num_layers
}
"""dict: Model specific parameter specification"""
ACCURACY_KEY = 'accuracy'
BYTES_WRITTEN_KEY = 'bytes_written'
BYTES_READ_KEY = 'bytes_read'
LOCAL_COMPUTATIONS_KEY = 'local_computations'
NUM_ROUND_KEY = 'round_number'
NUM_SAMPLES_KEY = 'num_samples'
CLIENT_ID_KEY = 'client_id'
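# Example of consuming these tables (values taken from the dicts above): each
# MAIN_PARAMS entry unpacks as (tot_num_rounds, eval_every_num_rounds, clients_per_round).
num_rounds, eval_every, clients_per_round = MAIN_PARAMS['femnist']['small']
assert (num_rounds, eval_every, clients_per_round) == (30, 10, 2)
lr, num_classes = MODEL_PARAMS['celeba.cnn']
assert (lr, num_classes) == (0.1, 2)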
|
"""Configuration file for common models/experiments"""
main_params = {'sent140': {'small': (10, 2, 2), 'medium': (16, 2, 2), 'large': (24, 2, 2)}, 'femnist': {'small': (30, 10, 2), 'medium': (100, 10, 2), 'large': (400, 20, 2)}, 'uni-femnist': {'small': (30, 10, 2), 'medium': (100, 10, 2), 'large': (400, 20, 2)}, 'shakespeare': {'small': (6, 2, 2), 'medium': (8, 2, 2), 'large': (20, 1, 2)}, 'celeba': {'small': (30, 10, 2), 'medium': (100, 10, 2), 'large': (400, 20, 2)}, 'synthetic': {'small': (6, 2, 2), 'medium': (8, 2, 2), 'large': (20, 1, 2)}, 'reddit': {'small': (6, 2, 2), 'medium': (8, 2, 2), 'large': (20, 1, 2)}}
'dict: Specifies execution parameters (tot_num_rounds, eval_every_num_rounds, clients_per_round)'
model_params = {'sent140.bag_dnn': (0.0003, 2), 'sent140.stacked_lstm': (0.0003, 25, 2, 100), 'sent140.bag_log_reg': (0.0003, 2), 'femnist.cnn': (0.0003, 62), 'shakespeare.stacked_lstm': (0.0003, 80, 80, 256), 'celeba.cnn': (0.1, 2), 'synthetic.log_reg': (0.0003, 5, 60), 'reddit.stacked_lstm': (0.0003, 10, 256, 2)}
'dict: Model specific parameter specification'
accuracy_key = 'accuracy'
bytes_written_key = 'bytes_written'
bytes_read_key = 'bytes_read'
local_computations_key = 'local_computations'
num_round_key = 'round_number'
num_samples_key = 'num_samples'
client_id_key = 'client_id'
|
# filter1.py - filter out duplicate items from a list while preserving order
seen = set()
def is_unique(item):
    if item in seen:
        return False
    seen.add(item)
    return True
mylist = ["Orange", "Apple", "Banana", "Peach", "Banana"]
new_list = list(filter(is_unique, mylist))
print(new_list)  # ['Orange', 'Apple', 'Banana', 'Peach']
|
seen = set()
def is_unique(item):
    if item in seen:
        return False
    seen.add(item)
    return True
mylist = ['Orange', 'Apple', 'Banana', 'Peach', 'Banana']
new_list = list(filter(is_unique, mylist))
print(new_list)
|
class strongly_connected_component():
def __init__(self, graph=None, visited=None):
self.graph = dict()
self.visited = dict()
self.stack=list()
def add_vertex(self, v, graph, visited):
if not graph.get(v):
graph[v] = []
visited[v]=0
def add_edge(self, v1, v2, e, graph = None):
if v1 not in graph:
print("Vertex ", v1, " does not exist.")
elif v2 not in graph:
print("Vertex ", v2, " does not exist.")
else:
temp = [v2, e]
graph[v1].append(temp)
def reverse_graph(self, original_graph, reverse_graph, rev_graph_visited):
for key in original_graph.keys():
self.add_vertex(key, reverse_graph, rev_graph_visited)
for src, value in original_graph.items():
for dest in value:
self.add_edge(dest[0], src, dest[1], reverse_graph)
def dfs_visit(self, v,visited):
visited[v]=1
for edges in self.graph[v]:
if self.visited[edges[0]]!=1:
self.dfs_visit(edges[0],self.visited)
self.stack.append(v)
def scc_dfs(self,v, reverse_graph, reverse_visited, res):
reverse_visited[v] = 1
res.append(v)
for edges in reverse_graph[v]:
if reverse_visited[edges[0]]!=1:
self.scc_dfs(edges[0], reverse_graph ,reverse_visited, res)
def dfs_main(self):
for key, value in self.graph.items():
if self.visited[key] != 1:
self.dfs_visit(key,self.visited)
def strongly_connected_components_driver(self):
reverse_graph = dict()
reverse_graph_visited = dict()
res = []
final = []
self.dfs_main()
self.reverse_graph(self.graph, reverse_graph, reverse_graph_visited)
while self.stack:
vertex = self.stack.pop()
if reverse_graph_visited[vertex] != 1:
self.scc_dfs(vertex, reverse_graph, reverse_graph_visited, res)
final.append(res)
res = []
return final
def scc_main(self, fileName='directedGraph1.txt'):
sc = strongly_connected_component()
fileLines = []
with open(fileName,'r') as graph_file:
fileLines = graph_file.read().splitlines()
graph_info = fileLines[0].split(' ')
number_of_vertices = graph_info[0]
number_of_edges = graph_info[1]
for i in range(1, len(fileLines)-1):
edge_info = fileLines[i].split(' ')
vertex_src = edge_info[0]
vertex_dest = edge_info[1]
edge_weight = edge_info[2]
sc.add_vertex(vertex_src, sc.graph, sc.visited)
sc.add_vertex(vertex_dest, sc.graph, sc.visited)
sc.add_edge(vertex_src, vertex_dest, int(edge_weight),sc.graph)
print("The strong connected components are:", sc.strongly_connected_components_driver())
if __name__ == "__main__":
menu = { 1: 'Directed Graph 1', 2: 'Directed Graph 2',3: 'Directed Graph 3',4: 'Directed Graph 4', 5: 'Exit'}
d = strongly_connected_component()
fileName = ''
while True:
print('--------------------------------------------------')
for key, value in menu.items():
print(key,'-',value)
select_option = ''
try:
select_option = int(input('Please select the graph to be used to get the strongly connected component: '))
except:
print('Please input a number')
if select_option == 1:
fileName = 'scc_1.txt'
d.scc_main(fileName)
elif select_option == 2:
fileName = 'scc_2.txt'
d.scc_main(fileName)
elif select_option == 3:
fileName = 'scc_3.txt'
d.scc_main(fileName)
elif select_option == 4:
fileName = 'scc_4.txt'
d.scc_main(fileName)
elif select_option == 5:
break
else:
print('Please enter input from the menu options')
|
class Strongly_Connected_Component:
def __init__(self, graph=None, visited=None):
self.graph = dict()
self.visited = dict()
self.stack = list()
def add_vertex(self, v, graph, visited):
if not graph.get(v):
graph[v] = []
visited[v] = 0
def add_edge(self, v1, v2, e, graph=None):
if v1 not in graph:
print('Vertex ', v1, ' does not exist.')
elif v2 not in graph:
print('Vertex ', v2, ' does not exist.')
else:
temp = [v2, e]
graph[v1].append(temp)
def reverse_graph(self, original_graph, reverse_graph, rev_graph_visited):
for key in original_graph.keys():
self.add_vertex(key, reverse_graph, rev_graph_visited)
for (src, value) in original_graph.items():
for dest in value:
self.add_edge(dest[0], src, dest[1], reverse_graph)
def dfs_visit(self, v, visited):
visited[v] = 1
for edges in self.graph[v]:
if self.visited[edges[0]] != 1:
self.dfs_visit(edges[0], self.visited)
self.stack.append(v)
def scc_dfs(self, v, reverse_graph, reverse_visited, res):
reverse_visited[v] = 1
res.append(v)
for edges in reverse_graph[v]:
if reverse_visited[edges[0]] != 1:
self.scc_dfs(edges[0], reverse_graph, reverse_visited, res)
def dfs_main(self):
for (key, value) in self.graph.items():
if self.visited[key] != 1:
self.dfs_visit(key, self.visited)
def strongly_connected_components_driver(self):
reverse_graph = dict()
reverse_graph_visited = dict()
res = []
final = []
self.dfs_main()
self.reverse_graph(self.graph, reverse_graph, reverse_graph_visited)
while self.stack:
vertex = self.stack.pop()
if reverse_graph_visited[vertex] != 1:
self.scc_dfs(vertex, reverse_graph, reverse_graph_visited, res)
final.append(res)
res = []
return final
def scc_main(self, fileName='directedGraph1.txt'):
        sc = Strongly_Connected_Component()
file_lines = []
with open(fileName, 'r') as graph_file:
file_lines = graph_file.read().splitlines()
        graph_info = file_lines[0].split(' ')
number_of_vertices = graph_info[0]
number_of_edges = graph_info[1]
        for i in range(1, len(file_lines) - 1):
            edge_info = file_lines[i].split(' ')
vertex_src = edge_info[0]
vertex_dest = edge_info[1]
edge_weight = edge_info[2]
sc.add_vertex(vertex_src, sc.graph, sc.visited)
sc.add_vertex(vertex_dest, sc.graph, sc.visited)
sc.add_edge(vertex_src, vertex_dest, int(edge_weight), sc.graph)
print('The strong connected components are:', sc.strongly_connected_components_driver())
if __name__ == '__main__':
menu = {1: 'Directed Graph 1', 2: 'Directed Graph 2', 3: 'Directed Graph 3', 4: 'Directed Graph 4', 5: 'Exit'}
    d = Strongly_Connected_Component()
file_name = ''
while True:
print('--------------------------------------------------')
for (key, value) in menu.items():
print(key, '-', value)
select_option = ''
try:
select_option = int(input('Please select the graph to be used to get the strongly connected component: '))
except:
print('Please input a number')
if select_option == 1:
file_name = 'scc_1.txt'
            d.scc_main(file_name)
elif select_option == 2:
file_name = 'scc_2.txt'
            d.scc_main(file_name)
elif select_option == 3:
file_name = 'scc_3.txt'
            d.scc_main(file_name)
elif select_option == 4:
file_name = 'scc_4.txt'
            d.scc_main(file_name)
elif select_option == 5:
break
else:
print('Please enter input from the menu options')
|
AVAILABLE_LANGUAGES = {
"english": "en",
"indonesian": "id",
"czech": "cs",
"german": "de",
"spanish": "es-419",
"french": "fr",
"italian": "it",
"latvian": "lv",
"lithuanian": "lt",
"hungarian": "hu",
"dutch": "nl",
"norwegian": "no",
"polish": "pl",
"portuguese brasil": "pt-419",
"portuguese portugal": "pt-150",
"romanian": "ro",
"slovak": "sk",
"slovenian": "sl",
"swedish": "sv",
"vietnamese": "vi",
"turkish": "tr",
"greek": "el",
"bulgarian": "bg",
"russian": "ru",
"serbian": "sr",
"ukrainian": "uk",
"hebrew": "he",
"arabic": "ar",
"marathi": "mr",
"hindi": "hi",
"bengali": "bn",
"tamil": "ta",
"telugu": "te",
"malyalam": "ml",
"thai": "th",
"chinese simplified": "zh-Hans",
"chinese traditional": "zh-Hant",
"japanese": "ja",
"korean": "ko"
}
AVAILABLE_COUNTRIES = {
"Australia": "AU",
"Botswana": "BW",
"Canada ": "CA",
"Ethiopia": "ET",
"Ghana": "GH",
"India ": "IN",
"Indonesia": "ID",
"Ireland": "IE",
"Israel ": "IL",
"Kenya": "KE",
"Latvia": "LV",
"Malaysia": "MY",
"Namibia": "NA",
"New Zealand": "NZ",
"Nigeria": "NG",
"Pakistan": "PK",
"Philippines": "PH",
"Singapore": "SG",
"South Africa": "ZA",
"Tanzania": "TZ",
"Uganda": "UG",
"United Kingdom": "GB",
"United States": "US",
"Zimbabwe": "ZW",
"Czech Republic": "CZ",
"Germany": "DE",
"Austria": "AT",
"Switzerland": "CH",
"Argentina": "AR",
"Chile": "CL",
"Colombia": "CO",
"Cuba": "CU",
"Mexico": "MX",
"Peru": "PE",
"Venezuela": "VE",
"Belgium ": "BE",
"France": "FR",
"Morocco": "MA",
"Senegal": "SN",
"Italy": "IT",
"Lithuania": "LT",
"Hungary": "HU",
"Netherlands": "NL",
"Norway": "NO",
"Poland": "PL",
"Brazil": "BR",
"Portugal": "PT",
"Romania": "RO",
"Slovakia": "SK",
"Slovenia": "SI",
"Sweden": "SE",
"Vietnam": "VN",
"Turkey": "TR",
"Greece": "GR",
"Bulgaria": "BG",
"Russia": "RU",
"Ukraine ": "UA",
"Serbia": "RS",
"United Arab Emirates": "AE",
"Saudi Arabia": "SA",
"Lebanon": "LB",
"Egypt": "EG",
"Bangladesh": "BD",
"Thailand": "TH",
"China": "CN",
"Taiwan": "TW",
"Hong Kong": "HK",
"Japan": "JP",
"Republic of Korea": "KR"
}
|
available_languages = {'english': 'en', 'indonesian': 'id', 'czech': 'cs', 'german': 'de', 'spanish': 'es-419', 'french': 'fr', 'italian': 'it', 'latvian': 'lv', 'lithuanian': 'lt', 'hungarian': 'hu', 'dutch': 'nl', 'norwegian': 'no', 'polish': 'pl', 'portuguese brasil': 'pt-419', 'portuguese portugal': 'pt-150', 'romanian': 'ro', 'slovak': 'sk', 'slovenian': 'sl', 'swedish': 'sv', 'vietnamese': 'vi', 'turkish': 'tr', 'greek': 'el', 'bulgarian': 'bg', 'russian': 'ru', 'serbian': 'sr', 'ukrainian': 'uk', 'hebrew': 'he', 'arabic': 'ar', 'marathi': 'mr', 'hindi': 'hi', 'bengali': 'bn', 'tamil': 'ta', 'telugu': 'te', 'malyalam': 'ml', 'thai': 'th', 'chinese simplified': 'zh-Hans', 'chinese traditional': 'zh-Hant', 'japanese': 'ja', 'korean': 'ko'}
available_countries = {'Australia': 'AU', 'Botswana': 'BW', 'Canada ': 'CA', 'Ethiopia': 'ET', 'Ghana': 'GH', 'India ': 'IN', 'Indonesia': 'ID', 'Ireland': 'IE', 'Israel ': 'IL', 'Kenya': 'KE', 'Latvia': 'LV', 'Malaysia': 'MY', 'Namibia': 'NA', 'New Zealand': 'NZ', 'Nigeria': 'NG', 'Pakistan': 'PK', 'Philippines': 'PH', 'Singapore': 'SG', 'South Africa': 'ZA', 'Tanzania': 'TZ', 'Uganda': 'UG', 'United Kingdom': 'GB', 'United States': 'US', 'Zimbabwe': 'ZW', 'Czech Republic': 'CZ', 'Germany': 'DE', 'Austria': 'AT', 'Switzerland': 'CH', 'Argentina': 'AR', 'Chile': 'CL', 'Colombia': 'CO', 'Cuba': 'CU', 'Mexico': 'MX', 'Peru': 'PE', 'Venezuela': 'VE', 'Belgium ': 'BE', 'France': 'FR', 'Morocco': 'MA', 'Senegal': 'SN', 'Italy': 'IT', 'Lithuania': 'LT', 'Hungary': 'HU', 'Netherlands': 'NL', 'Norway': 'NO', 'Poland': 'PL', 'Brazil': 'BR', 'Portugal': 'PT', 'Romania': 'RO', 'Slovakia': 'SK', 'Slovenia': 'SI', 'Sweden': 'SE', 'Vietnam': 'VN', 'Turkey': 'TR', 'Greece': 'GR', 'Bulgaria': 'BG', 'Russia': 'RU', 'Ukraine ': 'UA', 'Serbia': 'RS', 'United Arab Emirates': 'AE', 'Saudi Arabia': 'SA', 'Lebanon': 'LB', 'Egypt': 'EG', 'Bangladesh': 'BD', 'Thailand': 'TH', 'China': 'CN', 'Taiwan': 'TW', 'Hong Kong': 'HK', 'Japan': 'JP', 'Republic of Korea': 'KR'}
|
sala = []
def AdicionarSala(cod_sala,lotacao):
aux = [cod_sala,lotacao]
sala.append(aux)
print (" === Sala adicionada === ")
def StatusOcupada(cod_sala):
for s in sala:
if (s[0] == cod_sala):
s[1] = "Ocupada"
return s
return None
def StatusLivre(cod_sala):
for s in sala:
if (s[0] == cod_sala):
s[1] = "Ocupada"
return s
return None
def BuscarSala(cod_sala):
for s in sala:
if (s[0] == cod_sala):
print (s)
return s
return None
def ListarSala():
global sala
print (sala)
return sala
def RemoverSala(cod_sala):
for s in sala:
if (s[0] == cod_sala):
sala.remove(s)
print (" ==== Sala removida === ")
return True
return False
def RemoverTodasSalas():
global sala
sala = []
return sala
def IniciarSala():
AdicionarSala(1,"livre")
AdicionarSala(2,"ocupada")
|
sala = []
def adicionar_sala(cod_sala, lotacao):
aux = [cod_sala, lotacao]
sala.append(aux)
print(' === Sala adicionada === ')
def status_ocupada(cod_sala):
for s in sala:
if s[0] == cod_sala:
s[1] = 'Ocupada'
return s
return None
def status_livre(cod_sala):
for s in sala:
if s[0] == cod_sala:
            s[1] = 'Livre'
return s
return None
def buscar_sala(cod_sala):
for s in sala:
if s[0] == cod_sala:
print(s)
return s
return None
def listar_sala():
global sala
print(sala)
return sala
def remover_sala(cod_sala):
for s in sala:
if s[0] == cod_sala:
sala.remove(s)
print(' ==== Sala removida === ')
return True
return False
def remover_todas_salas():
global sala
sala = []
return sala
def iniciar_sala():
adicionar_sala(1, 'livre')
adicionar_sala(2, 'ocupada')
|
"""
All wgpu structs.
"""
# THIS CODE IS AUTOGENERATED - DO NOT EDIT
# %% Structs (45)
RequestAdapterOptions = {"power_preference": "GPUPowerPreference"}
DeviceDescriptor = {
"label": "str",
"extensions": "GPUExtensionName-list",
"limits": "GPULimits",
}
Limits = {
"max_bind_groups": "GPUSize32",
"max_dynamic_uniform_buffers_per_pipeline_layout": "GPUSize32",
"max_dynamic_storage_buffers_per_pipeline_layout": "GPUSize32",
"max_sampled_textures_per_shader_stage": "GPUSize32",
"max_samplers_per_shader_stage": "GPUSize32",
"max_storage_buffers_per_shader_stage": "GPUSize32",
"max_storage_textures_per_shader_stage": "GPUSize32",
"max_uniform_buffers_per_shader_stage": "GPUSize32",
"max_uniform_buffer_binding_size": "GPUSize32",
}
BufferDescriptor = {
"label": "str",
"size": "int",
"usage": "GPUBufferUsageFlags",
"mapped_at_creation": "bool",
}
TextureDescriptor = {
"label": "str",
"size": "GPUExtent3D",
"mip_level_count": "GPUIntegerCoordinate",
"sample_count": "GPUSize32",
"dimension": "GPUTextureDimension",
"format": "GPUTextureFormat",
"usage": "GPUTextureUsageFlags",
}
TextureViewDescriptor = {
"label": "str",
"format": "GPUTextureFormat",
"dimension": "GPUTextureViewDimension",
"aspect": "GPUTextureAspect",
"base_mip_level": "GPUIntegerCoordinate",
"mip_level_count": "GPUIntegerCoordinate",
"base_array_layer": "GPUIntegerCoordinate",
"array_layer_count": "GPUIntegerCoordinate",
}
SamplerDescriptor = {
"label": "str",
"address_mode_u": "GPUAddressMode",
"address_mode_v": "GPUAddressMode",
"address_mode_w": "GPUAddressMode",
"mag_filter": "GPUFilterMode",
"min_filter": "GPUFilterMode",
"mipmap_filter": "GPUFilterMode",
"lod_min_clamp": "float",
"lod_max_clamp": "float",
"compare": "GPUCompareFunction",
}
BindGroupLayoutDescriptor = {"label": "str", "entries": "GPUBindGroupLayoutEntry-list"}
BindGroupLayoutEntry = {
"binding": "GPUIndex32",
"visibility": "GPUShaderStageFlags",
"type": "GPUBindingType",
"has_dynamic_offset": "bool",
"min_buffer_binding_size": "int",
"view_dimension": "GPUTextureViewDimension",
"texture_component_type": "GPUTextureComponentType",
"multisampled": "bool",
"storage_texture_format": "GPUTextureFormat",
}
BindGroupDescriptor = {
"label": "str",
"layout": "GPUBindGroupLayout",
"entries": "GPUBindGroupEntry-list",
}
BindGroupEntry = {"binding": "GPUIndex32", "resource": "GPUBindingResource"}
BufferBinding = {"buffer": "GPUBuffer", "offset": "int", "size": "int"}
PipelineLayoutDescriptor = {
"label": "str",
"bind_group_layouts": "GPUBindGroupLayout-list",
}
ShaderModuleDescriptor = {"label": "str", "code": "str", "source_map": "dict"}
ProgrammableStageDescriptor = {"module": "GPUShaderModule", "entry_point": "str"}
ComputePipelineDescriptor = {
"label": "str",
"layout": "GPUPipelineLayout",
"compute_stage": "GPUProgrammableStageDescriptor",
}
RenderPipelineDescriptor = {
"label": "str",
"layout": "GPUPipelineLayout",
"vertex_stage": "GPUProgrammableStageDescriptor",
"fragment_stage": "GPUProgrammableStageDescriptor",
"primitive_topology": "GPUPrimitiveTopology",
"rasterization_state": "GPURasterizationStateDescriptor",
"color_states": "GPUColorStateDescriptor-list",
"depth_stencil_state": "GPUDepthStencilStateDescriptor",
"vertex_state": "GPUVertexStateDescriptor",
"sample_count": "GPUSize32",
"sample_mask": "GPUSampleMask",
"alpha_to_coverage_enabled": "bool",
}
RasterizationStateDescriptor = {
"front_face": "GPUFrontFace",
"cull_mode": "GPUCullMode",
"depth_bias": "GPUDepthBias",
"depth_bias_slope_scale": "float",
"depth_bias_clamp": "float",
}
ColorStateDescriptor = {
"format": "GPUTextureFormat",
"alpha_blend": "GPUBlendDescriptor",
"color_blend": "GPUBlendDescriptor",
"write_mask": "GPUColorWriteFlags",
}
BlendDescriptor = {
"src_factor": "GPUBlendFactor",
"dst_factor": "GPUBlendFactor",
"operation": "GPUBlendOperation",
}
DepthStencilStateDescriptor = {
"format": "GPUTextureFormat",
"depth_write_enabled": "bool",
"depth_compare": "GPUCompareFunction",
"stencil_front": "GPUStencilStateFaceDescriptor",
"stencil_back": "GPUStencilStateFaceDescriptor",
"stencil_read_mask": "GPUStencilValue",
"stencil_write_mask": "GPUStencilValue",
}
StencilStateFaceDescriptor = {
"compare": "GPUCompareFunction",
"fail_op": "GPUStencilOperation",
"depth_fail_op": "GPUStencilOperation",
"pass_op": "GPUStencilOperation",
}
VertexStateDescriptor = {
"index_format": "GPUIndexFormat",
"vertex_buffers": "GPUVertexBufferLayoutDescriptor?-list",
}
VertexBufferLayoutDescriptor = {
"array_stride": "int",
"step_mode": "GPUInputStepMode",
"attributes": "GPUVertexAttributeDescriptor-list",
}
VertexAttributeDescriptor = {
"format": "GPUVertexFormat",
"offset": "int",
"shader_location": "GPUIndex32",
}
CommandBufferDescriptor = {"label": "str"}
CommandEncoderDescriptor = {"label": "str"}
TextureDataLayout = {
"offset": "int",
"bytes_per_row": "GPUSize32",
"rows_per_image": "GPUSize32",
}
BufferCopyView = {
"offset": "int",
"bytes_per_row": "GPUSize32",
"rows_per_image": "GPUSize32",
"buffer": "GPUBuffer",
}
TextureCopyView = {
"texture": "GPUTexture",
"mip_level": "GPUIntegerCoordinate",
"origin": "GPUOrigin3D",
}
ImageBitmapCopyView = {"image_bitmap": "array", "origin": "GPUOrigin2D"}
ComputePassDescriptor = {"label": "str"}
RenderPassDescriptor = {
"label": "str",
"color_attachments": "GPURenderPassColorAttachmentDescriptor-list",
"depth_stencil_attachment": "GPURenderPassDepthStencilAttachmentDescriptor",
"occlusion_query_set": "GPUQuerySet",
}
RenderPassColorAttachmentDescriptor = {
"attachment": "GPUTextureView",
"resolve_target": "GPUTextureView",
"load_value": "GPULoadOp-or-GPUColor",
"store_op": "GPUStoreOp",
}
RenderPassDepthStencilAttachmentDescriptor = {
"attachment": "GPUTextureView",
"depth_load_value": "GPULoadOp-or-float",
"depth_store_op": "GPUStoreOp",
"depth_read_only": "bool",
"stencil_load_value": "GPULoadOp-or-GPUStencilValue",
"stencil_store_op": "GPUStoreOp",
"stencil_read_only": "bool",
}
RenderBundleDescriptor = {"label": "str"}
RenderBundleEncoderDescriptor = {
"label": "str",
"color_formats": "GPUTextureFormat-list",
"depth_stencil_format": "GPUTextureFormat",
"sample_count": "GPUSize32",
}
FenceDescriptor = {"label": "str", "initial_value": "GPUFenceValue"}
QuerySetDescriptor = {
"label": "str",
"type": "GPUQueryType",
"count": "GPUSize32",
"pipeline_statistics": "GPUPipelineStatisticName-list",
}
SwapChainDescriptor = {
"label": "str",
"device": "GPUDevice",
"format": "GPUTextureFormat",
"usage": "GPUTextureUsageFlags",
}
UncapturedErrorEventInit = {"error": "GPUError"}
Color = {"r": "float", "g": "float", "b": "float", "a": "float"}
Origin2D = {"x": "GPUIntegerCoordinate", "y": "GPUIntegerCoordinate"}
Origin3D = {
"x": "GPUIntegerCoordinate",
"y": "GPUIntegerCoordinate",
"z": "GPUIntegerCoordinate",
}
Extent3D = {
"width": "GPUIntegerCoordinate",
"height": "GPUIntegerCoordinate",
"depth": "GPUIntegerCoordinate",
}
|
"""
All wgpu structs.
"""
request_adapter_options = {'power_preference': 'GPUPowerPreference'}
device_descriptor = {'label': 'str', 'extensions': 'GPUExtensionName-list', 'limits': 'GPULimits'}
limits = {'max_bind_groups': 'GPUSize32', 'max_dynamic_uniform_buffers_per_pipeline_layout': 'GPUSize32', 'max_dynamic_storage_buffers_per_pipeline_layout': 'GPUSize32', 'max_sampled_textures_per_shader_stage': 'GPUSize32', 'max_samplers_per_shader_stage': 'GPUSize32', 'max_storage_buffers_per_shader_stage': 'GPUSize32', 'max_storage_textures_per_shader_stage': 'GPUSize32', 'max_uniform_buffers_per_shader_stage': 'GPUSize32', 'max_uniform_buffer_binding_size': 'GPUSize32'}
buffer_descriptor = {'label': 'str', 'size': 'int', 'usage': 'GPUBufferUsageFlags', 'mapped_at_creation': 'bool'}
texture_descriptor = {'label': 'str', 'size': 'GPUExtent3D', 'mip_level_count': 'GPUIntegerCoordinate', 'sample_count': 'GPUSize32', 'dimension': 'GPUTextureDimension', 'format': 'GPUTextureFormat', 'usage': 'GPUTextureUsageFlags'}
texture_view_descriptor = {'label': 'str', 'format': 'GPUTextureFormat', 'dimension': 'GPUTextureViewDimension', 'aspect': 'GPUTextureAspect', 'base_mip_level': 'GPUIntegerCoordinate', 'mip_level_count': 'GPUIntegerCoordinate', 'base_array_layer': 'GPUIntegerCoordinate', 'array_layer_count': 'GPUIntegerCoordinate'}
sampler_descriptor = {'label': 'str', 'address_mode_u': 'GPUAddressMode', 'address_mode_v': 'GPUAddressMode', 'address_mode_w': 'GPUAddressMode', 'mag_filter': 'GPUFilterMode', 'min_filter': 'GPUFilterMode', 'mipmap_filter': 'GPUFilterMode', 'lod_min_clamp': 'float', 'lod_max_clamp': 'float', 'compare': 'GPUCompareFunction'}
bind_group_layout_descriptor = {'label': 'str', 'entries': 'GPUBindGroupLayoutEntry-list'}
bind_group_layout_entry = {'binding': 'GPUIndex32', 'visibility': 'GPUShaderStageFlags', 'type': 'GPUBindingType', 'has_dynamic_offset': 'bool', 'min_buffer_binding_size': 'int', 'view_dimension': 'GPUTextureViewDimension', 'texture_component_type': 'GPUTextureComponentType', 'multisampled': 'bool', 'storage_texture_format': 'GPUTextureFormat'}
bind_group_descriptor = {'label': 'str', 'layout': 'GPUBindGroupLayout', 'entries': 'GPUBindGroupEntry-list'}
bind_group_entry = {'binding': 'GPUIndex32', 'resource': 'GPUBindingResource'}
buffer_binding = {'buffer': 'GPUBuffer', 'offset': 'int', 'size': 'int'}
pipeline_layout_descriptor = {'label': 'str', 'bind_group_layouts': 'GPUBindGroupLayout-list'}
shader_module_descriptor = {'label': 'str', 'code': 'str', 'source_map': 'dict'}
programmable_stage_descriptor = {'module': 'GPUShaderModule', 'entry_point': 'str'}
compute_pipeline_descriptor = {'label': 'str', 'layout': 'GPUPipelineLayout', 'compute_stage': 'GPUProgrammableStageDescriptor'}
render_pipeline_descriptor = {'label': 'str', 'layout': 'GPUPipelineLayout', 'vertex_stage': 'GPUProgrammableStageDescriptor', 'fragment_stage': 'GPUProgrammableStageDescriptor', 'primitive_topology': 'GPUPrimitiveTopology', 'rasterization_state': 'GPURasterizationStateDescriptor', 'color_states': 'GPUColorStateDescriptor-list', 'depth_stencil_state': 'GPUDepthStencilStateDescriptor', 'vertex_state': 'GPUVertexStateDescriptor', 'sample_count': 'GPUSize32', 'sample_mask': 'GPUSampleMask', 'alpha_to_coverage_enabled': 'bool'}
rasterization_state_descriptor = {'front_face': 'GPUFrontFace', 'cull_mode': 'GPUCullMode', 'depth_bias': 'GPUDepthBias', 'depth_bias_slope_scale': 'float', 'depth_bias_clamp': 'float'}
color_state_descriptor = {'format': 'GPUTextureFormat', 'alpha_blend': 'GPUBlendDescriptor', 'color_blend': 'GPUBlendDescriptor', 'write_mask': 'GPUColorWriteFlags'}
blend_descriptor = {'src_factor': 'GPUBlendFactor', 'dst_factor': 'GPUBlendFactor', 'operation': 'GPUBlendOperation'}
depth_stencil_state_descriptor = {'format': 'GPUTextureFormat', 'depth_write_enabled': 'bool', 'depth_compare': 'GPUCompareFunction', 'stencil_front': 'GPUStencilStateFaceDescriptor', 'stencil_back': 'GPUStencilStateFaceDescriptor', 'stencil_read_mask': 'GPUStencilValue', 'stencil_write_mask': 'GPUStencilValue'}
stencil_state_face_descriptor = {'compare': 'GPUCompareFunction', 'fail_op': 'GPUStencilOperation', 'depth_fail_op': 'GPUStencilOperation', 'pass_op': 'GPUStencilOperation'}
vertex_state_descriptor = {'index_format': 'GPUIndexFormat', 'vertex_buffers': 'GPUVertexBufferLayoutDescriptor?-list'}
vertex_buffer_layout_descriptor = {'array_stride': 'int', 'step_mode': 'GPUInputStepMode', 'attributes': 'GPUVertexAttributeDescriptor-list'}
vertex_attribute_descriptor = {'format': 'GPUVertexFormat', 'offset': 'int', 'shader_location': 'GPUIndex32'}
command_buffer_descriptor = {'label': 'str'}
command_encoder_descriptor = {'label': 'str'}
texture_data_layout = {'offset': 'int', 'bytes_per_row': 'GPUSize32', 'rows_per_image': 'GPUSize32'}
buffer_copy_view = {'offset': 'int', 'bytes_per_row': 'GPUSize32', 'rows_per_image': 'GPUSize32', 'buffer': 'GPUBuffer'}
texture_copy_view = {'texture': 'GPUTexture', 'mip_level': 'GPUIntegerCoordinate', 'origin': 'GPUOrigin3D'}
image_bitmap_copy_view = {'image_bitmap': 'array', 'origin': 'GPUOrigin2D'}
compute_pass_descriptor = {'label': 'str'}
render_pass_descriptor = {'label': 'str', 'color_attachments': 'GPURenderPassColorAttachmentDescriptor-list', 'depth_stencil_attachment': 'GPURenderPassDepthStencilAttachmentDescriptor', 'occlusion_query_set': 'GPUQuerySet'}
render_pass_color_attachment_descriptor = {'attachment': 'GPUTextureView', 'resolve_target': 'GPUTextureView', 'load_value': 'GPULoadOp-or-GPUColor', 'store_op': 'GPUStoreOp'}
render_pass_depth_stencil_attachment_descriptor = {'attachment': 'GPUTextureView', 'depth_load_value': 'GPULoadOp-or-float', 'depth_store_op': 'GPUStoreOp', 'depth_read_only': 'bool', 'stencil_load_value': 'GPULoadOp-or-GPUStencilValue', 'stencil_store_op': 'GPUStoreOp', 'stencil_read_only': 'bool'}
render_bundle_descriptor = {'label': 'str'}
render_bundle_encoder_descriptor = {'label': 'str', 'color_formats': 'GPUTextureFormat-list', 'depth_stencil_format': 'GPUTextureFormat', 'sample_count': 'GPUSize32'}
fence_descriptor = {'label': 'str', 'initial_value': 'GPUFenceValue'}
query_set_descriptor = {'label': 'str', 'type': 'GPUQueryType', 'count': 'GPUSize32', 'pipeline_statistics': 'GPUPipelineStatisticName-list'}
swap_chain_descriptor = {'label': 'str', 'device': 'GPUDevice', 'format': 'GPUTextureFormat', 'usage': 'GPUTextureUsageFlags'}
uncaptured_error_event_init = {'error': 'GPUError'}
color = {'r': 'float', 'g': 'float', 'b': 'float', 'a': 'float'}
origin2_d = {'x': 'GPUIntegerCoordinate', 'y': 'GPUIntegerCoordinate'}
origin3_d = {'x': 'GPUIntegerCoordinate', 'y': 'GPUIntegerCoordinate', 'z': 'GPUIntegerCoordinate'}
extent3_d = {'width': 'GPUIntegerCoordinate', 'height': 'GPUIntegerCoordinate', 'depth': 'GPUIntegerCoordinate'}
|
class InvalidStateTransition(Exception):
pass
class State(object):
    def __init__(self, initial=False, **kwargs):
        self.initial = initial
        self.name = kwargs.get('name')  # __eq__ below relies on self.name; taking it from kwargs is an assumption
    def __eq__(self, other):
        if isinstance(other, str):
return self.name == other
elif isinstance(other, State):
return self.name == other.name
else:
return False
def __ne__(self, other):
return not self == other
class Event(object):
def __init__(self, **kwargs):
self.to_state = kwargs.get('to_state', None)
self.from_states = tuple()
from_state_args = kwargs.get('from_states', tuple())
if isinstance(from_state_args, (tuple, list)):
self.from_states = tuple(from_state_args)
else:
self.from_states = (from_state_args,)
|
class InvalidStateTransition(Exception):
pass
class State(object):
    def __init__(self, initial=False, **kwargs):
        self.initial = initial
        self.name = kwargs.get('name')  # __eq__ below relies on self.name; taking it from kwargs is an assumption
    def __eq__(self, other):
        if isinstance(other, str):
return self.name == other
elif isinstance(other, State):
return self.name == other.name
else:
return False
def __ne__(self, other):
return not self == other
class Event(object):
def __init__(self, **kwargs):
self.to_state = kwargs.get('to_state', None)
self.from_states = tuple()
from_state_args = kwargs.get('from_states', tuple())
if isinstance(from_state_args, (tuple, list)):
self.from_states = tuple(from_state_args)
else:
self.from_states = (from_state_args,)
|
__author__ = 'Aleksander Chrabaszcz'
__all__ = ['config', 'parser', 'pyslate']
__version__ = '1.1'
|
__author__ = 'Aleksander Chrabaszcz'
__all__ = ['config', 'parser', 'pyslate']
__version__ = '1.1'
|
"""
Class to handle landmarkpoints
template pt {"y":392,"x":311,"point":0,"state":"visible"}
"""
class l_point(object):
def __init__(self, **kwargs):
self.__x__ = kwargs['x']
self.__y__ = kwargs['y']
'''
self.__indx__ = kwargs['point']
if kwargs['state'] in 'visible':
self.__vis__ = True
else:
self.__vis__ = False
'''
def get_pt(self):
return self.__x__, self.__y__
def index(self):
return self.__indx__
def state(self):
pass
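# Usage sketch matching the template above (the point/state handling is
# commented out, so only x and y are stored):
pt = l_point(y=392, x=311, point=0, state='visible')
print(pt.get_pt())  # (311, 392)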
|
"""
Class to handle landmarkpoints
template pt {"y":392,"x":311,"point":0,"state":"visible"}
"""
class L_Point(object):
def __init__(self, **kwargs):
self.__x__ = kwargs['x']
self.__y__ = kwargs['y']
"\n self.__indx__ = kwargs['point']\n if kwargs['state'] in 'visible':\n self.__vis__ = True\n else:\n self.__vis__ = False\n "
def get_pt(self):
return (self.__x__, self.__y__)
def index(self):
return self.__indx__
def state(self):
pass
|
H, W = map(int, input().split())
for _ in range(H):
C = input()
print(C)
print(C)
|
(h, w) = map(int, input().split())
for _ in range(H):
c = input()
print(C)
print(C)
|
class ResponseStatus:
"""Possible values for attendee's response status
* NEEDS_ACTION - The attendee has not responded to the invitation.
* DECLINED - The attendee has declined the invitation.
* TENTATIVE - The attendee has tentatively accepted the invitation.
* ACCEPTED - The attendee has accepted the invitation.
"""
NEEDS_ACTION = "needsAction"
DECLINED = "declined"
TENTATIVE = "tentative"
ACCEPTED = "accepted"
class Attendee:
def __init__(self,
email,
display_name=None,
comment=None,
optional=None,
is_resource=None,
additional_guests=None,
response_status=None):
"""Represents attendee of the event.
:param email:
the attendee's email address, if available.
:param display_name:
the attendee's name, if available
:param comment:
the attendee's response comment
:param optional:
whether this is an optional attendee. The default is False.
:param is_resource:
whether the attendee is a resource.
Can only be set when the attendee is added to the event
for the first time. Subsequent modifications are ignored.
The default is False.
:param additional_guests:
number of additional guests. The default is 0.
:param response_status:
the attendee's response status. See :py:class:`~gcsa.attendee.ResponseStatus`
"""
self.email = email
self.display_name = display_name
self.comment = comment
self.optional = optional
self.is_resource = is_resource
self.additional_guests = additional_guests
self.response_status = response_status
def __eq__(self, other):
return isinstance(other, Attendee) \
and self.email == other.email \
and self.display_name == other.display_name \
and self.comment == other.comment \
and self.optional == other.optional \
and self.is_resource == other.is_resource \
and self.additional_guests == other.additional_guests \
and self.response_status == other.response_status
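# Usage sketch with hypothetical values:
guest = Attendee('pat@example.com',
                 display_name='Pat Example',
                 response_status=ResponseStatus.TENTATIVE)
print(guest.response_status)  # 'tentative'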
|
class ResponseStatus:
"""Possible values for attendee's response status
* NEEDS_ACTION - The attendee has not responded to the invitation.
* DECLINED - The attendee has declined the invitation.
* TENTATIVE - The attendee has tentatively accepted the invitation.
* ACCEPTED - The attendee has accepted the invitation.
"""
    NEEDS_ACTION = 'needsAction'
    DECLINED = 'declined'
    TENTATIVE = 'tentative'
    ACCEPTED = 'accepted'
class Attendee:
def __init__(self, email, display_name=None, comment=None, optional=None, is_resource=None, additional_guests=None, response_status=None):
"""Represents attendee of the event.
:param email:
the attendee's email address, if available.
:param display_name:
the attendee's name, if available
:param comment:
the attendee's response comment
:param optional:
whether this is an optional attendee. The default is False.
:param is_resource:
whether the attendee is a resource.
Can only be set when the attendee is added to the event
for the first time. Subsequent modifications are ignored.
The default is False.
:param additional_guests:
number of additional guests. The default is 0.
:param response_status:
the attendee's response status. See :py:class:`~gcsa.attendee.ResponseStatus`
"""
self.email = email
self.display_name = display_name
self.comment = comment
self.optional = optional
self.is_resource = is_resource
self.additional_guests = additional_guests
self.response_status = response_status
def __eq__(self, other):
return isinstance(other, Attendee) and self.email == other.email and (self.display_name == other.display_name) and (self.comment == other.comment) and (self.optional == other.optional) and (self.is_resource == other.is_resource) and (self.additional_guests == other.additional_guests) and (self.response_status == other.response_status)
|
public_key = 28
# Store the discovered factors in this list
factors = []
# Begin testing at 2
test_number = 2
# Loop through all numbers from 2 up until the public_key number
while test_number < public_key:
# If the public key divides exactly into the test_number, it is a factor
if public_key % test_number == 0:
factors.append(test_number)
# Move on to the next number
test_number += 1
# Print the result
print(factors)
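# For public_key = 28 this prints [2, 4, 7, 14]; 1 and 28 themselves are
# excluded because testing starts at 2 and stops below public_key.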
|
public_key = 28
factors = []
test_number = 2
while test_number < public_key:
if public_key % test_number == 0:
factors.append(test_number)
test_number += 1
print(factors)
|
# Display
default_screen_width = 940
default_screen_height = 600
screen_width = default_screen_width
screen_height = default_screen_height
is_native = False
max_tps = 16
board_width = 13
board_height = 9
theme = "neon" # neon/paper/football
# Sound
sound_volume = 0.1
sound_muted = False
|
default_screen_width = 940
default_screen_height = 600
screen_width = default_screen_width
screen_height = default_screen_height
is_native = False
max_tps = 16
board_width = 13
board_height = 9
theme = 'neon'
sound_volume = 0.1
sound_muted = False
|
# -*- coding: utf-8 -*-
"""
Functions for navigating trees represented as embedded dictionaries.
"""
__author__ = "Julian Jara-Ettinger"
__license__ = "MIT"
def BuildKeyList(Dictionary):
"""
WARNING: This function is for internal use.
Return a list of lists where each inner list is chain of valid keys which when input access the dictionary tree and retrieve a numerical sample.
"""
if isinstance(Dictionary, dict):
Result = []
for Key in Dictionary.keys():
List = BuildKeyList(Dictionary[Key])
List = [[Key] + x for x in List]
Result = Result + List
return Result
else:
return [[]]
def NormalizeDictionary(Dictionary, Constant):
"""
WARNING: This function is for internal use.
Divide all leaf values by a constant.
"""
if isinstance(Dictionary, dict):
for key in Dictionary.keys():
Dictionary[key] = NormalizeDictionary(Dictionary[key], Constant)
return Dictionary
else:
return Dictionary * 1.0 / Constant
def RetrieveValue(Dictionary, IndexPath):
"""
WARNING: This function is for internal use.
Enter dictionary recursively using IndexPath and return leaf value.
"""
if IndexPath == []:
return Dictionary
else:
return RetrieveValue(Dictionary[IndexPath[0]], IndexPath[1:])
def RecursiveDictionaryExtraction(Dictionary):
"""
WARNING: This function is for internal use.
This function goes into a tree structures as embedded dictionaries and returns the sum of all the leaves
"""
if isinstance(Dictionary, dict):
Values = [RecursiveDictionaryExtraction(
Dictionary[x]) for x in Dictionary.keys()]
return sum(Values)
else:
return Dictionary
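# Usage sketch on a tiny two-level tree (hypothetical sample data; key order
# follows insertion order on Python 3.7+):
if __name__ == '__main__':
    Tree = {'a': {'b': 1, 'c': 3}, 'd': 4}
    print(BuildKeyList(Tree))                   # [['a', 'b'], ['a', 'c'], ['d']]
    print(RetrieveValue(Tree, ['a', 'c']))      # 3
    print(RecursiveDictionaryExtraction(Tree))  # 8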
|
"""
Functions for navigating trees represented as embedded dictionaries.
"""
__author__ = 'Julian Jara-Ettinger'
__license__ = 'MIT'
def build_key_list(Dictionary):
"""
WARNING: This function is for internal use.
Return a list of lists where each inner list is chain of valid keys which when input access the dictionary tree and retrieve a numerical sample.
"""
if isinstance(Dictionary, dict):
        result = []
        for key in Dictionary.keys():
            sublist = build_key_list(Dictionary[key])
            sublist = [[key] + x for x in sublist]
            result = result + sublist
        return result
else:
return [[]]
def normalize_dictionary(Dictionary, Constant):
"""
WARNING: This function is for internal use.
Divide all leaf values by a constant.
"""
if isinstance(Dictionary, dict):
for key in Dictionary.keys():
Dictionary[key] = normalize_dictionary(Dictionary[key], Constant)
return Dictionary
else:
return Dictionary * 1.0 / Constant
def retrieve_value(Dictionary, IndexPath):
"""
WARNING: This function is for internal use.
Enter dictionary recursively using IndexPath and return leaf value.
"""
if IndexPath == []:
return Dictionary
else:
return retrieve_value(Dictionary[IndexPath[0]], IndexPath[1:])
def recursive_dictionary_extraction(Dictionary):
"""
WARNING: This function is for internal use.
This function goes into a tree structures as embedded dictionaries and returns the sum of all the leaves
"""
if isinstance(Dictionary, dict):
        values = [recursive_dictionary_extraction(Dictionary[x]) for x in Dictionary.keys()]
        return sum(values)
else:
return Dictionary
|
fib = [1, 1]
for i in range(2, 11):
fib.append(fib[i - 1] + fib[i - 2])
def c2f(c):
n = ord(c)
b = ''
for i in range(10, -1, -1):
if n >= fib[i]:
n -= fib[i]
b += '1'
else:
b += '0'
return b
ALPHABET = [chr(i) for i in range(33,126)]
print(ALPHABET)
flag = ['10000100100','10010000010','10010001010','10000100100','10010010010','10001000000','10100000000','10000100010','00101010000','10010010000','00101001010','10000101000','10000010010','00101010000','10010000000','10000101000','10000010010','10001000000','00101000100','10000100010','10010000100','00010101010','00101000100','00101000100','00101001010','10000101000','10100000100','00000100100']
flagd = ''
for i in flag:
    for c in ALPHABET:
        if c2f(c) == i:
            flagd += c
            print(c)
            break
print(flagd)
|
fib = [1, 1]
for i in range(2, 11):
fib.append(fib[i - 1] + fib[i - 2])
def c2f(c):
n = ord(c)
b = ''
for i in range(10, -1, -1):
if n >= fib[i]:
n -= fib[i]
b += '1'
else:
b += '0'
return b
alphabet = [chr(i) for i in range(33, 126)]
print(alphabet)
flag = ['10000100100', '10010000010', '10010001010', '10000100100', '10010010010', '10001000000', '10100000000', '10000100010', '00101010000', '10010010000', '00101001010', '10000101000', '10000010010', '00101010000', '10010000000', '10000101000', '10000010010', '10001000000', '00101000100', '10000100010', '10010000100', '00010101010', '00101000100', '00101000100', '00101001010', '10000101000', '10100000100', '00000100100']
flagd = ''
for i in flag:
    for c in alphabet:
        if c2f(c) == i:
            flagd += c
            print(c)
            break
print(flagd)
|
# -*- coding: utf-8 -*-
__version__ = "20.4.1a"
__author__ = "Taro Sato"
__author_email__ = "[email protected]"
__license__ = "MIT"
|
__version__ = '20.4.1a'
__author__ = 'Taro Sato'
__author_email__ = '[email protected]'
__license__ = 'MIT'
|
# represents the format of the string (see http://docs.python.org/library/datetime.html#strftime-strptime-behavior)
# format symbol "z" doesn't wok sometimes, maybe you will need to change csv2youtrack.to_unix_date(time_string)
DATE_FORMAT_STRING = ""
FIELD_NAMES = {
"Project" : "project",
"Summary" : "summary",
"Reporter" : "reporterName",
"Created" : "created",
"Updated" : "updated",
"Description" : "description"
}
FIELD_TYPES = {
"Fix versions" : "version[*]",
"State" : "state[1]",
"Assignee" : "user[1]",
"Affected versions" : "version[*]",
"Fixed in build" : "build[1]",
"Priority" : "enum[1]",
"Subsystem" : "ownedField[1]",
"Browser" : "enum[1]",
"OS" : "enum[1]",
"Verified in build" : "build[1]",
"Verified by" : "user[1]",
"Affected builds" : "build[*]",
"Fixed in builds" : "build[*]",
"Reviewed by" : "user[1]",
"Story points" : "integer",
"Value" : "integer",
"Marketing value" : "integer"
}
CONVERSION = {}
CSV_DELIMITER = ","
|
date_format_string = ''
field_names = {'Project': 'project', 'Summary': 'summary', 'Reporter': 'reporterName', 'Created': 'created', 'Updated': 'updated', 'Description': 'description'}
field_types = {'Fix versions': 'version[*]', 'State': 'state[1]', 'Assignee': 'user[1]', 'Affected versions': 'version[*]', 'Fixed in build': 'build[1]', 'Priority': 'enum[1]', 'Subsystem': 'ownedField[1]', 'Browser': 'enum[1]', 'OS': 'enum[1]', 'Verified in build': 'build[1]', 'Verified by': 'user[1]', 'Affected builds': 'build[*]', 'Fixed in builds': 'build[*]', 'Reviewed by': 'user[1]', 'Story points': 'integer', 'Value': 'integer', 'Marketing value': 'integer'}
conversion = {}
csv_delimiter = ','
|
# taken from: http://pjreddie.com/projects/mnist-in-csv/
# convert reads the binary data and outputs a csv file
def convert(image_file_path, label_file_path, csv_file_path, n):
# open files
images_file = open(image_file_path, "rb")
labels_file = open(label_file_path, "rb")
csv_file = open(csv_file_path, "w")
# read some kind of header
images_file.read(16)
labels_file.read(8)
# prepare array
images = []
# read images and labels
for _ in range(n):
image = []
for _ in range(28*28):
image.append(ord(images_file.read(1)))
image.append(ord(labels_file.read(1)))
images.append(image)
# write csv rows
for image in images:
csv_file.write(",".join(str(pix) for pix in image)+"\n")
# close files
images_file.close()
csv_file.close()
labels_file.close()
# convert train data set
convert(
"train-images-idx3-ubyte",
"train-labels-idx1-ubyte",
"train.csv",
60000
)
# convert test data set
convert(
"t10k-images-idx3-ubyte",
"t10k-labels-idx1-ubyte",
"test.csv",
10000
)
|
def convert(image_file_path, label_file_path, csv_file_path, n):
images_file = open(image_file_path, 'rb')
labels_file = open(label_file_path, 'rb')
csv_file = open(csv_file_path, 'w')
images_file.read(16)
labels_file.read(8)
images = []
for _ in range(n):
image = []
for _ in range(28 * 28):
image.append(ord(images_file.read(1)))
image.append(ord(labels_file.read(1)))
images.append(image)
for image in images:
csv_file.write(','.join((str(pix) for pix in image)) + '\n')
images_file.close()
csv_file.close()
labels_file.close()
convert('train-images-idx3-ubyte', 'train-labels-idx1-ubyte', 'train.csv', 60000)
convert('t10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte', 'test.csv', 10000)
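# Read-back sketch for the CSV written above: each row is the 784 pixel values
# followed by the label, all decimal integers (assumes train.csv was produced
# by convert() above).
with open('train.csv') as f:
    first_row = [int(v) for v in f.readline().split(',')]
pixels, label = first_row[:28 * 28], first_row[28 * 28]
print(label, len(pixels))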
|
# SPDX-License-Identifier: MIT
# Source: https://github.com/microsoft/MaskFlownet/tree/5cba12772e2201f0d1c1e27161d224e585334571
class Reader:
def __init__(self, obj, full_attr=""):
self._object = obj
self._full_attr = full_attr
def __getattr__(self, name):
if self._object is None:
ret = None
else:
ret = self._object.get(name, None)
return Reader(ret, self._full_attr + '.' + name)
def get(self, default=None):
if self._object is None:
print('Default FLAGS.{} to {}'.format(self._full_attr, default))
return default
else:
return self._object
@property
def value(self):
return self._object
|
class Reader:
def __init__(self, obj, full_attr=''):
self._object = obj
self._full_attr = full_attr
def __getattr__(self, name):
if self._object is None:
ret = None
else:
ret = self._object.get(name, None)
        return Reader(ret, self._full_attr + '.' + name)
def get(self, default=None):
if self._object is None:
print('Default FLAGS.{} to {}'.format(self._full_attr, default))
return default
else:
return self._object
@property
def value(self):
return self._object
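# Usage sketch (hypothetical flag names): Reader walks a nested dict with
# dotted attribute access and returns None-safe wrappers, so a missing key
# falls back to a logged default instead of raising.
flags = Reader({'model': {'depth': 50}})
print(flags.model.depth.get(18))   # -> 50
print(flags.model.width.get(1.0))  # logs the fallback, -> 1.0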
|
"""
Relaxation methods
------------------
The multigrid cycle is formed by two complementary procedures: relaxation and
coarse-grid correction. The role of relaxation is to rapidly damp oscillatory
(high-frequency) errors out of the approximate solution. When the error is
smooth, it can then be accurately represented on the coarser grid, where a
solution, or approximate solution, can be computed.
Iterative methods for linear systems that have an error smoothing property
are valid relaxation methods. Since the purpose of a relaxation method is
to smooth oscillatory errors, its effectiveness on non-oscillatory errors
is not important. This point explains why simple iterative methods like
Gauss-Seidel iteration are effective relaxation methods while being very
slow to converge to the solution of Ax=b.
PyAMG implements relaxation methods of the following varieties:
1. Jacobi iteration
2. Gauss-Seidel iteration
3. Successive Over-Relaxation
4. Polynomial smoothing (e.g. Chebyshev)
5. Jacobi and Gauss-Seidel on the normal equations (A.H A and A A.H)
6. Krylov methods: gmres, cg, cgnr, cgne
7. No pre- or postsmoother
Refer to the docstrings of the individual methods for additional information.
"""
__docformat__ = "restructuredtext en"
# TODO: explain separation of basic methods from interface methods.
# TODO: explain why each class of methods exist
# (parallel vs. serial, SPD vs. indefinite)
postpone_import = 1
|
"""
Relaxation methods
------------------
The multigrid cycle is formed by two complementary procedures: relaxation and
coarse-grid correction. The role of relaxation is to rapidly damp oscillatory
(high-frequency) errors out of the approximate solution. When the error is
smooth, it can then be accurately represented on the coarser grid, where a
solution, or approximate solution, can be computed.
Iterative methods for linear systems that have an error smoothing property
are valid relaxation methods. Since the purpose of a relaxation method is
to smooth oscillatory errors, its effectiveness on non-oscillatory errors
is not important. This point explains why simple iterative methods like
Gauss-Seidel iteration are effective relaxation methods while being very
slow to converge to the solution of Ax=b.
PyAMG implements relaxation methods of the following varieties:
1. Jacobi iteration
2. Gauss-Seidel iteration
3. Successive Over-Relaxation
4. Polynomial smoothing (e.g. Chebyshev)
5. Jacobi and Gauss-Seidel on the normal equations (A.H A and A A.H)
6. Krylov methods: gmres, cg, cgnr, cgne
7. No pre- or postsmoother
Refer to the docstrings of the individual methods for additional information.
"""
__docformat__ = 'restructuredtext en'
postpone_import = 1
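# Illustrative sketch (not part of PyAMG): a single weighted-Jacobi sweep,
# x <- x + w * D^{-1} (b - A x), the simplest smoother of variety 1 above.
import numpy as np
def weighted_jacobi_sweep(A, x, b, w=2.0 / 3.0):
    # dividing the residual by diag(A) applies D^{-1}; w = 2/3 is the classic
    # damping choice for the 1D Poisson model problem
    return x + w * (b - A @ x) / np.diag(A)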
|
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
class InitializationError(Exception):
"""Raised by RemoteClient.initialize on fatal errors."""
def __init__(self, last_error):
super(InitializationError, self).__init__('Failed to grab auth headers')
self.last_error = last_error
class BotCodeError(Exception):
"""Raised by RemoteClient.get_bot_code."""
def __init__(self, new_zip, url, version):
super(BotCodeError,
self).__init__('Unable to download %s from %s; first tried version %s'
% (new_zip, url, version))
class InternalError(Exception):
"""Raised on unrecoverable errors that abort task with 'internal error'."""
class PollError(Exception):
"""Raised on unrecoverable errors in RemoteClient.poll."""
class MintTokenError(Exception):
"""Raised on unrecoverable errors in RemoteClient.mint_*_token."""
|
class Initializationerror(Exception):
"""Raised by RemoteClient.initialize on fatal errors."""
def __init__(self, last_error):
        super(Initializationerror, self).__init__('Failed to grab auth headers')
self.last_error = last_error
class Botcodeerror(Exception):
"""Raised by RemoteClient.get_bot_code."""
def __init__(self, new_zip, url, version):
        super(Botcodeerror, self).__init__('Unable to download %s from %s; first tried version %s' % (new_zip, url, version))
class Internalerror(Exception):
"""Raised on unrecoverable errors that abort task with 'internal error'."""
class Pollerror(Exception):
"""Raised on unrecoverable errors in RemoteClient.poll."""
class Minttokenerror(Exception):
"""Raised on unrecoverable errors in RemoteClient.mint_*_token."""
|
"""Special Pythagorean triplet
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
"""
def is_pythagorean_triplet(a, b, c):
"""Determine whether the provided numbers are a Pythagorean triplet.
Arguments:
a, b, c (int): Three integers.
Returns:
        Boolean: True if the provided numbers are a Pythagorean triplet, False otherwise.
"""
return (a < b < c) and (a**2 + b**2 == c**2)
def pair_sums(total, least):
"""Find all pairs which add up to the provided sum.
Arguments:
total (int): Number to which returned pairs must sum.
least (int): The smallest integer which may be part of a returned pair.
Returns:
set of tuples: Containing pairs of integers adding up to the given sum.
"""
pairs = set()
for i in range(least, total - least):
pair = [i, total - i]
pair.sort()
        pairs.add(tuple(pair))
return pairs
def find_triplet_product(total):
"""Find a Pythagorean triplet adding up to the provided sum.
Arguments:
total (int): An integer to which a triplet must sum.
Returns:
tuple of list and int: First Pythagorean triplet found and its product.
None: If no Pythagorean triplet summing to the provided total exists.
"""
triplets = []
for i in range(1, total):
pairs = pair_sums(total - i, i)
for pair in pairs:
triplet = [i]
triplet += pair
triplets.append(triplet)
for triplet in triplets:
a, b, c = triplet
if is_pythagorean_triplet(a, b, c):
return triplet, a * b * c
|
"""Special Pythagorean triplet
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
"""
def is_pythagorean_triplet(a, b, c):
"""Determine whether the provided numbers are a Pythagorean triplet.
Arguments:
a, b, c (int): Three integers.
Returns:
        Boolean: True if the provided numbers are a Pythagorean triplet, False otherwise.
"""
return a < b < c and a ** 2 + b ** 2 == c ** 2
def pair_sums(total, least):
"""Find all pairs which add up to the provided sum.
Arguments:
total (int): Number to which returned pairs must sum.
least (int): The smallest integer which may be part of a returned pair.
Returns:
set of tuples: Containing pairs of integers adding up to the given sum.
"""
pairs = set()
for i in range(least, total - least):
pair = [i, total - i]
pair.sort()
        pairs.add(tuple(pair))
return pairs
def find_triplet_product(total):
"""Find a Pythagorean triplet adding up to the provided sum.
Arguments:
total (int): An integer to which a triplet must sum.
Returns:
tuple of list and int: First Pythagorean triplet found and its product.
None: If no Pythagorean triplet summing to the provided total exists.
"""
triplets = []
for i in range(1, total):
pairs = pair_sums(total - i, i)
for pair in pairs:
triplet = [i]
triplet += pair
triplets.append(triplet)
for triplet in triplets:
(a, b, c) = triplet
if is_pythagorean_triplet(a, b, c):
return (triplet, a * b * c)
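# For the stated problem, a + b + c = 1000 gives the unique triplet
# (200, 375, 425): 200^2 + 375^2 = 40000 + 140625 = 180625 = 425^2.
print(find_triplet_product(1000))  # -> ([200, 375, 425], 31875000)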
|
"""
Python program to check whether given tree is binary search tree(BST) or not
[BST details - https://en.wikipedia.org/wiki/Binary_search_tree]
"""
class Node:
# Create node for binary tree
def __init__(self, v):
self.value = v
self.left = None
self.right = None
def inorder_traversal(node, arr):
if node is None:
return None
inorder_traversal(node.left, arr)
arr.append(node.value)
inorder_traversal(node.right, arr)
def is_bst(node):
"""
return true if given tree is a valid bst else return false
"""
arr = list()
inorder_traversal(node, arr)
# check if inorder traversal of tree is sorted i.e. strictly increasing
for i in range(1, len(arr)):
if arr[i] <= arr[i - 1]:
return False
return True
if __name__ == "__main__":
"""
Creating Tree
4
/ \
2 5
/ \
1 3
"""
root = Node(4)
root.left = Node(2)
root.right = Node(5)
root.left.left = Node(1)
root.left.right = Node(3)
print(is_bst(root)) # Output: True
|
"""
Python program to check whether given tree is binary search tree(BST) or not
[BST details - https://en.wikipedia.org/wiki/Binary_search_tree]
"""
class Node:
def __init__(self, v):
self.value = v
self.left = None
self.right = None
def inorder_traversal(node, arr):
if node is None:
return None
inorder_traversal(node.left, arr)
arr.append(node.value)
inorder_traversal(node.right, arr)
def is_bst(node):
"""
return true if given tree is a valid bst else return false
"""
arr = list()
inorder_traversal(node, arr)
for i in range(1, len(arr)):
if arr[i] <= arr[i - 1]:
return False
return True
if __name__ == '__main__':
    """
    Creating Tree
          4
         / \
        2   5
       / \
      1   3
    """
    root = Node(4)
    root.left = Node(2)
    root.right = Node(5)
    root.left.left = Node(1)
    root.left.right = Node(3)
    print(is_bst(root))  # Output: True
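    # Negative case sketch: swapping 3 and 5 breaks the ordering, so the
    # inorder traversal (1, 2, 5, 4, 3) is no longer strictly increasing.
    bad = Node(4)
    bad.left = Node(2)
    bad.right = Node(3)
    bad.left.left = Node(1)
    bad.left.right = Node(5)
    print(is_bst(bad))  # Output: False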
|
# Write your solution for 1.4 here!
def is_prime(x):
    # numbers below 2 are not prime by definition
    if x < 2:
        return False
    # trial division over every candidate divisor below x
    for i in range(x - 1, 1, -1):
        if x % i == 0:
            return False
    return True
print(is_prime(5191))
|
def is_prime(x):
    if x < 2:
        return False
    for i in range(x - 1, 1, -1):
        if x % i == 0:
            return False
    return True
print(is_prime(5191))
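# Sketch of the standard speed-up: trial division only needs divisors up to
# sqrt(x), since any factor pair (d, x // d) has one member <= sqrt(x).
def is_prime_fast(x):
    if x < 2:
        return False
    i = 2
    while i * i <= x:
        if x % i == 0:
            return False
        i += 1
    return True
print(is_prime_fast(5191))  # agrees with is_prime(5191)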
|
#!/usr/bin/env python
# Copyright 2020 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def define_env(env):
"""Hook function"""
@env.macro
def kops_feature_table(**kwargs):
"""
Generate a markdown table which will be rendered when called, along with the supported passed keyword args.
:param kwargs:
kops_added_ff => Kops version in which this feature was added as a feature flag
kops_added_default => Kops version in which this feature was introduced as stable
k8s_min => Minimum k8s version which supports this feature
:return: rendered markdown table
"""
# this dict object maps the kwarg to its description, which will be used in the final table
supported_args = {
'kops_added_ff': 'Alpha (Feature Flag)',
'kops_added_default': 'Default',
'k8s_min': 'Minimum K8s Version'
}
# Create the initial strings to which we'll concatenate the relevant columns
title = '|'
separators = '|'
values = '|'
# Iterate over provided supported kwargs and match them with the provided values.
for arg, header in supported_args.items():
if arg not in kwargs.keys():
continue
if arg == 'kops_added_default' and 'kops_added_ff' not in kwargs.keys():
title += ' Introduced |'
else:
title += f' {header} |'
separators += ' :-: |'
if arg == 'k8s_min':
values += f' K8s {kwargs[arg]} |'
else:
values += f' Kops {kwargs[arg]} |'
# Create a list object containing all the table rows,
# Then return a string object which contains every list item in a new line.
table = [
title,
separators,
values
]
return '\n'.join(table)
def main():
pass
if __name__ == "__main__":
main()
|
def define_env(env):
"""Hook function"""
@env.macro
def kops_feature_table(**kwargs):
"""
Generate a markdown table which will be rendered when called, along with the supported passed keyword args.
:param kwargs:
kops_added_ff => Kops version in which this feature was added as a feature flag
kops_added_default => Kops version in which this feature was introduced as stable
k8s_min => Minimum k8s version which supports this feature
:return: rendered markdown table
"""
supported_args = {'kops_added_ff': 'Alpha (Feature Flag)', 'kops_added_default': 'Default', 'k8s_min': 'Minimum K8s Version'}
title = '|'
separators = '|'
values = '|'
for (arg, header) in supported_args.items():
if arg not in kwargs.keys():
continue
if arg == 'kops_added_default' and 'kops_added_ff' not in kwargs.keys():
title += ' Introduced |'
else:
title += f' {header} |'
separators += ' :-: |'
if arg == 'k8s_min':
values += f' K8s {kwargs[arg]} |'
else:
values += f' Kops {kwargs[arg]} |'
table = [title, separators, values]
return '\n'.join(table)
def main():
pass
if __name__ == '__main__':
main()
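# Hypothetical invocation sketch (made-up version numbers): with all three
# kwargs supplied, the macro renders a three-line markdown table:
#   kops_feature_table(kops_added_ff='1.19', kops_added_default='1.21', k8s_min='1.16')
#   -> '| Alpha (Feature Flag) | Default | Minimum K8s Version |'
#      '| :-: | :-: | :-: |'
#      '| Kops 1.19 | Kops 1.21 | K8s 1.16 |'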
|
class CurveLoopIterator(object,IEnumerator[Curve],IDisposable,IEnumerator):
""" An iterator to a curve loop. """
def Dispose(self):
""" Dispose(self: CurveLoopIterator) """
pass
def MoveNext(self):
"""
MoveNext(self: CurveLoopIterator) -> bool
Increments the iterator to the next item.
Returns: True if there is a next available item in this iterator.
False if the
iterator has completed all available items.
"""
pass
def next(self,*args):
""" next(self: object) -> object """
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: CurveLoopIterator,disposing: bool) """
pass
def Reset(self):
"""
Reset(self: CurveLoopIterator)
Resets the iterator to the initial state.
"""
pass
def __contains__(self,*args):
""" __contains__[Curve](enumerator: IEnumerator[Curve],value: Curve) -> bool """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerator) -> object """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
Current=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the item at the current position of the iterator.
Get: Current(self: CurveLoopIterator) -> Curve
"""
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: CurveLoopIterator) -> bool
"""
|
class Curveloopiterator(object, IEnumerator[Curve], IDisposable, IEnumerator):
""" An iterator to a curve loop. """
def dispose(self):
""" Dispose(self: CurveLoopIterator) """
pass
def move_next(self):
"""
MoveNext(self: CurveLoopIterator) -> bool
Increments the iterator to the next item.
Returns: True if there is a next available item in this iterator.
False if the
iterator has completed all available items.
"""
pass
def next(self, *args):
""" next(self: object) -> object """
pass
def release_unmanaged_resources(self, *args):
""" ReleaseUnmanagedResources(self: CurveLoopIterator,disposing: bool) """
pass
def reset(self):
"""
Reset(self: CurveLoopIterator)
Resets the iterator to the initial state.
"""
pass
def __contains__(self, *args):
""" __contains__[Curve](enumerator: IEnumerator[Curve],value: Curve) -> bool """
pass
def __enter__(self, *args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self, *args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self, *args):
""" __iter__(self: IEnumerator) -> object """
pass
def __repr__(self, *args):
""" __repr__(self: object) -> str """
pass
current = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Gets the item at the current position of the iterator.\n\n\n\nGet: Current(self: CurveLoopIterator) -> Curve\n\n\n\n'
is_valid_object = property(lambda self: object(), lambda self, v: None, lambda self: None)
'Specifies whether the .NET object represents a valid Revit entity.\n\n\n\nGet: IsValidObject(self: CurveLoopIterator) -> bool\n\n\n\n'
|
class InternalServerError(Exception):
pass
class SchemaValidationError(Exception):
pass
class EmailAlreadyExistsError(Exception):
pass
class UnauthorizedError(Exception):
pass
class NoAuthorizationError(Exception):
pass
class UpdatingUserError(Exception):
pass
class DeletingUserError(Exception):
pass
class UserNotExistsError(Exception):
pass
errors = {
"InternalServerError": {
"message": "Something went wrong",
"status": 500
},
"SchemaValidationError": {
"message": "Request is missing required fields",
"status": 400
},
"EmailAlreadyExistsError": {
"message": "User with given email address already exists",
"status": 400
},
"UnauthorizedError": {
"message": "Invalid username or password",
"status": 401
},
"NoAuthorizationError": {
"message": "Missing Authorization Header",
"status": 401
},
"UpdatingUserError": {
"message": "Updating user added by other is forbidden",
"status": 403
},
"DeletingUserError": {
"message": "Deleting user added by other is forbidden",
"status": 403
},
"UserNotExistsError": {
"message": "User with given id doesn't exists",
"status": 400
}
}
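# Wiring sketch (assumes Flask-RESTful is installed): passing this dict to the
# Api constructor maps each exception class name to a JSON error response, e.g.
#   from flask import Flask
#   from flask_restful import Api
#   app = Flask(__name__)
#   api = Api(app, errors=errors)
# so raising UnauthorizedError from a resource returns a 401 with the message
# configured above.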
|
class Internalservererror(Exception):
pass
class Schemavalidationerror(Exception):
pass
class Emailalreadyexistserror(Exception):
pass
class Unauthorizederror(Exception):
pass
class Noauthorizationerror(Exception):
pass
class Updatingusererror(Exception):
pass
class Deletingusererror(Exception):
pass
class Usernotexistserror(Exception):
pass
errors = {'InternalServerError': {'message': 'Something went wrong', 'status': 500}, 'SchemaValidationError': {'message': 'Request is missing required fields', 'status': 400}, 'EmailAlreadyExistsError': {'message': 'User with given email address already exists', 'status': 400}, 'UnauthorizedError': {'message': 'Invalid username or password', 'status': 401}, 'NoAuthorizationError': {'message': 'Missing Authorization Header', 'status': 401}, 'UpdatingUserError': {'message': 'Updating user added by other is forbidden', 'status': 403}, 'DeletingUserError': {'message': 'Deleting user added by other is forbidden', 'status': 403}, 'UserNotExistsError': {'message': "User with given id doesn't exist", 'status': 400}}
|
"""This rule gathers all .proto files used by all of its dependencies.
The entire dependency tree is searched. The search crosses through cc_library
rules and portable_proto_library rules to collect the transitive set of all
.proto dependencies. This is provided to other rules in the form of a "proto"
provider, using the transitive_sources field.
This rule uses aspects. For general information on the concept, see:
- go/bazel-aspects-ides-tools
- go/bazel-aspects
The basic rule is transitive_protos. Example:
proto_library(
name = "a_proto_library",
srcs = ["a.proto],
)
proto_library(
name = "b_proto_library",
srcs = ["b.proto],
)
cc_library(
name = "a_cc_library",
deps = ["b_proto_library],
)
transitive_protos(
name = "all_my_protos",
deps = [
"a_proto_library",
"a_cc_library",
],
)
all_my_protos will gather all proto files used in its dependency tree; in this
case, ["a.proto", "b.proto"]. These are provided as the default outputs of this
rule, so you can place the rule in any context that requires a list of files,
and also as a "proto" provider, for use by any rules that would normally depend
on proto_library.
The dependency tree is explored using an aspect, transitive_protos_aspect. This
aspect propagates across two attributes, "deps" and "hdrs". The latter is used
for compatibility with portable_proto_library; see comments below and in that
file for more details.
At each visited node in the tree, the aspect collects protos:
- direct_sources from the proto provider in the current node. This is filled in
by proto_library nodes, and also by piggyback_header nodes (see
portable_proto_build_defs.bzl).
- protos from the transitive_protos provider in dependency nodes, found from
both the "deps" and the "hdrs" aspect.
Then it puts all the protos in the protos field of the transitive_protos
provider which it generates. This is how each node sends its gathered protos up
the tree.
"""
def _gather_transitive_protos_deps(deps, my_protos = [], my_descriptors = [], my_proto_libs = []):
useful_deps = [dep for dep in deps if hasattr(dep, "transitive_protos")]
protos = depset(
my_protos,
transitive = [dep.transitive_protos.protos for dep in useful_deps],
)
proto_libs = depset(
my_proto_libs,
transitive = [dep.transitive_protos.proto_libs for dep in useful_deps],
)
descriptors = depset(
my_descriptors,
transitive = [dep.transitive_protos.descriptors for dep in useful_deps],
)
return struct(
transitive_protos = struct(
protos = protos,
descriptors = descriptors,
proto_libs = proto_libs,
),
)
def _transitive_protos_aspect_impl(target, ctx):
"""Implementation of the transitive_protos_aspect aspect.
Args:
target: The current target.
ctx: The current rule context.
Returns:
A transitive_protos provider.
"""
protos = target.proto.direct_sources if hasattr(target, "proto") else []
deps = ctx.rule.attr.deps[:] if hasattr(ctx.rule.attr, "deps") else []
descriptors = [target.proto.direct_descriptor_set] if hasattr(target, "proto") and hasattr(target.proto, "direct_descriptor_set") else []
proto_libs = []
if ctx.rule.kind == "proto_library":
proto_libs = [f for f in target.files.to_list() if f.extension == "a"]
# Searching through the hdrs attribute is necessary because of
# portable_proto_library. In portable mode, that macro
# generates a cc_library that does not depend on any proto_libraries, so
# the .proto files do not appear in its dependency tree.
# portable_proto_library cannot add arbitrary providers or attributes to
# a cc_library rule, so instead it piggybacks the provider on a rule that
# generates a header, which occurs in the hdrs attribute of the cc_library.
if hasattr(ctx.rule.attr, "hdrs"):
deps += ctx.rule.attr.hdrs
result = _gather_transitive_protos_deps(deps, protos, descriptors, proto_libs)
return result
transitive_protos_aspect = aspect(
implementation = _transitive_protos_aspect_impl,
attr_aspects = ["deps", "hdrs"],
attrs = {},
)
def _transitive_protos_impl(ctx):
"""Implementation of transitive_protos rule.
Args:
ctx: The rule context.
Returns:
A proto provider (with transitive_sources and transitive_descriptor_sets filled in),
and marks all transitive sources as default output.
"""
gathered = _gather_transitive_protos_deps(ctx.attr.deps)
protos = gathered.transitive_protos.protos
descriptors = gathered.transitive_protos.descriptors
return struct(
proto = struct(
transitive_sources = protos,
transitive_descriptor_sets = descriptors,
),
files = depset(protos),
)
transitive_protos = rule(
implementation = _transitive_protos_impl,
attrs = {
"deps": attr.label_list(
aspects = [transitive_protos_aspect],
),
},
)
def _transitive_proto_cc_libs_impl(ctx):
"""Implementation of transitive_proto_cc_libs rule.
NOTE: this only works on Bazel, not exobazel.
Args:
ctx: The rule context.
Returns:
All transitive proto C++ .a files as default output.
"""
gathered = _gather_transitive_protos_deps(ctx.attr.deps)
proto_libs = gathered.transitive_protos.proto_libs
return struct(
files = proto_libs,
)
transitive_proto_cc_libs = rule(
implementation = _transitive_proto_cc_libs_impl,
attrs = {
"deps": attr.label_list(
aspects = [transitive_protos_aspect],
),
},
)
def _transitive_proto_descriptor_sets_impl(ctx):
"""Implementation of transitive_proto_descriptor_sets rule.
Args:
ctx: The rule context.
Returns:
All transitive proto descriptor files as default output.
"""
gathered = _gather_transitive_protos_deps(ctx.attr.deps)
descriptors = gathered.transitive_protos.descriptors
return struct(
files = descriptors,
)
transitive_proto_descriptor_sets = rule(
implementation = _transitive_proto_descriptor_sets_impl,
attrs = {
"deps": attr.label_list(
aspects = [transitive_protos_aspect],
),
},
)
|
"""This rule gathers all .proto files used by all of its dependencies.
The entire dependency tree is searched. The search crosses through cc_library
rules and portable_proto_library rules to collect the transitive set of all
.proto dependencies. This is provided to other rules in the form of a "proto"
provider, using the transitive_sources field.
This rule uses aspects. For general information on the concept, see:
- go/bazel-aspects-ides-tools
- go/bazel-aspects
The basic rule is transitive_protos. Example:
proto_library(
name = "a_proto_library",
srcs = ["a.proto],
)
proto_library(
name = "b_proto_library",
srcs = ["b.proto],
)
cc_library(
name = "a_cc_library",
deps = ["b_proto_library],
)
transitive_protos(
name = "all_my_protos",
deps = [
"a_proto_library",
"a_cc_library",
],
)
all_my_protos will gather all proto files used in its dependency tree; in this
case, ["a.proto", "b.proto"]. These are provided as the default outputs of this
rule, so you can place the rule in any context that requires a list of files,
and also as a "proto" provider, for use by any rules that would normally depend
on proto_library.
The dependency tree is explored using an aspect, transitive_protos_aspect. This
aspect propagates across two attributes, "deps" and "hdrs". The latter is used
for compatibility with portable_proto_library; see comments below and in that
file for more details.
At each visited node in the tree, the aspect collects protos:
- direct_sources from the proto provider in the current node. This is filled in
by proto_library nodes, and also by piggyback_header nodes (see
portable_proto_build_defs.bzl).
- protos from the transitive_protos provider in dependency nodes, found from
both the "deps" and the "hdrs" aspect.
Then it puts all the protos in the protos field of the transitive_protos
provider which it generates. This is how each node sends its gathered protos up
the tree.
"""
def _gather_transitive_protos_deps(deps, my_protos=[], my_descriptors=[], my_proto_libs=[]):
useful_deps = [dep for dep in deps if hasattr(dep, 'transitive_protos')]
protos = depset(my_protos, transitive=[dep.transitive_protos.protos for dep in useful_deps])
proto_libs = depset(my_proto_libs, transitive=[dep.transitive_protos.proto_libs for dep in useful_deps])
descriptors = depset(my_descriptors, transitive=[dep.transitive_protos.descriptors for dep in useful_deps])
return struct(transitive_protos=struct(protos=protos, descriptors=descriptors, proto_libs=proto_libs))
def _transitive_protos_aspect_impl(target, ctx):
"""Implementation of the transitive_protos_aspect aspect.
Args:
target: The current target.
ctx: The current rule context.
Returns:
A transitive_protos provider.
"""
protos = target.proto.direct_sources if hasattr(target, 'proto') else []
deps = ctx.rule.attr.deps[:] if hasattr(ctx.rule.attr, 'deps') else []
descriptors = [target.proto.direct_descriptor_set] if hasattr(target, 'proto') and hasattr(target.proto, 'direct_descriptor_set') else []
proto_libs = []
if ctx.rule.kind == 'proto_library':
proto_libs = [f for f in target.files.to_list() if f.extension == 'a']
if hasattr(ctx.rule.attr, 'hdrs'):
deps += ctx.rule.attr.hdrs
result = _gather_transitive_protos_deps(deps, protos, descriptors, proto_libs)
return result
transitive_protos_aspect = aspect(implementation=_transitive_protos_aspect_impl, attr_aspects=['deps', 'hdrs'], attrs={})
def _transitive_protos_impl(ctx):
"""Implementation of transitive_protos rule.
Args:
ctx: The rule context.
Returns:
A proto provider (with transitive_sources and transitive_descriptor_sets filled in),
and marks all transitive sources as default output.
"""
gathered = _gather_transitive_protos_deps(ctx.attr.deps)
protos = gathered.transitive_protos.protos
descriptors = gathered.transitive_protos.descriptors
return struct(proto=struct(transitive_sources=protos, transitive_descriptor_sets=descriptors), files=depset(protos))
transitive_protos = rule(implementation=_transitive_protos_impl, attrs={'deps': attr.label_list(aspects=[transitive_protos_aspect])})
def _transitive_proto_cc_libs_impl(ctx):
"""Implementation of transitive_proto_cc_libs rule.
NOTE: this only works on Bazel, not exobazel.
Args:
ctx: The rule context.
Returns:
All transitive proto C++ .a files as default output.
"""
gathered = _gather_transitive_protos_deps(ctx.attr.deps)
proto_libs = gathered.transitive_protos.proto_libs
return struct(files=proto_libs)
transitive_proto_cc_libs = rule(implementation=_transitive_proto_cc_libs_impl, attrs={'deps': attr.label_list(aspects=[transitive_protos_aspect])})
def _transitive_proto_descriptor_sets_impl(ctx):
"""Implementation of transitive_proto_descriptor_sets rule.
Args:
ctx: The rule context.
Returns:
All transitive proto descriptor files as default output.
"""
gathered = _gather_transitive_protos_deps(ctx.attr.deps)
descriptors = gathered.transitive_protos.descriptors
return struct(files=descriptors)
transitive_proto_descriptor_sets = rule(implementation=_transitive_proto_descriptor_sets_impl, attrs={'deps': attr.label_list(aspects=[transitive_protos_aspect])})
|
#
# PySNMP MIB module DELL-NETWORKING-COPY-CONFIG-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DELL-NETWORKING-COPY-CONFIG-MIB
# Produced by pysmi-0.3.4 at Wed May 1 12:37:52 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
dellNetMgmt, = mibBuilder.importSymbols("DELL-NETWORKING-SMI", "dellNetMgmt")
InetAddress, InetAddressType = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress", "InetAddressType")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
NotificationType, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Bits, Counter32, ObjectIdentity, Counter64, ModuleIdentity, Gauge32, Unsigned32, IpAddress, iso, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Bits", "Counter32", "ObjectIdentity", "Counter64", "ModuleIdentity", "Gauge32", "Unsigned32", "IpAddress", "iso", "TimeTicks")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
dellNetCopyConfigMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 6027, 3, 5))
dellNetCopyConfigMib.setRevisions(('2009-05-14 13:00', '2007-06-19 12:00', '2003-03-01 12:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: dellNetCopyConfigMib.setRevisionsDescriptions(('Added New enum for usbflash filesystem in Exascale', 'Update description to copy from remote server to local', 'Initial Revision',))
if mibBuilder.loadTexts: dellNetCopyConfigMib.setLastUpdated('200905141300Z')
if mibBuilder.loadTexts: dellNetCopyConfigMib.setOrganization('Dell Inc.')
if mibBuilder.loadTexts: dellNetCopyConfigMib.setContactInfo('http://www.dell.com/support')
if mibBuilder.loadTexts: dellNetCopyConfigMib.setDescription('Dell Networking OS Copy Config MIB provides copying of running-config to to startup-config and vice-versa, and Dell Networking OS files to local disk or other system via ftp or tftp. ')
dellNetCopyConfigObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1))
dellNetCopyConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1))
dellNetCopyConfigTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2))
class DellNetConfigFileLocation(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("flash", 1), ("slot0", 2), ("tftp", 3), ("ftp", 4), ("scp", 5), ("usbflash", 6), ("nfsmount", 7))
class DellNetConfigFileType(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("dellNetFile", 1), ("runningConfig", 2), ("startupConfig", 3))
class DellNetConfigCopyState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3))
namedValues = NamedValues(("running", 1), ("successful", 2), ("failed", 3))
class DellNetConfigCopyFailCause(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))
namedValues = NamedValues(("badFileName", 1), ("copyInProgress", 2), ("diskFull", 3), ("fileExist", 4), ("fileNotFound", 5), ("timeout", 6), ("unknown", 7))
dellNetCopyTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1), )
if mibBuilder.loadTexts: dellNetCopyTable.setStatus('current')
if mibBuilder.loadTexts: dellNetCopyTable.setDescription('A table of config-copy requests.')
dellNetCopyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1), ).setIndexNames((0, "DELL-NETWORKING-COPY-CONFIG-MIB", "copyConfigIndex"))
if mibBuilder.loadTexts: dellNetCopyEntry.setStatus('current')
if mibBuilder.loadTexts: dellNetCopyEntry.setDescription('A config-copy request. To use this copy on NMS, user must first query the MIB. if the query returns the result of the previous copied and there is no pending copy operation, user can submit a SNMP SET with a random number as index with the appropraite information as specified by this MIB and the row status as CreateAndGo. The system will only keep the last 5 copy requests as the history. If there are ten entries in the copy request table, the subsequent copy request will replace the existing one in the copy table. 1) To copy running-config from local directory to startup-config. Set the following mib objects in the copy table copySrcFileType : runningConfig (2) copyDestFileType : startupConfig (3) 2) To copy startup-config from local directory to a remote site. Set the following mib objects in the copy table copySrcFileType : startupConfig (3) copyDestFileType : dellNetFile (1) copyDestFileLocation : ftp (4) copyDestFileName : /user/tester1/ftp/ copyServerAddress : 172.20.10.123 copyUserName : tester1 copyUserPassword : mypasswd 3) To copy a file from local directory to a remote site. Set the following mib objects in the copy table copySrcFileType : dellNetFile (1) copySrcFileLocation : slot0 (2) copySrcFileName : NVTRACE_LOG_DIR/LP4-nvtrace-0 copyDestFileType : dellNetFile (1) copyDestFileLocation : ftp (4) copyDestFileName : /usr/tester1/trace/backup/LP4-nvtrace-0 copyServerAddress : 172.20.10.123 copyUserName : tester1 copyUserPassword : mypasswd ')
copyConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: copyConfigIndex.setStatus('current')
if mibBuilder.loadTexts: copyConfigIndex.setDescription('To initiate a config copy request, user should assign a positive random value as an index. ')
copySrcFileType = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 2), DellNetConfigFileType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copySrcFileType.setStatus('current')
if mibBuilder.loadTexts: copySrcFileType.setDescription('Specifies the type of file to copy from. if the copySrcFileType is runningConfig(2) or startupConfig(3), the default DellNetConfigFileLocation is flash(1). If the copySrcFileType has the value of dellNetFile(1), it is expected that the copySrcFileLocation and copySrcFileName must also be spcified. The three objects together will uniquely identify the source file. ')
copySrcFileLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 3), DellNetConfigFileLocation()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copySrcFileLocation.setStatus('current')
if mibBuilder.loadTexts: copySrcFileLocation.setDescription('Specifies the location of source file. If the copySrcFileType has the value of dellNetFile(1), it is expected that the copySrcFileType and copySrcFileName must also be spcified. The three objects together will uniquely identify the source file. If the copySrcFileLocation has the value of ftp(4) or scp(5), it is expected the login information liked copyServerAddress, copyUserName, and copyUserPassword also be spcified. ')
copySrcFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copySrcFileName.setStatus('current')
if mibBuilder.loadTexts: copySrcFileName.setDescription('The file name (including the path, if applicable) of the file. If copySourceFileType is set to runningConfig or startupConfig, copySrcFileName is not needed. ')
copyDestFileType = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 5), DellNetConfigFileType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copyDestFileType.setStatus('current')
if mibBuilder.loadTexts: copyDestFileType.setDescription('Specifies the type of file to copy to. if the copyDestFileType is runningConfig(2) or startupConfig(3), the default dellNetDestFileLocation is flash(1). If the copyDestFileType has the value of dellNetFile(1), it is expected that the copyDestFileLocation and copyDestFileName must also be spcified. The three objects together will uniquely identify the destination file. ')
copyDestFileLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 6), DellNetConfigFileLocation()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copyDestFileLocation.setStatus('current')
if mibBuilder.loadTexts: copyDestFileLocation.setDescription('Specifies the location of destination file. If the copyDestFileType has the value of dellNetFile(1), it is expected that the copyDestFileType and copyDestFileName must also be spcified. The three objects together will uniquely identify the destination file. If the copyDestFileLocation has the value of ftp(4) or scp(5), it is expected the login information liked copyServerAddress, copyUserName, and copyUserPassword also be spcified. ')
copyDestFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 7), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copyDestFileName.setStatus('current')
if mibBuilder.loadTexts: copyDestFileName.setDescription('Specifies the location of destination file. If the copyDestFileType has the value of dellNetFile(1), it is expected that the dellNetCopyDestFileTyp and copyDestFileLocation must also be spcified. The three objects together will uniquely identify the source file. If the copyDestFileLocation has the value of ftp(4) or scp(5), it is expected the login information liked copyServerAddress, copyUserName, and copyUserPassword also be spcified. ')
copyServerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copyServerAddress.setStatus('deprecated')
if mibBuilder.loadTexts: copyServerAddress.setDescription('The ip address of the tftp server from (or to) which to copy the configuration file. Values of 0.0.0.0 or FF.FF.FF.FF for copyServerAddress are not allowed. If the copyDestFileLocation has the value of ftp(4) or scp(5), it is expected the login information liked copyServerAddress, copyUserName, and copyUserPassword also be spcified. ')
copyUserName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copyUserName.setStatus('current')
if mibBuilder.loadTexts: copyUserName.setDescription('Remote user name for copy via ftp, or scp. If the copyDestFileLocation has the value of ftp(4) or scp(5), it is expected the login information liked copyServerAddress, copyUserName, and copyUserPassword also be spcified. ')
copyUserPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copyUserPassword.setStatus('current')
if mibBuilder.loadTexts: copyUserPassword.setDescription('Password used by ftp, scp for copying a file to an ftp/scp server. If the copyDestFileLocation has the value of ftp(4) or scp(5), it is expected the login information liked copyServerAddress, copyUserName, and copyUserPassword also be spcified. ')
copyState = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 11), DellNetConfigCopyState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: copyState.setStatus('current')
if mibBuilder.loadTexts: copyState.setDescription(' The state of config-copy operation. ')
copyTimeStarted = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 12), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: copyTimeStarted.setStatus('current')
if mibBuilder.loadTexts: copyTimeStarted.setDescription(' The timetick when the copy started. ')
copyTimeCompleted = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 13), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: copyTimeCompleted.setStatus('current')
if mibBuilder.loadTexts: copyTimeCompleted.setDescription(' The timetick when the copy completed. ')
copyFailCause = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 14), DellNetConfigCopyFailCause()).setMaxAccess("readonly")
if mibBuilder.loadTexts: copyFailCause.setStatus('current')
if mibBuilder.loadTexts: copyFailCause.setDescription(' The reason a config-copy request failed. ')
copyEntryRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 15), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copyEntryRowStatus.setStatus('current')
if mibBuilder.loadTexts: copyEntryRowStatus.setDescription(' The state of the copy operation. Uses CreateAndGo when you are performing the copy. The state is set to active when the copy is completed. ')
copyServerInetAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 16), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copyServerInetAddressType.setStatus('current')
if mibBuilder.loadTexts: copyServerInetAddressType.setDescription(' The address type of copyServerInetAddress. Only ipv4 (1), ipv6 (2) and dns (16) types are supported. ')
copyServerInetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 17), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: copyServerInetAddress.setStatus('current')
if mibBuilder.loadTexts: copyServerInetAddress.setDescription(' The IP address of the address ftp/tftp/scp server from or to which to copy the configuration file. If the copyDestFileLocation has the value of ftp(4) or scp(5), it is expected the login information copyUserName and copyUserPassword also be spcified. ')
copyAlarmMibNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0))
copyAlarmVariable = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 1))
copyAlarmLevel = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 1, 1), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: copyAlarmLevel.setStatus('current')
if mibBuilder.loadTexts: copyAlarmLevel.setDescription('the message warning level')
copyAlarmString = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 1, 2), OctetString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: copyAlarmString.setStatus('current')
if mibBuilder.loadTexts: copyAlarmString.setDescription('An generic string value in the TRAP object')
copyAlarmIndex = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 1, 3), Integer32()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: copyAlarmIndex.setStatus('current')
if mibBuilder.loadTexts: copyAlarmIndex.setDescription("the index of the current copy. Indicates the index of the current copy, i.e. copyConfigIndex of dellNetCopyTable. Set to '-1' if copy executed by CLI")
copyConfigCompleted = NotificationType((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0, 1)).setObjects(("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmLevel"), ("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmString"), ("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmIndex"))
if mibBuilder.loadTexts: copyConfigCompleted.setStatus('current')
if mibBuilder.loadTexts: copyConfigCompleted.setDescription('The agent generate this trap when a copy operational is completed.')
configConflict = NotificationType((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0, 2)).setObjects(("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmLevel"), ("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmString"), ("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmIndex"))
if mibBuilder.loadTexts: configConflict.setStatus('current')
if mibBuilder.loadTexts: configConflict.setDescription('The agent generate this trap when a configuration conflict found during audit.')
configConflictClear = NotificationType((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0, 3)).setObjects(("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmLevel"), ("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmString"), ("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmIndex"))
if mibBuilder.loadTexts: configConflictClear.setStatus('current')
if mibBuilder.loadTexts: configConflictClear.setDescription('The agent generate this trap when a configuration conflict resolved during audit.')
batchConfigCommitProgress = NotificationType((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0, 4)).setObjects(("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmLevel"), ("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmString"), ("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmIndex"))
if mibBuilder.loadTexts: batchConfigCommitProgress.setStatus('current')
if mibBuilder.loadTexts: batchConfigCommitProgress.setDescription('The agent generate this trap when a configuration commit is initiated.')
batchConfigCommitCompleted = NotificationType((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0, 5)).setObjects(("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmLevel"), ("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmString"), ("DELL-NETWORKING-COPY-CONFIG-MIB", "copyAlarmIndex"))
if mibBuilder.loadTexts: batchConfigCommitCompleted.setStatus('current')
if mibBuilder.loadTexts: batchConfigCommitCompleted.setDescription('The agent generate this trap when a configuration commit is completed.')
mibBuilder.exportSymbols("DELL-NETWORKING-COPY-CONFIG-MIB", dellNetCopyConfigTraps=dellNetCopyConfigTraps, dellNetCopyConfig=dellNetCopyConfig, DellNetConfigFileType=DellNetConfigFileType, copyAlarmMibNotifications=copyAlarmMibNotifications, copyAlarmLevel=copyAlarmLevel, dellNetCopyConfigMib=dellNetCopyConfigMib, copyFailCause=copyFailCause, copyDestFileName=copyDestFileName, copyServerInetAddressType=copyServerInetAddressType, DellNetConfigFileLocation=DellNetConfigFileLocation, copyTimeStarted=copyTimeStarted, copySrcFileLocation=copySrcFileLocation, copyDestFileLocation=copyDestFileLocation, copyState=copyState, copySrcFileType=copySrcFileType, copyConfigCompleted=copyConfigCompleted, copyUserPassword=copyUserPassword, batchConfigCommitProgress=batchConfigCommitProgress, dellNetCopyConfigObjects=dellNetCopyConfigObjects, DellNetConfigCopyState=DellNetConfigCopyState, copyConfigIndex=copyConfigIndex, copyServerInetAddress=copyServerInetAddress, DellNetConfigCopyFailCause=DellNetConfigCopyFailCause, dellNetCopyEntry=dellNetCopyEntry, copyDestFileType=copyDestFileType, copyAlarmVariable=copyAlarmVariable, copyServerAddress=copyServerAddress, batchConfigCommitCompleted=batchConfigCommitCompleted, copyAlarmIndex=copyAlarmIndex, copySrcFileName=copySrcFileName, PYSNMP_MODULE_ID=dellNetCopyConfigMib, copyTimeCompleted=copyTimeCompleted, configConflict=configConflict, configConflictClear=configConflictClear, copyUserName=copyUserName, dellNetCopyTable=dellNetCopyTable, copyAlarmString=copyAlarmString, copyEntryRowStatus=copyEntryRowStatus)
|
(octet_string, integer, object_identifier) = mibBuilder.importSymbols('ASN1', 'OctetString', 'Integer', 'ObjectIdentifier')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(constraints_union, value_range_constraint, single_value_constraint, value_size_constraint, constraints_intersection) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsUnion', 'ValueRangeConstraint', 'SingleValueConstraint', 'ValueSizeConstraint', 'ConstraintsIntersection')
(dell_net_mgmt,) = mibBuilder.importSymbols('DELL-NETWORKING-SMI', 'dellNetMgmt')
(inet_address, inet_address_type) = mibBuilder.importSymbols('INET-ADDRESS-MIB', 'InetAddress', 'InetAddressType')
(module_compliance, notification_group, object_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup', 'ObjectGroup')
(notification_type, integer32, mib_scalar, mib_table, mib_table_row, mib_table_column, mib_identifier, bits, counter32, object_identity, counter64, module_identity, gauge32, unsigned32, ip_address, iso, time_ticks) = mibBuilder.importSymbols('SNMPv2-SMI', 'NotificationType', 'Integer32', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'MibIdentifier', 'Bits', 'Counter32', 'ObjectIdentity', 'Counter64', 'ModuleIdentity', 'Gauge32', 'Unsigned32', 'IpAddress', 'iso', 'TimeTicks')
(row_status, display_string, textual_convention) = mibBuilder.importSymbols('SNMPv2-TC', 'RowStatus', 'DisplayString', 'TextualConvention')
dell_net_copy_config_mib = module_identity((1, 3, 6, 1, 4, 1, 6027, 3, 5))
dell_net_copy_config_mib.setRevisions(('2009-05-14 13:00', '2007-06-19 12:00', '2003-03-01 12:00'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts:
        dell_net_copy_config_mib.setRevisionsDescriptions(('Added New enum for usbflash filesystem in Exascale', 'Update description to copy from remote server to local', 'Initial Revision'))
if mibBuilder.loadTexts:
    dell_net_copy_config_mib.setLastUpdated('200905141300Z')
if mibBuilder.loadTexts:
    dell_net_copy_config_mib.setOrganization('Dell Inc.')
if mibBuilder.loadTexts:
    dell_net_copy_config_mib.setContactInfo('http://www.dell.com/support')
if mibBuilder.loadTexts:
    dell_net_copy_config_mib.setDescription('Dell Networking OS Copy Config MIB provides copying of running-config to to startup-config and vice-versa, and Dell Networking OS files to local disk or other system via ftp or tftp. ')
dell_net_copy_config_objects = mib_identifier((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1))
dell_net_copy_config = mib_identifier((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1))
dell_net_copy_config_traps = mib_identifier((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2))
class Dellnetconfigfilelocation(Integer32):
subtype_spec = Integer32.subtypeSpec + constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7))
named_values = named_values(('flash', 1), ('slot0', 2), ('tftp', 3), ('ftp', 4), ('scp', 5), ('usbflash', 6), ('nfsmount', 7))
class Dellnetconfigfiletype(Integer32):
subtype_spec = Integer32.subtypeSpec + constraints_union(single_value_constraint(1, 2, 3))
named_values = named_values(('dellNetFile', 1), ('runningConfig', 2), ('startupConfig', 3))
class Dellnetconfigcopystate(Integer32):
subtype_spec = Integer32.subtypeSpec + constraints_union(single_value_constraint(1, 2, 3))
named_values = named_values(('running', 1), ('successful', 2), ('failed', 3))
class Dellnetconfigcopyfailcause(Integer32):
subtype_spec = Integer32.subtypeSpec + constraints_union(single_value_constraint(1, 2, 3, 4, 5, 6, 7))
named_values = named_values(('badFileName', 1), ('copyInProgress', 2), ('diskFull', 3), ('fileExist', 4), ('fileNotFound', 5), ('timeout', 6), ('unknown', 7))
dell_net_copy_table = mib_table((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1))
if mibBuilder.loadTexts:
    dell_net_copy_table.setStatus('current')
if mibBuilder.loadTexts:
    dell_net_copy_table.setDescription('A table of config-copy requests.')
dell_net_copy_entry = mib_table_row((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1)).setIndexNames((0, 'DELL-NETWORKING-COPY-CONFIG-MIB', 'copyConfigIndex'))
if mibBuilder.loadTexts:
    dell_net_copy_entry.setStatus('current')
if mibBuilder.loadTexts:
    dell_net_copy_entry.setDescription('A config-copy request. To use this copy on NMS, user must first query the MIB. if the query returns the result of the previous copied and there is no pending copy operation, user can submit a SNMP SET with a random number as index with the appropraite information as specified by this MIB and the row status as CreateAndGo. The system will only keep the last 5 copy requests as the history. If there are ten entries in the copy request table, the subsequent copy request will replace the existing one in the copy table. 1) To copy running-config from local directory to startup-config. Set the following mib objects in the copy table copySrcFileType : runningConfig (2) copyDestFileType : startupConfig (3) 2) To copy startup-config from local directory to a remote site. Set the following mib objects in the copy table copySrcFileType : startupConfig (3) copyDestFileType : dellNetFile (1) copyDestFileLocation : ftp (4) copyDestFileName : /user/tester1/ftp/ copyServerAddress : 172.20.10.123 copyUserName : tester1 copyUserPassword : mypasswd 3) To copy a file from local directory to a remote site. Set the following mib objects in the copy table copySrcFileType : dellNetFile (1) copySrcFileLocation : slot0 (2) copySrcFileName : NVTRACE_LOG_DIR/LP4-nvtrace-0 copyDestFileType : dellNetFile (1) copyDestFileLocation : ftp (4) copyDestFileName : /usr/tester1/trace/backup/LP4-nvtrace-0 copyServerAddress : 172.20.10.123 copyUserName : tester1 copyUserPassword : mypasswd ')
copy_config_index = mib_table_column((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 1), integer32())
if mibBuilder.loadTexts:
    copy_config_index.setStatus('current')
if mibBuilder.loadTexts:
    copy_config_index.setDescription('To initiate a config copy request, user should assign a positive random value as an index. ')
copy_src_file_type = mib_table_column((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 2), Dellnetconfigfiletype()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    copy_src_file_type.setStatus('current')
if mibBuilder.loadTexts:
    copy_src_file_type.setDescription('Specifies the type of file to copy from. if the copySrcFileType is runningConfig(2) or startupConfig(3), the default DellNetConfigFileLocation is flash(1). If the copySrcFileType has the value of dellNetFile(1), it is expected that the copySrcFileLocation and copySrcFileName must also be spcified. The three objects together will uniquely identify the source file. ')
copy_src_file_location = mib_table_column((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 3), Dellnetconfigfilelocation()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copySrcFileLocation.setStatus('current')
if mibBuilder.loadTexts:
copySrcFileLocation.setDescription('Specifies the location of source file. If the copySrcFileType has the value of dellNetFile(1), it is expected that the copySrcFileType and copySrcFileName must also be spcified. The three objects together will uniquely identify the source file. If the copySrcFileLocation has the value of ftp(4) or scp(5), it is expected the login information liked copyServerAddress, copyUserName, and copyUserPassword also be spcified. ')
copy_src_file_name = mib_table_column((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 4), display_string()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copySrcFileName.setStatus('current')
if mibBuilder.loadTexts:
    copySrcFileName.setDescription('The file name (including the path, if applicable) of the file. If copySrcFileType is set to runningConfig or startupConfig, copySrcFileName is not needed. ')
copyDestFileType = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 5), DellNetConfigFileType()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copyDestFileType.setStatus('current')
if mibBuilder.loadTexts:
    copyDestFileType.setDescription('Specifies the type of file to copy to. If the copyDestFileType is runningConfig(2) or startupConfig(3), the default copyDestFileLocation is flash(1). If the copyDestFileType has the value of dellNetFile(1), the copyDestFileLocation and copyDestFileName must also be specified. The three objects together will uniquely identify the destination file. ')
copyDestFileLocation = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 6), DellNetConfigFileLocation()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copyDestFileLocation.setStatus('current')
if mibBuilder.loadTexts:
    copyDestFileLocation.setDescription('Specifies the location of the destination file. If the copyDestFileType has the value of dellNetFile(1), the copyDestFileType and copyDestFileName must also be specified. The three objects together will uniquely identify the destination file. If the copyDestFileLocation has the value of ftp(4) or scp(5), the login information like copyServerAddress, copyUserName, and copyUserPassword must also be specified. ')
copyDestFileName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 7), DisplayString()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copyDestFileName.setStatus('current')
if mibBuilder.loadTexts:
    copyDestFileName.setDescription('Specifies the name of the destination file. If the copyDestFileType has the value of dellNetFile(1), the copyDestFileType and copyDestFileLocation must also be specified. The three objects together will uniquely identify the destination file. If the copyDestFileLocation has the value of ftp(4) or scp(5), the login information like copyServerAddress, copyUserName, and copyUserPassword must also be specified. ')
copyServerAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 8), IpAddress()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copyServerAddress.setStatus('deprecated')
if mibBuilder.loadTexts:
    copyServerAddress.setDescription('The IP address of the tftp server from (or to) which to copy the configuration file. Values of 0.0.0.0 or FF.FF.FF.FF for copyServerAddress are not allowed. If the copyDestFileLocation has the value of ftp(4) or scp(5), the login information like copyServerAddress, copyUserName, and copyUserPassword must also be specified. ')
copyUserName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copyUserName.setStatus('current')
if mibBuilder.loadTexts:
    copyUserName.setDescription('Remote user name for copy via ftp or scp. If the copyDestFileLocation has the value of ftp(4) or scp(5), the login information like copyServerAddress, copyUserName, and copyUserPassword must also be specified. ')
copyUserPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copyUserPassword.setStatus('current')
if mibBuilder.loadTexts:
    copyUserPassword.setDescription('Password used by ftp or scp for copying a file to an ftp/scp server. If the copyDestFileLocation has the value of ftp(4) or scp(5), the login information like copyServerAddress, copyUserName, and copyUserPassword must also be specified. ')
copyState = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 11), DellNetConfigCopyState()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
copyState.setStatus('current')
if mibBuilder.loadTexts:
copyState.setDescription(' The state of config-copy operation. ')
copyTimeStarted = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 12), TimeTicks()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
copyTimeStarted.setStatus('current')
if mibBuilder.loadTexts:
copyTimeStarted.setDescription(' The timetick when the copy started. ')
copyTimeCompleted = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 13), TimeTicks()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
copyTimeCompleted.setStatus('current')
if mibBuilder.loadTexts:
copyTimeCompleted.setDescription(' The timetick when the copy completed. ')
copyFailCause = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 14), DellNetConfigCopyFailCause()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
copyFailCause.setStatus('current')
if mibBuilder.loadTexts:
copyFailCause.setDescription(' The reason a config-copy request failed. ')
copyEntryRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 15), RowStatus()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copyEntryRowStatus.setStatus('current')
if mibBuilder.loadTexts:
    copyEntryRowStatus.setDescription(' The state of the copy operation. Use createAndGo when performing the copy; the state is set to active when the copy is completed. ')
copyServerInetAddressType = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 16), InetAddressType()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copyServerInetAddressType.setStatus('current')
if mibBuilder.loadTexts:
copyServerInetAddressType.setDescription(' The address type of copyServerInetAddress. Only ipv4 (1), ipv6 (2) and dns (16) types are supported. ')
copyServerInetAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1, 17), InetAddress()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
copyServerInetAddress.setStatus('current')
if mibBuilder.loadTexts:
    copyServerInetAddress.setDescription(' The IP address of the ftp/tftp/scp server from or to which to copy the configuration file. If the copyDestFileLocation has the value of ftp(4) or scp(5), the login information copyUserName and copyUserPassword must also be specified. ')
copyAlarmMibNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0))
copyAlarmVariable = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 1))
copyAlarmLevel = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 1, 1), Integer32()).setMaxAccess('accessiblefornotify')
if mibBuilder.loadTexts:
copyAlarmLevel.setStatus('current')
if mibBuilder.loadTexts:
copyAlarmLevel.setDescription('the message warning level')
copyAlarmString = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 1, 2), OctetString()).setMaxAccess('accessiblefornotify')
if mibBuilder.loadTexts:
copyAlarmString.setStatus('current')
if mibBuilder.loadTexts:
    copyAlarmString.setDescription('A generic string value in the TRAP object')
copyAlarmIndex = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 1, 3), Integer32()).setMaxAccess('accessiblefornotify')
if mibBuilder.loadTexts:
copyAlarmIndex.setStatus('current')
if mibBuilder.loadTexts:
    copyAlarmIndex.setDescription("The index of the current copy, i.e. copyConfigIndex of dellNetCopyTable. Set to '-1' if the copy was executed from the CLI.")
copyConfigCompleted = NotificationType((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0, 1)).setObjects(('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmLevel'), ('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmString'), ('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmIndex'))
if mibBuilder.loadTexts:
copyConfigCompleted.setStatus('current')
if mibBuilder.loadTexts:
    copyConfigCompleted.setDescription('The agent generates this trap when a copy operation is completed.')
configConflict = NotificationType((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0, 2)).setObjects(('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmLevel'), ('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmString'), ('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmIndex'))
if mibBuilder.loadTexts:
configConflict.setStatus('current')
if mibBuilder.loadTexts:
    configConflict.setDescription('The agent generates this trap when a configuration conflict is found during an audit.')
configConflictClear = NotificationType((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0, 3)).setObjects(('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmLevel'), ('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmString'), ('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmIndex'))
if mibBuilder.loadTexts:
configConflictClear.setStatus('current')
if mibBuilder.loadTexts:
    configConflictClear.setDescription('The agent generates this trap when a configuration conflict is resolved during an audit.')
batchConfigCommitProgress = NotificationType((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0, 4)).setObjects(('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmLevel'), ('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmString'), ('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmIndex'))
if mibBuilder.loadTexts:
batchConfigCommitProgress.setStatus('current')
if mibBuilder.loadTexts:
    batchConfigCommitProgress.setDescription('The agent generates this trap when a configuration commit is initiated.')
batchConfigCommitCompleted = NotificationType((1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 2, 0, 5)).setObjects(('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmLevel'), ('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmString'), ('DELL-NETWORKING-COPY-CONFIG-MIB', 'copyAlarmIndex'))
if mibBuilder.loadTexts:
batchConfigCommitCompleted.setStatus('current')
if mibBuilder.loadTexts:
    batchConfigCommitCompleted.setDescription('The agent generates this trap when a configuration commit is completed.')
mibBuilder.exportSymbols('DELL-NETWORKING-COPY-CONFIG-MIB', dellNetCopyConfigTraps=dellNetCopyConfigTraps, dellNetCopyConfig=dellNetCopyConfig, DellNetConfigFileType=DellNetConfigFileType, copyAlarmMibNotifications=copyAlarmMibNotifications, copyAlarmLevel=copyAlarmLevel, dellNetCopyConfigMib=dellNetCopyConfigMib, copyFailCause=copyFailCause, copyDestFileName=copyDestFileName, copyServerInetAddressType=copyServerInetAddressType, DellNetConfigFileLocation=DellNetConfigFileLocation, copyTimeStarted=copyTimeStarted, copySrcFileLocation=copySrcFileLocation, copyDestFileLocation=copyDestFileLocation, copyState=copyState, copySrcFileType=copySrcFileType, copyConfigCompleted=copyConfigCompleted, copyUserPassword=copyUserPassword, batchConfigCommitProgress=batchConfigCommitProgress, dellNetCopyConfigObjects=dellNetCopyConfigObjects, DellNetConfigCopyState=DellNetConfigCopyState, copyConfigIndex=copyConfigIndex, copyServerInetAddress=copyServerInetAddress, DellNetConfigCopyFailCause=DellNetConfigCopyFailCause, dellNetCopyEntry=dellNetCopyEntry, copyDestFileType=copyDestFileType, copyAlarmVariable=copyAlarmVariable, copyServerAddress=copyServerAddress, batchConfigCommitCompleted=batchConfigCommitCompleted, copyAlarmIndex=copyAlarmIndex, copySrcFileName=copySrcFileName, PYSNMP_MODULE_ID=dellNetCopyConfigMib, copyTimeCompleted=copyTimeCompleted, configConflict=configConflict, configConflictClear=configConflictClear, copyUserName=copyUserName, dellNetCopyTable=dellNetCopyTable, copyAlarmString=copyAlarmString, copyEntryRowStatus=copyEntryRowStatus)
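# Illustrative sketch (not part of the generated MIB module): how a manager
# could trigger the running-config -> startup-config copy described in
# dellNetCopyEntry above, via pysnmp's high-level API. The agent address,
# community string and row index below are assumptions for the example.
from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget, ContextData,
                          ObjectType, ObjectIdentity, Integer32, setCmd)

def start_running_to_startup_copy(agent='192.0.2.1', community='private', index=42):
    base = (1, 3, 6, 1, 4, 1, 6027, 3, 5, 1, 1, 1, 1)
    var_binds = [
        ObjectType(ObjectIdentity(base + (2, index)), Integer32(2)),   # copySrcFileType = runningConfig(2)
        ObjectType(ObjectIdentity(base + (5, index)), Integer32(3)),   # copyDestFileType = startupConfig(3)
        ObjectType(ObjectIdentity(base + (15, index)), Integer32(4)),  # copyEntryRowStatus = createAndGo(4)
    ]
    error_indication, error_status, _, _ = next(setCmd(
        SnmpEngine(), CommunityData(community), UdpTransportTarget((agent, 161)),
        ContextData(), *var_binds))
    return error_indication or error_status  # falsy on success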
|
text = input()
first = "AB"
second = "BA"
flag = False
while True:
if text.find(first) != -1:
text = text[text.find(first)+2:]
elif text.find(second) != -1:
text = text[text.find(second)+2:]
else:
break
if len(text) == 0:
flag = True
break
if flag == True:
print("YES")
else:
print("NO")
|
text = input()
first = 'AB'
second = 'BA'
flag = False
while True:
if text.find(first) != -1:
text = text[text.find(first) + 2:]
elif text.find(second) != -1:
text = text[text.find(second) + 2:]
else:
break
if len(text) == 0:
flag = True
break
if flag == True:
print('YES')
else:
print('NO')
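# A quick trace of the scanning loop above (sample inputs assumed): each pass
# cuts the text just past the first 'AB' or 'BA' it finds, so the program
# prints YES only when the string is consumed down to ''.
#   'ABBA' -> 'AB' at 0 -> text = 'BA' -> 'BA' at 0 -> text = '' -> YES
#   'ABA'  -> 'AB' at 0 -> text = 'A'  -> no match  -> len != 0  -> NO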
|
routes = {
'{"command": "devs"}' : {"STATUS":[{"STATUS":"S","When":1553528607,"Code":9,"Msg":"3 GPU(s)","Description":"sgminer 5.6.2-b"}],"DEVS":[{"ASC":0,"Name":"BKLU","ID":0,"Enabled":"Y","Status":"Alive","Temperature":43.00,"MHS av":14131.4720,"MHS 5s":14130.6009,"Accepted":11788,"Rejected":9,"Hardware Errors":0,"Utility":2.9159,"Last Share Pool":0,"Last Share Time":1553528606,"Total MH":3427718036.9040,"Diff1 Work":224456704.000000,"Difficulty Accepted":195866624.00000000,"Difficulty Rejected":147456.00000000,"Last Share Difficulty":16384.00000000,"No Device":False,"Last Valid Work":1553528606,"Device Hardware%":0.0000,"Device Rejected%":0.0657,"Device Elapsed":242559},{"ASC":1,"Name":"BKLU","ID":1,"Enabled":"Y","Status":"Alive","Temperature":42.00,"MHS av":14131.4689,"MHS 5s":14131.1408,"Accepted":11757,"Rejected":10,"Hardware Errors":0,"Utility":2.9082,"Last Share Pool":0,"Last Share Time":1553528603,"Total MH":3427717273.8192,"Diff1 Work":223334400.000000,"Difficulty Accepted":195428352.00000000,"Difficulty Rejected":163840.00000000,"Last Share Difficulty":16384.00000000,"No Device":False,"Last Valid Work":1553528603,"Device Hardware%":0.0000,"Device Rejected%":0.0734,"Device Elapsed":242559},{"ASC":2,"Name":"BKLU","ID":2,"Enabled":"Y","Status":"Alive","Temperature":39.00,"MHS av":14131.4688,"MHS 5s":14131.2909,"Accepted":11570,"Rejected":12,"Hardware Errors":0,"Utility":2.8620,"Last Share Pool":0,"Last Share Time":1553528605,"Total MH":3427717259.6881,"Diff1 Work":219270144.000000,"Difficulty Accepted":191811584.00000000,"Difficulty Rejected":196608.00000000,"Last Share Difficulty":16384.00000000,"No Device":False,"Last Valid Work":1553528605,"Device Hardware%":0.0000,"Device Rejected%":0.0897,"Device Elapsed":242559}],"id":1},
'{"command": "pools"}' : {"STATUS":[{"STATUS":"S","When":1553528611,"Code":7,"Msg":"6 Pool(s)","Description":"sgminer 5.6.2-b"}],"POOLS":[{"POOL":0,"Name":"lbry.usa.nicehash.com","URL":"stratum+tcp://lbry.usa.nicehash.com:3356","Profile":"","Algorithm":"lbry","Algorithm Type":"Lbry","Description":"","Status":"Alive","Priority":0,"Quota":1,"Long Poll":"N","Getworks":7014,"Accepted":35115,"Rejected":31,"Works":3931179,"Discarded":135670,"Stale":5043,"Get Failures":0,"Remote Failures":0,"User":"3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1","Last Share Time":1553528606,"Diff1 Shares":667061248.000000,"Proxy Type":"","Proxy":"","Difficulty Accepted":583106560.00000000,"Difficulty Rejected":507904.00000000,"Difficulty Stale":83446784.00000000,"Last Share Difficulty":16384.00000000,"Has Stratum":True,"Stratum Active":True,"Stratum URL":"lbry.usa.nicehash.com","Has GBT":False,"Best Share":1286062923.914351,"Pool Rejected%":0.0761,"Pool Stale%":12.5096},{"POOL":1,"Name":"decred.usa.nicehash.com","URL":"stratum+tcp://decred.usa.nicehash.com:3354","Profile":"","Algorithm":"decred","Algorithm Type":"Decred","Description":"","Status":"Alive","Priority":1,"Quota":1,"Long Poll":"N","Getworks":0,"Accepted":0,"Rejected":0,"Works":0,"Discarded":0,"Stale":0,"Get Failures":0,"Remote Failures":0,"User":"3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1","Last Share Time":0,"Diff1 Shares":0.000000,"Proxy Type":"","Proxy":"","Difficulty Accepted":0.00000000,"Difficulty Rejected":0.00000000,"Difficulty Stale":0.00000000,"Last Share Difficulty":0.00000000,"Has Stratum":True,"Stratum Active":False,"Stratum URL":"","Has GBT":False,"Best Share":0.000000,"Pool Rejected%":0.0000,"Pool Stale%":0.0000},{"POOL":2,"Name":"blake256r14.usa.nicehash.com","URL":"stratum+tcp://blake256r14.usa.nicehash.com:3350","Profile":"","Algorithm":"blake256r14","Algorithm Type":"Blake","Description":"","Status":"Dead","Priority":1,"Quota":1,"Long Poll":"N","Getworks":0,"Accepted":0,"Rejected":0,"Works":0,"Discarded":0,"Stale":0,"Get Failures":0,"Remote Failures":0,"User":"3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1","Last Share Time":0,"Diff1 Shares":0.000000,"Proxy Type":"","Proxy":"","Difficulty Accepted":0.00000000,"Difficulty Rejected":0.00000000,"Difficulty Stale":0.00000000,"Last Share Difficulty":0.00000000,"Has Stratum":True,"Stratum Active":False,"Stratum URL":"","Has GBT":False,"Best Share":0.000000,"Pool Rejected%":0.0000,"Pool Stale%":0.0000},{"POOL":3,"Name":"blake256r8.usa.nicehash.com","URL":"stratum+tcp://blake256r8.usa.nicehash.com:3349","Profile":"","Algorithm":"blake256r8","Algorithm Type":"Blakecoin","Description":"","Status":"Alive","Priority":1,"Quota":1,"Long Poll":"N","Getworks":0,"Accepted":0,"Rejected":0,"Works":0,"Discarded":0,"Stale":0,"Get Failures":0,"Remote Failures":0,"User":"3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1","Last Share Time":0,"Diff1 Shares":0.000000,"Proxy Type":"","Proxy":"","Difficulty Accepted":0.00000000,"Difficulty Rejected":0.00000000,"Difficulty Stale":0.00000000,"Last Share Difficulty":0.00000000,"Has Stratum":True,"Stratum Active":False,"Stratum URL":"","Has GBT":False,"Best Share":0.000000,"Pool Rejected%":0.0000,"Pool Stale%":0.0000},{"POOL":4,"Name":"pascal.usa.nicehash.com","URL":"stratum+tcp://pascal.usa.nicehash.com:3358","Profile":"","Algorithm":"pascal","Algorithm Type":"Pascal","Description":"","Status":"Alive","Priority":1,"Quota":1,"Long Poll":"N","Getworks":0,"Accepted":0,"Rejected":0,"Works":0,"Discarded":0,"Stale":0,"Get Failures":0,"Remote Failures":0,"User":"3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1","Last Share Time":0,"Diff1 Shares":0.000000,"Proxy Type":"","Proxy":"","Difficulty Accepted":0.00000000,"Difficulty Rejected":0.00000000,"Difficulty Stale":0.00000000,"Last Share Difficulty":0.00000000,"Has Stratum":True,"Stratum Active":False,"Stratum URL":"","Has GBT":False,"Best Share":0.000000,"Pool Rejected%":0.0000,"Pool Stale%":0.0000},{"POOL":5,"Name":"sia.usa.nicehash.com","URL":"stratum+tcp://sia.usa.nicehash.com:3360","Profile":"","Algorithm":"sia","Algorithm Type":"Sia","Description":"","Status":"Alive","Priority":2,"Quota":1,"Long Poll":"N","Getworks":0,"Accepted":0,"Rejected":0,"Works":0,"Discarded":0,"Stale":0,"Get Failures":0,"Remote Failures":0,"User":"3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1","Last Share Time":0,"Diff1 Shares":0.000000,"Proxy Type":"","Proxy":"","Difficulty Accepted":0.00000000,"Difficulty Rejected":0.00000000,"Difficulty Stale":0.00000000,"Last Share Difficulty":0.00000000,"Has Stratum":True,"Stratum Active":False,"Stratum URL":"","Has GBT":False,"Best Share":0.000000,"Pool Rejected%":0.0000,"Pool Stale%":0.0000}],"id":1},
'{"command": "summary"}' : {"STATUS":[{"STATUS":"S","When":1553528611,"Code":11,"Msg":"Summary","Description":"sgminer 5.6.2-b"}],"SUMMARY":[{"Elapsed":242564,"MHS av":42394.3021,"MHS 5s":42383.2910,"KHS av":42394302,"KHS 5s":42383291,"Found Blocks":3995,"Getworks":7014,"Accepted":35115,"Rejected":31,"Hardware Errors":0,"Utility":8.6859,"Discarded":135670,"Stale":5043,"Get Failures":0,"Local Work":4072753,"Remote Failures":0,"Network Blocks":1548,"Total MH":10283339200.9091,"Work Utility":165002.4113,"Difficulty Accepted":583106560.00000000,"Difficulty Rejected":507904.00000000,"Difficulty Stale":83446784.00000000,"Best Share":1286062923.914351,"Device Hardware%":0.0000,"Device Rejected%":0.0761,"Pool Rejected%":0.0761,"Pool Stale%":12.5096,"Last getwork":1553528611}],"id":1}
}
|
routes = {'{"command": "devs"}': {'STATUS': [{'STATUS': 'S', 'When': 1553528607, 'Code': 9, 'Msg': '3 GPU(s)', 'Description': 'sgminer 5.6.2-b'}], 'DEVS': [{'ASC': 0, 'Name': 'BKLU', 'ID': 0, 'Enabled': 'Y', 'Status': 'Alive', 'Temperature': 43.0, 'MHS av': 14131.472, 'MHS 5s': 14130.6009, 'Accepted': 11788, 'Rejected': 9, 'Hardware Errors': 0, 'Utility': 2.9159, 'Last Share Pool': 0, 'Last Share Time': 1553528606, 'Total MH': 3427718036.904, 'Diff1 Work': 224456704.0, 'Difficulty Accepted': 195866624.0, 'Difficulty Rejected': 147456.0, 'Last Share Difficulty': 16384.0, 'No Device': False, 'Last Valid Work': 1553528606, 'Device Hardware%': 0.0, 'Device Rejected%': 0.0657, 'Device Elapsed': 242559}, {'ASC': 1, 'Name': 'BKLU', 'ID': 1, 'Enabled': 'Y', 'Status': 'Alive', 'Temperature': 42.0, 'MHS av': 14131.4689, 'MHS 5s': 14131.1408, 'Accepted': 11757, 'Rejected': 10, 'Hardware Errors': 0, 'Utility': 2.9082, 'Last Share Pool': 0, 'Last Share Time': 1553528603, 'Total MH': 3427717273.8192, 'Diff1 Work': 223334400.0, 'Difficulty Accepted': 195428352.0, 'Difficulty Rejected': 163840.0, 'Last Share Difficulty': 16384.0, 'No Device': False, 'Last Valid Work': 1553528603, 'Device Hardware%': 0.0, 'Device Rejected%': 0.0734, 'Device Elapsed': 242559}, {'ASC': 2, 'Name': 'BKLU', 'ID': 2, 'Enabled': 'Y', 'Status': 'Alive', 'Temperature': 39.0, 'MHS av': 14131.4688, 'MHS 5s': 14131.2909, 'Accepted': 11570, 'Rejected': 12, 'Hardware Errors': 0, 'Utility': 2.862, 'Last Share Pool': 0, 'Last Share Time': 1553528605, 'Total MH': 3427717259.6881, 'Diff1 Work': 219270144.0, 'Difficulty Accepted': 191811584.0, 'Difficulty Rejected': 196608.0, 'Last Share Difficulty': 16384.0, 'No Device': False, 'Last Valid Work': 1553528605, 'Device Hardware%': 0.0, 'Device Rejected%': 0.0897, 'Device Elapsed': 242559}], 'id': 1}, '{"command": "pools"}': {'STATUS': [{'STATUS': 'S', 'When': 1553528611, 'Code': 7, 'Msg': '6 Pool(s)', 'Description': 'sgminer 5.6.2-b'}], 'POOLS': [{'POOL': 0, 'Name': 'lbry.usa.nicehash.com', 'URL': 'stratum+tcp://lbry.usa.nicehash.com:3356', 'Profile': '', 'Algorithm': 'lbry', 'Algorithm Type': 'Lbry', 'Description': '', 'Status': 'Alive', 'Priority': 0, 'Quota': 1, 'Long Poll': 'N', 'Getworks': 7014, 'Accepted': 35115, 'Rejected': 31, 'Works': 3931179, 'Discarded': 135670, 'Stale': 5043, 'Get Failures': 0, 'Remote Failures': 0, 'User': '3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1', 'Last Share Time': 1553528606, 'Diff1 Shares': 667061248.0, 'Proxy Type': '', 'Proxy': '', 'Difficulty Accepted': 583106560.0, 'Difficulty Rejected': 507904.0, 'Difficulty Stale': 83446784.0, 'Last Share Difficulty': 16384.0, 'Has Stratum': True, 'Stratum Active': True, 'Stratum URL': 'lbry.usa.nicehash.com', 'Has GBT': False, 'Best Share': 1286062923.914351, 'Pool Rejected%': 0.0761, 'Pool Stale%': 12.5096}, {'POOL': 1, 'Name': 'decred.usa.nicehash.com', 'URL': 'stratum+tcp://decred.usa.nicehash.com:3354', 'Profile': '', 'Algorithm': 'decred', 'Algorithm Type': 'Decred', 'Description': '', 'Status': 'Alive', 'Priority': 1, 'Quota': 1, 'Long Poll': 'N', 'Getworks': 0, 'Accepted': 0, 'Rejected': 0, 'Works': 0, 'Discarded': 0, 'Stale': 0, 'Get Failures': 0, 'Remote Failures': 0, 'User': '3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1', 'Last Share Time': 0, 'Diff1 Shares': 0.0, 'Proxy Type': '', 'Proxy': '', 'Difficulty Accepted': 0.0, 'Difficulty Rejected': 0.0, 'Difficulty Stale': 0.0, 'Last Share Difficulty': 0.0, 'Has Stratum': True, 'Stratum Active': False, 'Stratum URL': '', 'Has GBT': False, 'Best Share': 0.0, 'Pool Rejected%': 
0.0, 'Pool Stale%': 0.0}, {'POOL': 2, 'Name': 'blake256r14.usa.nicehash.com', 'URL': 'stratum+tcp://blake256r14.usa.nicehash.com:3350', 'Profile': '', 'Algorithm': 'blake256r14', 'Algorithm Type': 'Blake', 'Description': '', 'Status': 'Dead', 'Priority': 1, 'Quota': 1, 'Long Poll': 'N', 'Getworks': 0, 'Accepted': 0, 'Rejected': 0, 'Works': 0, 'Discarded': 0, 'Stale': 0, 'Get Failures': 0, 'Remote Failures': 0, 'User': '3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1', 'Last Share Time': 0, 'Diff1 Shares': 0.0, 'Proxy Type': '', 'Proxy': '', 'Difficulty Accepted': 0.0, 'Difficulty Rejected': 0.0, 'Difficulty Stale': 0.0, 'Last Share Difficulty': 0.0, 'Has Stratum': True, 'Stratum Active': False, 'Stratum URL': '', 'Has GBT': False, 'Best Share': 0.0, 'Pool Rejected%': 0.0, 'Pool Stale%': 0.0}, {'POOL': 3, 'Name': 'blake256r8.usa.nicehash.com', 'URL': 'stratum+tcp://blake256r8.usa.nicehash.com:3349', 'Profile': '', 'Algorithm': 'blake256r8', 'Algorithm Type': 'Blakecoin', 'Description': '', 'Status': 'Alive', 'Priority': 1, 'Quota': 1, 'Long Poll': 'N', 'Getworks': 0, 'Accepted': 0, 'Rejected': 0, 'Works': 0, 'Discarded': 0, 'Stale': 0, 'Get Failures': 0, 'Remote Failures': 0, 'User': '3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1', 'Last Share Time': 0, 'Diff1 Shares': 0.0, 'Proxy Type': '', 'Proxy': '', 'Difficulty Accepted': 0.0, 'Difficulty Rejected': 0.0, 'Difficulty Stale': 0.0, 'Last Share Difficulty': 0.0, 'Has Stratum': True, 'Stratum Active': False, 'Stratum URL': '', 'Has GBT': False, 'Best Share': 0.0, 'Pool Rejected%': 0.0, 'Pool Stale%': 0.0}, {'POOL': 4, 'Name': 'pascal.usa.nicehash.com', 'URL': 'stratum+tcp://pascal.usa.nicehash.com:3358', 'Profile': '', 'Algorithm': 'pascal', 'Algorithm Type': 'Pascal', 'Description': '', 'Status': 'Alive', 'Priority': 1, 'Quota': 1, 'Long Poll': 'N', 'Getworks': 0, 'Accepted': 0, 'Rejected': 0, 'Works': 0, 'Discarded': 0, 'Stale': 0, 'Get Failures': 0, 'Remote Failures': 0, 'User': '3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1', 'Last Share Time': 0, 'Diff1 Shares': 0.0, 'Proxy Type': '', 'Proxy': '', 'Difficulty Accepted': 0.0, 'Difficulty Rejected': 0.0, 'Difficulty Stale': 0.0, 'Last Share Difficulty': 0.0, 'Has Stratum': True, 'Stratum Active': False, 'Stratum URL': '', 'Has GBT': False, 'Best Share': 0.0, 'Pool Rejected%': 0.0, 'Pool Stale%': 0.0}, {'POOL': 5, 'Name': 'sia.usa.nicehash.com', 'URL': 'stratum+tcp://sia.usa.nicehash.com:3360', 'Profile': '', 'Algorithm': 'sia', 'Algorithm Type': 'Sia', 'Description': '', 'Status': 'Alive', 'Priority': 2, 'Quota': 1, 'Long Poll': 'N', 'Getworks': 0, 'Accepted': 0, 'Rejected': 0, 'Works': 0, 'Discarded': 0, 'Stale': 0, 'Get Failures': 0, 'Remote Failures': 0, 'User': '3J6HNskoH271PVPFvfAmBqUmarMFjwwfCX.1', 'Last Share Time': 0, 'Diff1 Shares': 0.0, 'Proxy Type': '', 'Proxy': '', 'Difficulty Accepted': 0.0, 'Difficulty Rejected': 0.0, 'Difficulty Stale': 0.0, 'Last Share Difficulty': 0.0, 'Has Stratum': True, 'Stratum Active': False, 'Stratum URL': '', 'Has GBT': False, 'Best Share': 0.0, 'Pool Rejected%': 0.0, 'Pool Stale%': 0.0}], 'id': 1}, '{"command": "summary"}': {'STATUS': [{'STATUS': 'S', 'When': 1553528611, 'Code': 11, 'Msg': 'Summary', 'Description': 'sgminer 5.6.2-b'}], 'SUMMARY': [{'Elapsed': 242564, 'MHS av': 42394.3021, 'MHS 5s': 42383.291, 'KHS av': 42394302, 'KHS 5s': 42383291, 'Found Blocks': 3995, 'Getworks': 7014, 'Accepted': 35115, 'Rejected': 31, 'Hardware Errors': 0, 'Utility': 8.6859, 'Discarded': 135670, 'Stale': 5043, 'Get Failures': 0, 'Local Work': 4072753, 'Remote Failures': 0, 'Network Blocks': 1548, 'Total MH': 10283339200.9091, 'Work Utility': 165002.4113, 'Difficulty Accepted': 583106560.0, 'Difficulty Rejected': 507904.0, 'Difficulty Stale': 83446784.0, 'Best Share': 1286062923.914351, 'Device Hardware%': 0.0, 'Device Rejected%': 0.0761, 'Pool Rejected%': 0.0761, 'Pool Stale%': 12.5096, 'Last getwork': 1553528611}], 'id': 1}}
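# Minimal usage sketch (an assumption about intent): the routes mapping above
# can stand in for a live sgminer JSON API in tests -- the raw command string
# is the lookup key, and json.dumps with default separators reproduces it.
import json

def fake_api(command: str) -> dict:
    return routes[json.dumps({'command': command})]

assert fake_api('summary')['SUMMARY'][0]['Accepted'] == 35115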
|
TAG_ALBUM = "album"
TAG_ALBUM_ARTIST = "album_artist"
TAG_ARTIST = "artist"
TAG_DURATION = "duration"
TAG_GENRE = "genre"
TAG_TITLE = "title"
TAG_TRACK = "track"
TAG_YEAR = "year"
TAGS = frozenset([
TAG_ALBUM,
TAG_ALBUM_ARTIST,
TAG_ARTIST,
TAG_DURATION,
TAG_GENRE,
TAG_TITLE,
TAG_TRACK,
TAG_YEAR,
])
|
tag_album = 'album'
tag_album_artist = 'album_artist'
tag_artist = 'artist'
tag_duration = 'duration'
tag_genre = 'genre'
tag_title = 'title'
tag_track = 'track'
tag_year = 'year'
tags = frozenset([tag_album, tag_album_artist, tag_artist, tag_duration, tag_genre, tag_title, tag_track, tag_year])
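# Minimal usage sketch: the frozenset gives O(1) membership tests for tag
# validation (the helper name below is hypothetical).
def is_known_tag(tag: str) -> bool:
    return tag in tags

assert is_known_tag(tag_album) and not is_known_tag('composer')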
|
import collections
from typing import List

class Solution:
def sortItems(self, n: int, m: int, group: List[int], beforeItems: List[List[int]]) -> List[int]:
cnt = 0
groups = collections.defaultdict(list)
for i, g in enumerate(group):
if g == -1:
group[i] = cnt + m
cnt += 1
groups[group[i]].append(i)
degrees = [0] * (m + cnt)
graphs = collections.defaultdict(set)
follows = collections.defaultdict(list)
for v, befores in enumerate(beforeItems):
for u in befores:
if group[u] != group[v]:
degrees[group[v]] += 1
follows[group[u]].append(group[v])
else:
graphs[group[u]].add((u, v))
frees = []
for i in range(cnt + m):
if degrees[i] == 0:
frees.append(i)
group_seq = []
while frees:
node = frees.pop()
group_seq.append(node)
for nei in follows[node]:
degrees[nei] -= 1
if degrees[nei] == 0:
frees.append(nei)
if len(group_seq) != m + cnt:
return []
ans = []
for gidx in group_seq:
if len(groups[gidx]) == 1:
ans.append(groups[gidx].pop())
else:
eles = groups[gidx]
edges = graphs[gidx]
degrees = {e : 0 for e in eles}
follows = collections.defaultdict(set)
for u, v in edges:
degrees[v] += 1
follows[u].add(v)
frees = []
for e in eles:
if degrees[e] == 0:
frees.append(e)
seq = []
while frees:
node = frees.pop()
seq.append(node)
for nei in follows[node]:
degrees[nei] -= 1
if degrees[nei] == 0:
frees.append(nei)
if len(seq) == len(eles):
ans.extend(seq)
else:
return []
return ans
|
import collections
from typing import List

class Solution:
def sort_items(self, n: int, m: int, group: List[int], beforeItems: List[List[int]]) -> List[int]:
cnt = 0
groups = collections.defaultdict(list)
for (i, g) in enumerate(group):
if g == -1:
group[i] = cnt + m
cnt += 1
groups[group[i]].append(i)
degrees = [0] * (m + cnt)
graphs = collections.defaultdict(set)
follows = collections.defaultdict(list)
for (v, befores) in enumerate(beforeItems):
for u in befores:
if group[u] != group[v]:
degrees[group[v]] += 1
follows[group[u]].append(group[v])
else:
graphs[group[u]].add((u, v))
frees = []
for i in range(cnt + m):
if degrees[i] == 0:
frees.append(i)
group_seq = []
while frees:
node = frees.pop()
group_seq.append(node)
for nei in follows[node]:
degrees[nei] -= 1
if degrees[nei] == 0:
frees.append(nei)
if len(group_seq) != m + cnt:
return []
ans = []
for gidx in group_seq:
if len(groups[gidx]) == 1:
ans.append(groups[gidx].pop())
else:
eles = groups[gidx]
edges = graphs[gidx]
degrees = {e: 0 for e in eles}
follows = collections.defaultdict(set)
for (u, v) in edges:
degrees[v] += 1
follows[u].add(v)
frees = []
for e in eles:
if degrees[e] == 0:
frees.append(e)
seq = []
while frees:
node = frees.pop()
seq.append(node)
for nei in follows[node]:
degrees[nei] -= 1
if degrees[nei] == 0:
frees.append(nei)
if len(seq) == len(eles):
ans.extend(seq)
else:
return []
return ans
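# Usage sketch with the standard example for this problem (inputs assumed):
# an empty list means no valid ordering exists; one valid answer here is
# [6, 3, 4, 1, 5, 2, 0, 7].
print(Solution().sort_items(8, 2, [-1, -1, 1, 0, 0, 1, 0, -1],
                            [[], [6], [5], [6], [3, 6], [], [], []]))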
|
def calc_eccentricity(dist_list):
"""Calculate and return eccentricity from list of radii."""
apoapsis = max(dist_list)
periapsis = min(dist_list)
eccentricity = (apoapsis - periapsis) / (apoapsis + periapsis)
return eccentricity
|
def calc_eccentricity(dist_list):
"""Calculate and return eccentricity from list of radii."""
apoapsis = max(dist_list)
periapsis = min(dist_list)
eccentricity = (apoapsis - periapsis) / (apoapsis + periapsis)
return eccentricity
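# Quick sanity checks (figures assumed): a circular orbit gives e = 0, and a
# 2:1 apoapsis/periapsis ratio gives e = (2 - 1) / (2 + 1) = 1/3.
assert calc_eccentricity([1.0, 1.0]) == 0.0
assert abs(calc_eccentricity([1.0, 2.0]) - 1 / 3) < 1e-12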
|
description = 'NICOS demo startup setup'
group = 'lowlevel'
# startupcode = '''
# printinfo("============================================================")
# printinfo("Welcome to the NICOS demo.")
# printinfo("Run one of the following commands to set up either a triple-axis")
# printinfo("or a SANS demo setup:")
# printinfo(" > NewSetup('tas')")
# printinfo(" > NewSetup('sans')")
# '''
|
description = 'NICOS demo startup setup'
group = 'lowlevel'
|
#!/usr/bin/env python
class Life:
def __init__(self, name='unknown'):
print('Hello ' + name)
self.name = name
def live(self):
print(self.name)
def __del__(self):
print('Goodbye ' + self.name)
brian = Life('Brian')
brian.live()
brian = 'leretta'
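# Expected output of the snippet above (CPython refcounting assumed):
#   Hello Brian
#   Brian
#   Goodbye Brian   <- rebinding 'brian' drops the last reference, so __del__ runs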
|
class Life:
def __init__(self, name='unknown'):
print('Hello ' + name)
self.name = name
def live(self):
print(self.name)
def __del__(self):
print('Goodbye ' + self.name)
brian = Life('Brian')
brian.live()
brian = 'leretta'
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'lzma_sdk_sources': [
'7z.h',
'7zAlloc.c',
'7zAlloc.h',
'7zArcIn.c',
'7zBuf.c',
'7zBuf.h',
'7zCrc.c',
'7zCrc.h',
'7zCrcOpt.c',
'7zDec.c',
'7zFile.c',
'7zFile.h',
'7zStream.c',
'7zTypes.h',
'Alloc.c',
'Alloc.h',
'Bcj2.c',
'Bcj2.h',
'Bra.c',
'Bra.h',
'Bra86.c',
'Compiler.h',
'CpuArch.c',
'CpuArch.h',
'Delta.c',
'Delta.h',
'LzFind.c',
'LzFind.h',
'LzHash.h',
'Lzma2Dec.c',
'Lzma2Dec.h',
'LzmaEnc.c',
'LzmaEnc.h',
'LzmaDec.c',
'LzmaDec.h',
'LzmaLib.c',
'LzmaLib.h',
'Precomp.h',
],
},
'targets': [
{
'target_name': 'lzma_sdk',
'type': 'static_library',
'defines': [
'_7ZIP_ST',
'_7Z_NO_METHODS_FILTERS',
'_LZMA_PROB32',
],
'variables': {
# Upstream uses self-assignment to avoid warnings.
'clang_warning_flags': [ '-Wno-self-assign' ]
},
'sources': [
'<@(lzma_sdk_sources)',
],
'include_dirs': [
'.',
],
'direct_dependent_settings': {
'include_dirs': [
'.',
],
},
},
],
'conditions': [
['OS=="win"', {
'targets': [
{
'target_name': 'lzma_sdk64',
'type': 'static_library',
'defines': [
'_7ZIP_ST',
'_LZMA_PROB32',
],
'variables': {
# Upstream uses self-assignment to avoid warnings.
'clang_warning_flags': [ '-Wno-self-assign' ]
},
'include_dirs': [
'.',
],
'sources': [
'<@(lzma_sdk_sources)',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
'direct_dependent_settings': {
'include_dirs': [
'.',
],
},
},
],
}],
],
}
|
{'variables': {'lzma_sdk_sources': ['7z.h', '7zAlloc.c', '7zAlloc.h', '7zArcIn.c', '7zBuf.c', '7zBuf.h', '7zCrc.c', '7zCrc.h', '7zCrcOpt.c', '7zDec.c', '7zFile.c', '7zFile.h', '7zStream.c', '7zTypes.h', 'Alloc.c', 'Alloc.h', 'Bcj2.c', 'Bcj2.h', 'Bra.c', 'Bra.h', 'Bra86.c', 'Compiler.h', 'CpuArch.c', 'CpuArch.h', 'Delta.c', 'Delta.h', 'LzFind.c', 'LzFind.h', 'LzHash.h', 'Lzma2Dec.c', 'Lzma2Dec.h', 'LzmaEnc.c', 'LzmaEnc.h', 'LzmaDec.c', 'LzmaDec.h', 'LzmaLib.c', 'LzmaLib.h', 'Precomp.h']}, 'targets': [{'target_name': 'lzma_sdk', 'type': 'static_library', 'defines': ['_7ZIP_ST', '_7Z_NO_METHODS_FILTERS', '_LZMA_PROB32'], 'variables': {'clang_warning_flags': ['-Wno-self-assign']}, 'sources': ['<@(lzma_sdk_sources)'], 'include_dirs': ['.'], 'direct_dependent_settings': {'include_dirs': ['.']}}], 'conditions': [['OS=="win"', {'targets': [{'target_name': 'lzma_sdk64', 'type': 'static_library', 'defines': ['_7ZIP_ST', '_LZMA_PROB32'], 'variables': {'clang_warning_flags': ['-Wno-self-assign']}, 'include_dirs': ['.'], 'sources': ['<@(lzma_sdk_sources)'], 'configurations': {'Common_Base': {'msvs_target_platform': 'x64'}}, 'direct_dependent_settings': {'include_dirs': ['.']}}]}]]}
|
config = {
'sampling_rate': 22050,
'hop_size': 256,
'model_type': 'hifigan_generator',
'hifigan_generator_params': {
'out_channels': 1,
'kernel_size': 7,
'filters': 128,
'use_bias': True,
'upsample_scales': [8, 8, 2, 2],
'stacks': 3,
'stack_kernel_size': [3, 7, 11],
'stack_dilation_rate': [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
'use_final_nolinear_activation': True,
'is_weight_norm': False,
},
'hifigan_discriminator_params': {
'out_channels': 1,
'period_scales': [2, 3, 5, 7, 11],
'n_layers': 5,
'kernel_size': 5,
'strides': 3,
'filters': 8,
'filter_scales': 4,
'max_filters': 512,
'is_weight_norm': False,
},
'melgan_discriminator_params': {
'out_channels': 1,
'scales': 3,
'downsample_pooling': 'AveragePooling1D',
'downsample_pooling_params': {'pool_size': 4, 'strides': 2},
'kernel_sizes': [5, 3],
'filters': 16,
'max_downsample_filters': 512,
'downsample_scales': [4, 4, 4, 4],
'nonlinear_activation': 'LeakyReLU',
'nonlinear_activation_params': {'alpha': 0.2},
'is_weight_norm': False,
},
'stft_loss_params': {
'fft_lengths': [1024, 2048, 512],
'frame_steps': [120, 240, 50],
'frame_lengths': [600, 1200, 240],
},
'lambda_feat_match': 10.0,
'lambda_adv': 4.0,
'batch_size': 16,
'batch_max_steps': 8192,
'batch_max_steps_valid': 81920,
'remove_short_samples': True,
'allow_cache': True,
'is_shuffle': True,
}
|
config = {'sampling_rate': 22050, 'hop_size': 256, 'model_type': 'hifigan_generator', 'hifigan_generator_params': {'out_channels': 1, 'kernel_size': 7, 'filters': 128, 'use_bias': True, 'upsample_scales': [8, 8, 2, 2], 'stacks': 3, 'stack_kernel_size': [3, 7, 11], 'stack_dilation_rate': [[1, 3, 5], [1, 3, 5], [1, 3, 5]], 'use_final_nolinear_activation': True, 'is_weight_norm': False}, 'hifigan_discriminator_params': {'out_channels': 1, 'period_scales': [2, 3, 5, 7, 11], 'n_layers': 5, 'kernel_size': 5, 'strides': 3, 'filters': 8, 'filter_scales': 4, 'max_filters': 512, 'is_weight_norm': False}, 'melgan_discriminator_params': {'out_channels': 1, 'scales': 3, 'downsample_pooling': 'AveragePooling1D', 'downsample_pooling_params': {'pool_size': 4, 'strides': 2}, 'kernel_sizes': [5, 3], 'filters': 16, 'max_downsample_filters': 512, 'downsample_scales': [4, 4, 4, 4], 'nonlinear_activation': 'LeakyReLU', 'nonlinear_activation_params': {'alpha': 0.2}, 'is_weight_norm': False}, 'stft_loss_params': {'fft_lengths': [1024, 2048, 512], 'frame_steps': [120, 240, 50], 'frame_lengths': [600, 1200, 240]}, 'lambda_feat_match': 10.0, 'lambda_adv': 4.0, 'batch_size': 16, 'batch_max_steps': 8192, 'batch_max_steps_valid': 81920, 'remove_short_samples': True, 'allow_cache': True, 'is_shuffle': True}
|
def countdown(num):
print(num)
if num == 0:
return
else:
countdown(num - 1)
if __name__ == "__main__":
countdown(10)
|
def countdown(num):
print(num)
if num == 0:
return
else:
countdown(num - 1)
if __name__ == '__main__':
countdown(10)
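# Equivalent iterative sketch (an alternative, not from the original): same
# output without recursion, which sidesteps Python's ~1000-frame default limit.
def countdown_iter(num):
    for i in range(num, -1, -1):
        print(i)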
|
class GridNode(object):
"""
A structure that represents a particular location in (U,V) from a grid.
GridNode(uIndex: int,vIndex: int)
"""
@staticmethod
def __new__(self,uIndex,vIndex):
"""
__new__[GridNode]() -> GridNode
__new__(cls: type,uIndex: int,vIndex: int)
"""
pass
UIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The node's index along the U axis.
Get: UIndex(self: GridNode) -> int
Set: UIndex(self: GridNode)=value
"""
VIndex=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""The node's index along the V axis.
Get: VIndex(self: GridNode) -> int
Set: VIndex(self: GridNode)=value
"""
|
class Gridnode(object):
"""
A structure that represents a particular location in (U,V) from a grid.
GridNode(uIndex: int,vIndex: int)
"""
@staticmethod
def __new__(self, uIndex, vIndex):
"""
__new__[GridNode]() -> GridNode
__new__(cls: type,uIndex: int,vIndex: int)
"""
pass
u_index = property(lambda self: object(), lambda self, v: None, lambda self: None)
"The node's index along the U axis.\n\n\n\nGet: UIndex(self: GridNode) -> int\n\n\n\nSet: UIndex(self: GridNode)=value\n\n"
v_index = property(lambda self: object(), lambda self, v: None, lambda self: None)
"The node's index along the V axis.\n\n\n\nGet: VIndex(self: GridNode) -> int\n\n\n\nSet: VIndex(self: GridNode)=value\n\n"
|
class LocationPathFormatError(Exception):
pass
class LocationStepFormatError(Exception):
pass
class NodenameFormatError(Exception):
pass
class PredicateFormatError(Exception):
pass
class PredicatesFormatError(Exception):
pass
|
class Locationpathformaterror(Exception):
pass
class Locationstepformaterror(Exception):
pass
class Nodenameformaterror(Exception):
pass
class Predicateformaterror(Exception):
pass
class Predicatesformaterror(Exception):
pass
|
burst_time=[]
print("Enter the number of process: ")
n=int(input())
print("Enter the burst time of the processes: \n")
burst_time=list(map(int, input().split()))
waiting_time=[]
avg_waiting_time=0
turnaround_time=[]
avg_turnaround_time=0
waiting_time.insert(0,0)
turnaround_time.insert(0,burst_time[0])
avg_turnaround_time+=turnaround_time[0]
for i in range(1,len(burst_time)):
waiting_time.insert(i,waiting_time[i-1]+burst_time[i-1])
turnaround_time.insert(i,waiting_time[i]+burst_time[i])
avg_waiting_time+=waiting_time[i]
avg_turnaround_time+=turnaround_time[i]
avg_waiting_time=float(avg_waiting_time)/n
avg_turnaround_time=float(avg_turnaround_time)/n
print("\n")
print("Process\t Burst Time\t Waiting Time\t Turn Around Time")
for i in range(0,n):
print(str(i)+"\t\t"+str(burst_time[i])+"\t\t"+str(waiting_time[i])+"\t\t"+str(turnaround_time[i]))
print("\n")
print("Average Waiting time is: "+str(avg_waiting_time))
print("Average Turn Around Time is: "+str(avg_turnaround_time))
|
burst_time = []
print('Enter the number of process: ')
n = int(input())
print('Enter the burst time of the processes: \n')
burst_time = list(map(int, input().split()))
waiting_time = []
avg_waiting_time = 0
turnaround_time = []
avg_turnaround_time = 0
waiting_time.insert(0, 0)
turnaround_time.insert(0, burst_time[0])
avg_turnaround_time += turnaround_time[0]
for i in range(1, len(burst_time)):
waiting_time.insert(i, waiting_time[i - 1] + burst_time[i - 1])
turnaround_time.insert(i, waiting_time[i] + burst_time[i])
avg_waiting_time += waiting_time[i]
avg_turnaround_time += turnaround_time[i]
avg_waiting_time = float(avg_waiting_time) / n
avg_turnaround_time = float(avg_turnaround_time) / n
print('\n')
print('Process\t Burst Time\t Waiting Time\t Turn Around Time')
for i in range(0, n):
print(str(i) + '\t\t' + str(burst_time[i]) + '\t\t' + str(waiting_time[i]) + '\t\t' + str(turnaround_time[i]))
print('\n')
print('Average Waiting time is: ' + str(avg_waiting_time))
print('Average Turn Around Time is: ' + str(avg_turnaround_time))
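# Worked example (input assumed): for burst times [5, 9, 6] under FCFS,
# waiting times are the running sums [0, 5, 14] and turnaround times are
# waiting + burst = [5, 14, 20], so with n = 3 the averages are
# 19 / 3 ~= 6.33 and 39 / 3 = 13.0.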
|
lista = [
[1,2,3,4,5,6,7,9,8,10],
[1,3,3,4,5,6,7,8,9,10],
[1,7,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,2,3,4,5,6,7,8,9,10],
[1,8,3,4,5,6,7,8,9,10],
]
def verificar(lista):
ls = lista
for index_lista in lista:
for aux in index_lista:
contador = 0
for lista in index_lista:
print(f'aux : {aux} lista: {lista}')
if aux == lista:
contador += 1
                    print('contador ', contador)
                    if contador == 2:
                        ind = ls.index(index_lista)
                        print(f'in list number {ind}, the number {aux} repeats')
return 0
# print(aux)
#print('============')
verificar(lista)
|
lista = [[1, 2, 3, 4, 5, 6, 7, 9, 8, 10], [1, 3, 3, 4, 5, 6, 7, 8, 9, 10], [1, 7, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], [1, 8, 3, 4, 5, 6, 7, 8, 9, 10]]
def verificar(lista):
ls = lista
for index_lista in lista:
for aux in index_lista:
contador = 0
for lista in index_lista:
print(f'aux : {aux} lista: {lista}')
if aux == lista:
contador += 1
                    print('contador ', contador)
                    if contador == 2:
                        ind = ls.index(index_lista)
                        print(f'in list number {ind}, the number {aux} repeats')
return 0
verificar(lista)
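# Alternative sketch (not from the original): the same per-row duplicate scan
# expressed with collections.Counter; rows 1, 2 and 6 above contain repeats.
import collections

def find_repeats(rows):
    return {i: [v for v, c in collections.Counter(row).items() if c > 1]
            for i, row in enumerate(rows)}

print(find_repeats(lista))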
|