content
stringlengths 7
1.05M
| fixed_cases
stringlengths 1
1.28M
|
---|---|
# Map from numpy-style dtype names to the corresponding C/C++ scalar types.
dtypes = {
    'float32': 'float',
    'float64': 'double',
    'complex64': 'complex_float',
    'complex128': 'complex_double',
}
""" Possible dptypes for spaces and c equivalents """
# For each basis dtype, the result dtypes it may legally be paired with.
compatible_dtypes = {
    'float32': ('float32', 'complex64'),
    'float64': ('float64', 'complex128'),
    'complex64': ('complex64',),
    'complex128': ('complex128',),
}
""" Compatibility between basis and result types """
# Describes available spaces and their wrapper implementation.
# Most of the characteristics (barycentric, dual...) are guessed later on.
# However, these characteristics can be specified here. The characteristics
# will be used in the actual space factories by the users.
# The default implementation is 'grid_only', eg a single constructor that takes
# a grid as its only argument.
spaces = {
    'PiecewiseConstantScalarSpace': {
        'doc': 'Space of piecewise constant scalar functions',
    },
    'PiecewiseConstantScalarSpaceBarycentric': {
        'doc': 'Space of piecewise constant scalar functions',
    },
    'PiecewiseConstantDualGridScalarSpace': {
        'doc': 'Space of piecewise constant scalar functions on the dual grid',
    },
    'PiecewiseLinearContinuousScalarSpace': {
        'doc': 'Space of continuous, piecewise linear scalar functions',
    },
    'PiecewiseLinearDiscontinuousScalarSpace': {
        'doc':
        'Space of piecewise linear, possibly discontinuous, scalar functions',
    },
    'PiecewiseLinearDiscontinuousScalarSpaceBarycentric': {
        'doc':
        'Space of piecewise constant functions define on the dual grid',
        'dual': True  # According to the doxygen tag...
    },
    'PiecewisePolynomialContinuousScalarSpace': {
        'doc':
        'Space of continuous, piecewise polynomial scalar functions',
        'implementation': 'polynomial'
    }
}


def _to_snake(name):
    """CamelCase -> snake_case: prefix every non-lowercase char with '_'."""
    return name[0].lower() + ''.join(
        c if c.islower() else '_' + c.lower() for c in name[1:])


# Guess any characteristics not given explicitly above from the class name:
# wrapper implementation, header path, scalar-ness, polynomial order,
# continuity, dual-grid and barycentric flags.
for key, description in spaces.items():
    lowered = key.lower()
    description.setdefault('implementation', 'grid_only')
    description.setdefault('header', 'bempp/space/%s.hpp' % _to_snake(key))
    description.setdefault('scalar', 'scalar' in lowered)
    if description['scalar'] and 'order' not in description:
        if 'constant' in lowered:
            description['order'] = 'constant'
        elif 'linear' in lowered:
            description['order'] = 'linear'
        else:
            description['order'] = 'polynomial'
    description.setdefault('continuous', 'discontinuous' not in lowered)
    description.setdefault('dual', 'dual' in lowered)
    description.setdefault('barycentric', 'barycentric' in lowered)
|
# numpy-style dtype names mapped to their C/C++ scalar equivalents.
dtypes = {
    'float32': 'float',
    'float64': 'double',
    'complex64': 'complex_float',
    'complex128': 'complex_double',
}
' Possible dptypes for spaces and c equivalents '
compatible_dtypes = {
    'float32': ('float32', 'complex64'),
    'float64': ('float64', 'complex128'),
    'complex64': ('complex64',),
    'complex128': ('complex128',),
}
' Compatibility between basis and result types '
# Known spaces plus any explicitly-specified characteristics; everything
# else is derived from the class name in the loop below.
spaces = {
    'PiecewiseConstantScalarSpace': {
        'doc': 'Space of piecewise constant scalar functions'},
    'PiecewiseConstantScalarSpaceBarycentric': {
        'doc': 'Space of piecewise constant scalar functions'},
    'PiecewiseConstantDualGridScalarSpace': {
        'doc': 'Space of piecewise constant scalar functions on the dual grid'},
    'PiecewiseLinearContinuousScalarSpace': {
        'doc': 'Space of continuous, piecewise linear scalar functions'},
    'PiecewiseLinearDiscontinuousScalarSpace': {
        'doc': 'Space of piecewise linear, possibly discontinuous, scalar functions'},
    'PiecewiseLinearDiscontinuousScalarSpaceBarycentric': {
        'doc': 'Space of piecewise constant functions define on the dual grid',
        'dual': True},
    'PiecewisePolynomialContinuousScalarSpace': {
        'doc': 'Space of continuous, piecewise polynomial scalar functions',
        'implementation': 'polynomial'},
}


def _camel_to_header(name):
    """Build the bempp header path for a CamelCase space class name."""
    snake = name[0].lower() + ''.join(
        ch if ch.islower() else '_' + ch.lower() for ch in name[1:])
    return 'bempp/space/%s.hpp' % snake


# Fill in unspecified characteristics (implementation, header, scalar,
# order, continuity, dual grid, barycentric) from the class name.
for key, description in spaces.items():
    lower_key = key.lower()
    if 'implementation' not in description:
        description['implementation'] = 'grid_only'
    if 'header' not in description:
        description['header'] = _camel_to_header(key)
    if 'scalar' not in description:
        description['scalar'] = 'scalar' in lower_key
    if description['scalar'] and 'order' not in description:
        for marker in ('constant', 'linear'):
            if marker in lower_key:
                description['order'] = marker
                break
        else:
            description['order'] = 'polynomial'
    if 'continuous' not in description:
        description['continuous'] = 'discontinuous' not in lower_key
    if 'dual' not in description:
        description['dual'] = 'dual' in lower_key
    if 'barycentric' not in description:
        description['barycentric'] = 'barycentric' in lower_key
|
# M0_C4 - Peak Volumes
def get_peak_volumes(volumes):
    # Exercise stub: intended to compute the "peak" volumes from the given
    # list of ints (spec not shown here); currently unimplemented and
    # returns a placeholder string.
    # Write your code here
    return "not implemented"
#### DO NOT TOUCH CODE BELOW THIS LINE ####
if __name__ == '__main__':
    """This code is for manual testing and is provided for your convenience."""
    test_volumes = input("Input space-separated list of volumes: ")
    # Parse the space-separated input into a list of ints before calling.
    converted = [int(a) for a in test_volumes.split()]
    print(get_peak_volumes(converted))
|
def get_peak_volumes(volumes):
    # Exercise stub: placeholder return until implemented.
    return 'not implemented'
if __name__ == '__main__':
    'This code is for manual testing and is provided for your convenience.'
    test_volumes = input('Input space-separated list of volumes: ')
    # Convert the space-separated input to ints before calling the stub.
    converted = [int(a) for a in test_volumes.split()]
    print(get_peak_volumes(converted))
|
#Algorithm Case Study
def naive(a, b):
    """Multiply a by b via repeated addition.

    Returns 0 when a <= 0 (the loop never runs); assumes a is an integer.
    """
    total = 0
    counter = a
    while counter > 0:
        total += b
        counter -= 1
    return total
def testnaive():
    # Manual check: print every pair (i, j) in 0..9 x 0..9 followed by
    # naive(i, j), so the output can be eyeballed against i*j.
    i=0
    while(i < 10):
        j=0
        while(j < 10):
            print(i,j)
            print(naive(i,j))
            j += 1
        i += 1
if __name__ == "__main__":
    testnaive()
|
def naive(a, b):
    """Compute a * b by adding b to an accumulator a times.

    Yields 0 for a <= 0; intended for integer inputs.
    """
    result = 0
    remaining = a
    while remaining > 0:
        result = result + b
        remaining = remaining - 1
    return result
def testnaive():
    # Manual check: prints each (i, j) pair in 0..9 x 0..9 and naive(i, j)
    # so the result can be compared against i*j by eye.
    i = 0
    while i < 10:
        j = 0
        while j < 10:
            print(i, j)
            print(naive(i, j))
            j += 1
        i += 1
if __name__ == '__main__':
    testnaive()
|
"""
Entradas
dia-->int
mes-->int
Salidas
singno zodiacal-->float
"""
dia = int (input ('Digite el numero de dia: '))
mes = int (input ("Digite el numero de mes: "))
if (dia>=21 and mes==3) or (dia<=20 and mes==4):
print ('Aries')
if (dia>=24 and mes==9) or (dia<=23 and mes==10):
print ('Libra')
if (dia>=21 and mes==4) or (dia<=21 and mes==5):
print ('Tauro')
if (dia>=24 and mes==10) or (dia<=22 and mes==11):
print ('Escorpio')
if (dia>=22 and mes==5) or (dia<=21 and mes==6):
print ('Geminis')
if (dia>=23 and mes==11) or (dia<=21 and mes==12):
print ('Sagitario')
if (dia>=21 and mes==6) or (dia<=23 and mes==7):
print ('Cancer')
if (dia>=22 and mes==12) or (dia<=20 and mes==1):
print ('Capricornio')
if (dia>=24 and mes==7) or (dia<=23 and mes==8):
print ('Leo')
if (dia>=21 and mes==1) or (dia<=19 and mes==2):
print ('Acuario')
if (dia>=24 and mes==8) or (dia<=23 and mes==9):
print ('Virgo')
if (dia>=20 and mes==2) or (dia<=20 and mes==3):
print ('Piscis')
print ()
|
"""
Entradas
dia-->int
mes-->int
Salidas
singno zodiacal-->float
"""
dia = int(input('Digite el numero de dia: '))
mes = int(input('Digite el numero de mes: '))
if dia >= 21 and mes == 3 or (dia <= 20 and mes == 4):
print('Aries')
if dia >= 24 and mes == 9 or (dia <= 23 and mes == 10):
print('Libra')
if dia >= 21 and mes == 4 or (dia <= 21 and mes == 5):
print('Tauro')
if dia >= 24 and mes == 10 or (dia <= 22 and mes == 11):
print('Escorpio')
if dia >= 22 and mes == 5 or (dia <= 21 and mes == 6):
print('Geminis')
if dia >= 23 and mes == 11 or (dia <= 21 and mes == 12):
print('Sagitario')
if dia >= 21 and mes == 6 or (dia <= 23 and mes == 7):
print('Cancer')
if dia >= 22 and mes == 12 or (dia <= 20 and mes == 1):
print('Capricornio')
if dia >= 24 and mes == 7 or (dia <= 23 and mes == 8):
print('Leo')
if dia >= 21 and mes == 1 or (dia <= 19 and mes == 2):
print('Acuario')
if dia >= 24 and mes == 8 or (dia <= 23 and mes == 9):
print('Virgo')
if dia >= 20 and mes == 2 or (dia <= 20 and mes == 3):
print('Piscis')
print()
|
# Cinema booking simulator: read the seat capacity, then group sizes
# (one per line) until the sentinel "Movie time!".  Tickets cost 5 lv;
# a group whose size is a multiple of 3 gets 5 lv off (one free ticket).
capacity = int(input())
income = 0
command = input()
while command != "Movie time!":
    group_count = int(command)
    if group_count > capacity:
        # Not enough seats for this group: report totals and stop.
        print("The cinema is full.")
        print(f"Cinema income - {income} lv.")
        exit()
    capacity -= group_count
    group_tax = group_count * 5
    if group_count % 3 == 0:
        group_tax -= 5
    income += group_tax
    command = input()
print(f"There are {capacity} seats left in the cinema.")
print(f"Cinema income - {income} lv.")
|
# Cinema booking simulator: reads capacity, then group sizes until the
# sentinel 'Movie time!'.  5 lv per seat; groups sized in multiples of 3
# get 5 lv discounted.
capacity = int(input())
income = 0
command = input()
while command != 'Movie time!':
    group_count = int(command)
    if group_count > capacity:
        # Group does not fit: print totals and terminate.
        print('The cinema is full.')
        print(f'Cinema income - {income} lv.')
        exit()
    capacity -= group_count
    group_tax = group_count * 5
    if group_count % 3 == 0:
        group_tax -= 5
    income += group_tax
    command = input()
print(f'There are {capacity} seats left in the cinema.')
print(f'Cinema income - {income} lv.')
|
#pretty print method
def indent(elem, level=0):
    """In-place pretty-printer for an ElementTree element.

    Pads .text/.tail with a newline plus one space per depth level so the
    serialized XML comes out indented.  Returns the mutated element.
    """
    child_sep = "\n" + level * " "
    closing_sep = "\n" + (level - 1) * " "
    def _blank(s):
        return not s or not s.strip()
    if not len(elem):
        # Leaf node: only align its tail with the parent's indentation.
        if level and _blank(elem.tail):
            elem.tail = closing_sep
        return elem
    if _blank(elem.text):
        elem.text = child_sep + " "
    if _blank(elem.tail):
        elem.tail = child_sep
    for child in elem:
        indent(child, level + 1)
        if _blank(elem.tail):
            elem.tail = closing_sep
    return elem
|
def indent(elem, level=0):
    """Pretty-print helper: recursively pads .text/.tail with a newline
    plus per-level single-space indentation.  Mutates and returns elem."""
    pad = '\n' + level * ' '
    outer_pad = '\n' + (level - 1) * ' '
    def _is_blank(text):
        return not text or not text.strip()
    if len(elem) == 0:
        # Leaf: align its tail with the enclosing element's indentation.
        if level and _is_blank(elem.tail):
            elem.tail = outer_pad
        return elem
    if _is_blank(elem.text):
        elem.text = pad + ' '
    if _is_blank(elem.tail):
        elem.tail = pad
    for sub in elem:
        indent(sub, level + 1)
    if _is_blank(elem.tail):
        elem.tail = outer_pad
    return elem
|
# -*- coding: utf-8 -*-
# This work is licensed under the MIT License.
# To view a copy of this license, visit https://www.gnu.org/licenses/
# Written by Taher Abbasi
# Email: [email protected]
class WrongSide(Exception):
    """Exception type for 'wrong side' error conditions.

    Carries no extra state; semantics are defined by the raising callers
    elsewhere in the project.
    """
|
class Wrongside(Exception):
    """Exception for 'wrong side' conditions (no extra state; meaning is
    determined by the call sites that raise it)."""
|
# GDSII layer/datatype pairs for metal layers 1-5 (presumably the
# SkyWater PDK numbering -- TODO confirm; datatype 20 throughout).
skywater_metal1 = dict(layer=68, datatype=20)
skywater_metal2 = dict(layer=69, datatype=20)
skywater_metal3 = dict(layer=70, datatype=20)
skywater_metal4 = dict(layer=71, datatype=20)
skywater_metal5 = dict(layer=72, datatype=20)
|
# Layer/datatype pairs for five metal layers; consecutive GDS layer
# numbers 68-72 with a shared datatype of 20 (presumably SkyWater PDK
# conventions -- verify against the PDK docs).
skywater_metal1 = dict(layer=68, datatype=20)
skywater_metal2 = dict(layer=69, datatype=20)
skywater_metal3 = dict(layer=70, datatype=20)
skywater_metal4 = dict(layer=71, datatype=20)
skywater_metal5 = dict(layer=72, datatype=20)
|
# Processing (Python mode) sketch: two counter-rotating text labels that
# track the mouse.  Relies on Processing runtime built-ins (size, smooth,
# text, pushMatrix, mouseX, ...) -- not runnable as plain Python.
rc = 0  # accumulated rotation amount, incremented every frame
def setup():
    # One-time setup: canvas size, smoothing, background, font.
    size(600, 600)
    smooth()
    background(0)
    font = loadFont("Gadugi-Bold-48.vlw")
    textFont(font, 48)
def draw():
    # Per-frame: draw both labels rotated in opposite directions around
    # the canvas center.
    global rc
    translate(width/2, height/2)
    pushMatrix()
    rotate(rc)
    fill(255)
    text("Black", mouseX - width/4, mouseY - height/4)
    popMatrix()
    pushMatrix()
    # Second label spins the opposite way, 1.5x faster.
    rotate(-rc * 1.5)
    fill(0)
    text("White", width/4 - mouseX, height/4 - mouseY)
    popMatrix()
    rc += 0.05
|
# py5/Processing-style sketch (snake_case API): two counter-rotating text
# labels that follow the mouse.  Depends on sketch runtime built-ins
# (size, load_font, push_matrix, mouseX, ...) -- not plain Python.
rc = 0  # accumulated rotation amount, incremented each frame
def setup():
    # One-time setup: canvas, smoothing, background, font.
    size(600, 600)
    smooth()
    background(0)
    font = load_font('Gadugi-Bold-48.vlw')
    text_font(font, 48)
def draw():
    # Per-frame: draw the two labels rotated in opposite directions.
    global rc
    translate(width / 2, height / 2)
    push_matrix()
    rotate(rc)
    fill(255)
    text('Black', mouseX - width / 4, mouseY - height / 4)
    pop_matrix()
    push_matrix()
    # Second label spins the opposite way, 1.5x faster.
    rotate(-rc * 1.5)
    fill(0)
    text('White', width / 4 - mouseX, height / 4 - mouseY)
    pop_matrix()
    rc += 0.05
|
"""
Hamming Numbers
URL: https://www.codewars.com/kata/526d84b98f428f14a60008da/python
A Hamming number is a positive integer of the form 2^i, 3^j, 5^k,
for some non-negative integers i, j, and k.
Write a function that computes the nth smallest Hamming number.
Specifically:
The first smallest Hamming number is 1 = 2^0 * 3^0 * 5^0
The second smallest Hamming number is 2 = 2^1 * 3^0 * 5^0
The third smallest Hamming number is 3 = 2^0 * 3^1 * 5^0
The fourth smallest Hamming number is 4 = 2^2 * 3^0 * 5^0
The fifth smallest Hamming number is 5 = 2^0 * 3^0 * 5^1
Your code should be able to compute all of the smallest 5,000
Hamming numbers without timing out.
----------------------------------------------------------------
From research, Hamming Numbers are the same as Natural Numbers,
which are described on Wikipedia here:
https://en.wikipedia.org/wiki/Regular_number
This article contains a section on algorithms:
Algorithms for calculating the regular numbers in ascending order were
popularized by Edsger Dijkstra. Dijkstra attributes to Hamming the
problem of building the infinite ascending sequence of all 5-smooth
numbers; this problem is now known as Hamming's problem, and the
numbers so generated are also called the Hamming numbers.
Dijkstra's ideas to compute these numbers are the following:
1) The sequence of Hamming numbers begins with the number 1.
2) The remaining values in the sequence are of the form 2h, 3h,
and 5h, where h is any Hamming number.
Therefore, the sequence H may be generated by outputting the value 1,
and then merging the sequences 2H, 3H, and 5H.
This algorithm is often used to demonstrate the power of a lazy
functional programming language, because (implicitly) concurrent
efficient implementations, using a constant number of arithmetic
operations per generated value, are easily constructed as described
above.
...
In the Python programming language, lazy functional code for generating
regular numbers is used as one of the built-in tests for correctness
of the language's implementation (it implements a lazy list). I
don't understand generators well enough to solve the problem this
way, so I'm going to try and get by with what I do know...
"""
hammings = [1]  # shared, sorted cache of Hamming numbers found so far
#version 2 will try to remember where we put the lowest 'new'
#Hamming and only start from there
#version 3 uses set() and list sorting to beat extendHamming()!
last_good_index = 0  # progress marker shared by hamming_v2/hamming_v3
def hamming_v1(n):
    # v1: walk the growing sorted list; for each number visited, insert
    # its 2x/3x/5x multiples, until n numbers have been visited.
    # NOTE(review): iterates `hammings` while extendHamming() inserts into
    # it -- relies on CPython list-iterator semantics.
    global hammings
    index = 0
    for h in hammings:
        index += 1
        if index == n:
            return hammings[n-1]
        extendHamming(hammings, 2*h)
        extendHamming(hammings, 3*h)
        extendHamming(hammings, 5*h)
def hamming_v2(n):
    # v2: same as v1 but resumes scanning from last_good_index so repeated
    # calls do not rescan the whole list.
    global hammings
    global last_good_index
    index = last_good_index
    if n-1 < index:
        return hammings[n-1]
    else:
        for h in hammings[last_good_index:]:
            index += 1
            if index == n:
                last_good_index = n
                return hammings[n-1]
            extendHamming(hammings, 2*h)
            extendHamming(hammings, 3*h)
            extendHamming(hammings, 5*h)
def hamming_v3(n):
    """Return the nth (1-based) Hamming number.

    Extends the shared sorted cache `hammings` by set-union with
    {2h, 3h, 5h} and re-sorting, resuming from `last_good_index`
    across calls.
    """
    global last_good_index
    global hammings
    index = last_good_index
    if n-1 < index:
        return hammings[n-1]
    else:
        while True:
            h = hammings[index]
            index += 1
            if index == n:
                last_good_index = n-1
                return hammings[n-1]
            # BUG FIX: set.update() mutates in place and returns None, so
            # the original sorted(set(hammings).update([...])) raised
            # TypeError.  Build the union as a new set instead.
            hammings = sorted(set(hammings) | {2*h, 3*h, 5*h})
def extendHamming(hammings, new_hamming):
    """Insert new_hamming into the sorted list `hammings`, in place.

    Duplicates are ignored.  Quirk preserved from the original: nothing is
    inserted when new_hamming is not greater than some existing element.
    """
    if new_hamming in hammings:
        return
    j = len(hammings)
    while j > 0:
        if hammings[j - 1] < new_hamming:
            hammings.insert(j, new_hamming)
            return
        j -= 1
# Smoke tests: expected values noted inline.  Note that v1/v2/v3 all share
# the module-level cache, so later calls benefit from earlier ones.
print(hamming_v1(7)) # 8
print(hamming_v1(12)) # 16
print(hamming_v1(20)) # 36
print(hamming_v1(100)) # 1536
print(hamming_v1(1000)) # 51200000
print(hamming_v1(777)) # 10628820
print(hamming_v1(77)) # 729
print(hamming_v1(7)) # 8
print(hamming_v2(7))
print(hamming_v2(12))
print(hamming_v2(20))
print(hamming_v2(100))
print(hamming_v2(1000))
print(hamming_v2(777))
print(hamming_v2(77))
print(hamming_v2(7))
print(hamming_v3(7))
print(hamming_v3(12))
print(hamming_v3(20))
print(hamming_v3(100))
print(hamming_v3(1000))
print(hamming_v3(777))
print(hamming_v3(77))
print(hamming_v3(7))
|
"""
Hamming Numbers
URL: https://www.codewars.com/kata/526d84b98f428f14a60008da/python
A Hamming number is a positive integer of the form 2^i, 3^j, 5^k,
for some non-negative integers i, j, and k.
Write a function that computes the nth smallest Hamming number.
Specifically:
The first smallest Hamming number is 1 = 2^0 * 3^0 * 5^0
The second smallest Hamming number is 2 = 2^1 * 3^0 * 5^0
The third smallest Hamming number is 3 = 2^0 * 3^1 * 5^0
The fourth smallest Hamming number is 4 = 2^2 * 3^0 * 5^0
The fifth smallest Hamming number is 5 = 2^0 * 3^0 * 5^1
Your code should be able to compute all of the smallest 5,000
Hamming numbers without timing out.
----------------------------------------------------------------
From research, Hamming Numbers are the same as Natural Numbers,
which are described on Wikipedia here:
https://en.wikipedia.org/wiki/Regular_number
This article contains a section on algorithms:
Algorithms for calculating the regular numbers in ascending order were
popularized by Edsger Dijkstra. Dijkstra attributes to Hamming the
problem of building the infinite ascending sequence of all 5-smooth
numbers; this problem is now known as Hamming's problem, and the
numbers so generated are also called the Hamming numbers.
Dijkstra's ideas to compute these numbers are the following:
1) The sequence of Hamming numbers begins with the number 1.
2) The remaining values in the sequence are of the form 2h, 3h,
and 5h, where h is any Hamming number.
Therefore, the sequence H may be generated by outputting the value 1,
and then merging the sequences 2H, 3H, and 5H.
This algorithm is often used to demonstrate the power of a lazy
functional programming language, because (implicitly) concurrent
efficient implementations, using a constant number of arithmetic
operations per generated value, are easily constructed as described
above.
...
In the Python programming language, lazy functional code for generating
regular numbers is used as one of the built-in tests for correctness
of the language's implementation (it implements a lazy list). I
don't understand generators well enough to solve the problem this
way, so I'm going to try and get by with what I do know...
"""
hammings = [1]  # shared, sorted cache of Hamming numbers found so far
last_good_index = 0  # progress marker shared by hamming_v2/hamming_v3
def hamming_v1(n):
    # Walk the growing sorted list, inserting 2h/3h/5h multiples for each
    # number visited, until n numbers have been visited.
    # NOTE(review): iterates `hammings` while extend_hamming() inserts
    # into it -- relies on CPython list-iterator semantics.
    global hammings
    index = 0
    for h in hammings:
        index += 1
        if index == n:
            return hammings[n - 1]
        extend_hamming(hammings, 2 * h)
        extend_hamming(hammings, 3 * h)
        extend_hamming(hammings, 5 * h)
def hamming_v2(n):
    # Like hamming_v1, but resumes from last_good_index so repeated calls
    # do not rescan the whole cached list.
    global hammings
    global last_good_index
    index = last_good_index
    if n - 1 < index:
        return hammings[n - 1]
    else:
        for h in hammings[last_good_index:]:
            index += 1
            if index == n:
                last_good_index = n
                return hammings[n - 1]
            extend_hamming(hammings, 2 * h)
            extend_hamming(hammings, 3 * h)
            extend_hamming(hammings, 5 * h)
def hamming_v3(n):
    """Return the nth (1-based) Hamming number.

    Grows the shared `hammings` cache by set-union with {2h, 3h, 5h} and
    re-sorting, resuming from `last_good_index` across calls.
    """
    global last_good_index
    global hammings
    index = last_good_index
    if n - 1 < index:
        return hammings[n - 1]
    else:
        while True:
            h = hammings[index]
            index += 1
            if index == n:
                last_good_index = n - 1
                return hammings[n - 1]
            # BUG FIX: set.update() returns None, so the original
            # sorted(set(hammings).update([...])) raised TypeError.
            # Build the union explicitly instead.
            hammings = sorted(set(hammings) | {2 * h, 3 * h, 5 * h})
def extend_hamming(hammings, new_hamming):
    """In-place sorted insert of new_hamming (duplicates are skipped).

    NOTE(review): preserved quirk -- nothing is inserted when new_hamming
    is not greater than at least one existing element.
    """
    if new_hamming not in hammings:
        for pos in reversed(range(1, len(hammings) + 1)):
            if hammings[pos - 1] < new_hamming:
                hammings.insert(pos, new_hamming)
                break
# Smoke tests; expected (per the annotated copy of this script):
# 8, 16, 36, 1536, 51200000, 10628820, 729, 8 for the v1 calls.  The v2/v3
# calls share the module-level cache warmed up by the v1 calls.
print(hamming_v1(7))
print(hamming_v1(12))
print(hamming_v1(20))
print(hamming_v1(100))
print(hamming_v1(1000))
print(hamming_v1(777))
print(hamming_v1(77))
print(hamming_v1(7))
print(hamming_v2(7))
print(hamming_v2(12))
print(hamming_v2(20))
print(hamming_v2(100))
print(hamming_v2(1000))
print(hamming_v2(777))
print(hamming_v2(77))
print(hamming_v2(7))
print(hamming_v3(7))
print(hamming_v3(12))
print(hamming_v3(20))
print(hamming_v3(100))
print(hamming_v3(1000))
print(hamming_v3(777))
print(hamming_v3(77))
print(hamming_v3(7))
|
class MessagesSerializer:
    """Serialize message objects into plain dicts with a fixed field set."""

    fields = ['id', 'topic', 'payload', 'attributes', 'bulk']

    def serialize(self, messages):
        """Return a list of field dicts, one per message, in order."""
        out = []
        for msg in messages:
            out.append(self._serialize_fields(msg))
        return out

    def _serialize_fields(self, message):
        # Missing attributes default to None rather than raising.
        result = {}
        for name in self.fields:
            result[name] = getattr(message, name, None)
        return result
|
class Messagesserializer:
    """Turn message objects into plain dicts limited to `fields`."""

    fields = ['id', 'topic', 'payload', 'attributes', 'bulk']

    def serialize(self, messages):
        """Serialize each message in order; returns a list of dicts."""
        serialized = []
        for item in messages:
            serialized.append(self._serialize_fields(item))
        return serialized

    def _serialize_fields(self, message):
        # getattr with a default keeps missing attributes as None.
        return dict((name, getattr(message, name, None)) for name in self.fields)
|
# -*- coding: utf-8 -*-
# @Author: Anderson
# @Date: 2018-09-01 22:04:38
# @Last Modified by: Anderson
# @Last Modified time: 2018-09-14 15:55:53
class DS(object):
    """Letter-indexed link chain over nodes 0..N-1 ('A' maps to 0).

    Each entry of __id holds the index the node was last connect()ed to;
    an entry that points at itself acts as a chain terminator.
    """

    def __init__(self, N):
        # Every node starts pointing at itself (terminator).
        self.__id = list(range(0, N))

    def is_connected(self, parent, child):
        """Follow links from `parent`; if `child` is reached after k hops,
        return 'child' / 'grandchild' / 'great-...grandchild' for
        k = 1 / 2 / 3+; return '-' if a terminator is hit first."""
        node = ord(parent) - ord('A')
        target = ord(child) - ord('A')
        steps = 0
        while True:
            steps += 1
            node = self.__id[node]
            if node == target:
                break
            if node == self.__id[node]:
                # Reached a self-loop without hitting the target.
                return '-'
        kinship = 'child'
        if steps > 1:
            kinship = 'grandchild'
        for _ in range(2, steps):
            kinship = 'great-' + kinship
        return kinship

    def connect(self, parent, child):
        """Point `parent`'s entry at `child`; '-' parents are ignored."""
        if parent == '-':
            return
        self.__id[ord(parent) - ord('A')] = ord(child) - ord('A')
# Read "n m": n relationship records followed by m queries.
s = input()
n, m = s.split(' ')
n = int(n)
m = int(m)
ds = DS(26)
for _ in range(n):
    # Each record line is unpacked character-by-character, so it is
    # expected to be exactly 3 letters: child, father, mother.
    child, father, mother = input()
    ds.connect(father, child)
    ds.connect(mother, child)
for _ in range(m):
    # Each query line is expected to be exactly 2 letters.
    a, b = input()
    if a == b:
        print('-')
    else:
        result = ds.is_connected(b, a)
        if result == '-':
            # Not related in that direction: try the reverse and rephrase
            # the child-terms as parent-terms.
            result = ds.is_connected(a, b)
            result = result.replace('child', 'parent')
        print(result)
|
class Ds(object):
    """Letter-indexed ancestry chain over nodes 0..N-1 ('A' -> 0); each
    entry points at the node it was connected to, and self-pointing
    entries terminate a chain."""

    def __init__(self, N):
        self.__id = list(range(0, N))

    def is_connected(self, parent, child):
        """Follow links starting at `parent`; if `child` is reached after
        k hops return 'child'/'grandchild'/'great-...', otherwise '-'."""
        current = ord(parent) - ord('A')
        goal = ord(child) - ord('A')
        hops = 0
        found = False
        while not found:
            hops += 1
            current = self.__id[current]
            if current == goal:
                found = True
            elif current == self.__id[current]:
                # Self-loop reached: chain ends without finding the goal.
                break
        if not found:
            return '-'
        label = ('child', 'grandchild')[min(hops, 2) - 1]
        for _ in range(hops - 2):
            label = 'great-' + label
        return label

    def connect(self, parent, child):
        """Point `parent`'s entry at `child`; '-' parents are ignored."""
        if parent != '-':
            self.__id[ord(parent) - ord('A')] = ord(child) - ord('A')
# Driver: n parent/child records followed by m relationship queries on stdin.
s = input()
(n, m) = s.split(' ')
n = int(n)
m = int(m)
# BUG FIX: the original read `ds = ds(26)`, a NameError — lowercase `ds` is
# not defined at this point; the class defined above is `Ds`.
ds = Ds(26)
for _ in range(n):
    # Each record is a 3-letter string unpacked char-by-char:
    # child, father, mother ('-' marks an unknown parent).
    (child, father, mother) = input()
    ds.connect(father, child)
    ds.connect(mother, child)
for _ in range(m):
    # Each query is a 2-letter string: the two people to relate.
    (a, b) = input()
    if a == b:
        print('-')
    else:
        # Try b-as-ancestor first; if that fails, look the other way and
        # reword the label from the opposite point of view.
        result = ds.is_connected(b, a)
        if result == '-':
            result = ds.is_connected(a, b)
            result = result.replace('child', 'parent')
        print(result)
|
#
# PySNMP MIB module Juniper-IPV6-PROFILE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-IPV6-PROFILE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:03:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
InetAddressIPv6, = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressIPv6")
Ipv6AddressPrefix, = mibBuilder.importSymbols("IPV6-TC", "Ipv6AddressPrefix")
juniMibs, = mibBuilder.importSymbols("Juniper-MIBs", "juniMibs")
JuniName, JuniSetMap, JuniEnable = mibBuilder.importSymbols("Juniper-TC", "JuniName", "JuniSetMap", "JuniEnable")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Counter64, ModuleIdentity, Unsigned32, Counter32, NotificationType, Bits, ObjectIdentity, Gauge32, TimeTicks, IpAddress, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Counter64", "ModuleIdentity", "Unsigned32", "Counter32", "NotificationType", "Bits", "ObjectIdentity", "Gauge32", "TimeTicks", "IpAddress", "iso")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
# ---------------------------------------------------------------------------
# Generated pysmi/pysnmp definitions for Juniper-IPV6-PROFILE-MIB.
# NOTE(review): this section is machine-generated; `mibBuilder` is injected
# by the pysnmp MIB loader at import time and is not defined in this file.
# ---------------------------------------------------------------------------
# Module identity: registers the MIB under the Juniper enterprise arc
# (1.3.6.1.4.1.4874) and records its revision history and contact metadata.
juniIpv6ProfileMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68))
juniIpv6ProfileMIB.setRevisions(('2007-07-19 18:19', '2003-09-29 17:58',))
# Per-revision descriptions are only supported by newer builders, hence the
# version guard.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: juniIpv6ProfileMIB.setRevisionsDescriptions(('Added ND support on dynamic interface.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: juniIpv6ProfileMIB.setLastUpdated('200707191819Z')
if mibBuilder.loadTexts: juniIpv6ProfileMIB.setOrganization('Juniper Networks')
if mibBuilder.loadTexts: juniIpv6ProfileMIB.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford MA 01886-3146 USA Tel: +1 978 589 5800 Email: [email protected]')
if mibBuilder.loadTexts: juniIpv6ProfileMIB.setDescription('The IPv6 Profile MIB for the Juniper Networks enterprise.')
# Object subtree roots under ...68.1.
juniIpv6ProfileObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1))
juniIpv6Profile = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1))
# Profile table: one row per IPv6 interface configuration profile, indexed
# by juniIpv6ProfileId; subsequent MibTableColumn objects are its columns.
juniIpv6ProfileTable = MibTable((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1), )
if mibBuilder.loadTexts: juniIpv6ProfileTable.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileTable.setDescription('The entries in this table describe profiles for configuring IP interfaces. Entries in this table are created/deleted as a side-effect of corresponding operations to the juniProfileNameTable in the Juniper-PROFILE-MIB.')
juniIpv6ProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1), ).setIndexNames((0, "Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileId"))
if mibBuilder.loadTexts: juniIpv6ProfileEntry.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileEntry.setDescription('A profile describing configuration of an IPv6 interface.')
juniIpv6ProfileId = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: juniIpv6ProfileId.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileId.setDescription('The integer identifier associated with this profile. A value for this identifier is determined by locating or creating a profile name in the juniProfileNameTable.')
juniIpv6ProfileSetMap = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 2), JuniSetMap()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileSetMap.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileSetMap.setDescription("A bitmap representing which objects in this entry have been explicitly configured. See the definition of the JuniSetMap TEXTUAL-CONVENTION for details of use. The INDEX object(s) and this object are excluded from representation (i.e. their bits are never set). When a SET request does not explicitly configure JuniSetMap, bits in JuniSetMap are set as a side-effect of configuring other profile attributes in the same entry. If, however, a SET request explicitly configures JuniSetMap, the explicitly configured value overrides 1) any previous bit settings, and 2) any simultaneous 'side-effect' settings that would otherwise occur. Once set, bits can only be cleared by explicitly configuring JuniSetMap.")
juniIpv6ProfileRouterName = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 3), JuniName()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileRouterName.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileRouterName.setDescription('The virtual router to which an IPv6 interface configured by this profile will be assigned, if other mechanisms do not otherwise specify a virtual router assignment.')
juniIpv6ProfileIpv6Addr = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 4), InetAddressIPv6().clone(hexValue="00000000000000000000000000000000")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileIpv6Addr.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileIpv6Addr.setDescription('An IPv6 address to be used by an IPv6 interface configured by this profile. This object will have a value of 0::0 for an unnumbered interface.')
juniIpv6ProfileIpv6MaskLen = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileIpv6MaskLen.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileIpv6MaskLen.setDescription('An IPv6 address mask length to be used by an IPv6 interface configured by this profile. This object will have a value of 0 for an unnumbered interface.')
juniIpv6ProfileMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(1280, 10240), ))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileMtu.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileMtu.setDescription('The configured MTU size for this IPv6 network interface. If set to zero, the default MTU size, as determined by the underlying network media, is used.')
juniIpv6ProfileSrcAddrValidEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 7), JuniEnable().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileSrcAddrValidEnable.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileSrcAddrValidEnable.setDescription('Enable/disable whether source addresses in received IPv6 packets are validated. Validation is performed by looking up the source IPv6 address in the routing database and determining whether the packet arrived on the expected interface; if not, the packet is discarded.')
juniIpv6ProfileInheritNumString = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileInheritNumString.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileInheritNumString.setDescription("The text identifier of the numbered interface, associated with the specified virtual router, whose IPv6 address is used as the source address when transmitting IPv6 packets on unnumbered remote access user links. Types/formats/examples for this string include: Loopback loopback <id> 'loopback 0' ATM Virtual Circuit atm <slot>/<port>.<distinguisher> 'atm 3/1.100' Ethernet { fastEthernet | gigabitEthernet } <slot>/<port> 'fastEthernet 3/0' 'gigabitEthernet 3/0' Ethernet VLAN { fastEthernet | gigabitEthernet } <slot>/<port>:<vlanID> 'fastEthernet 3/0:1000' 'gigabitEthernet 3/0:1000' Channelized Serial serial <slot>/<port>:<channelSpecifier>[/<channelSpecifier>]* 'serial 3/0:4' (T1/E1) 'serial 3/0:2/4' (T3/E3) 'serial 3/0:2/1/1/4' (OC3/OC12 - channelized DS3) 'serial 3/0:2/1/1/1/4' (OC3/OC12 - virtual tributaries) Other formats may be supported over time. An empty string indicates the referenced interface is unspecified, e.g., when this IPv6 interface is numbered.")
# Columns 9..21: IPv6 Neighbor Discovery (ND) settings added in the 2007
# revision — RA behavior, timers, and the advertised prefix attributes.
juniIpv6ProfileNdEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 9), JuniEnable().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdEnabled.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdEnabled.setDescription('Enable/disable ND for this IPv6 network interface.')
juniIpv6ProfileNdManagedConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 10), JuniEnable().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdManagedConfig.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdManagedConfig.setDescription('Enable/disable ND managed config for this IPv6 network interface.')
juniIpv6ProfileNdOtherConfig = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 11), JuniEnable().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdOtherConfig.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdOtherConfig.setDescription('Enable/disable ND other config for this IPv6 network interface.')
juniIpv6ProfileNdSuppressRa = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 12), JuniEnable().clone('disable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdSuppressRa.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdSuppressRa.setDescription('Enable/disable ND suppress RA for this IPv6 network interface.')
juniIpv6ProfileNdRaInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(3, 1800)).clone(200)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdRaInterval.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdRaInterval.setDescription('The configured interval between IPv6 RA transmissions on the interface.')
juniIpv6ProfileNdRaLifeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 14), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1800)).clone(1800)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdRaLifeTime.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdRaLifeTime.setDescription('The configured RA lifetime for this IPv6 network interface.')
juniIpv6ProfileNdReachableTime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 15), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdReachableTime.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdReachableTime.setDescription('The configured RA reachable time for this IPv6 network interface.')
juniIpv6ProfileNdPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 16), Ipv6AddressPrefix()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefix.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefix.setDescription('The prefix associated with the this interface.')
juniIpv6ProfileNdPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 17), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefixLength.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefixLength.setDescription('The length of the prefix (in bits).')
juniIpv6ProfileNdPrefixOnLinkFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 18), JuniEnable().clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefixOnLinkFlag.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefixOnLinkFlag.setDescription("This object has the value 'true(1)', if this prefix can be used for on-link determination and the value 'false(2)' otherwise.")
juniIpv6ProfileNdPrefixAutonomousFlag = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 19), JuniEnable().clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefixAutonomousFlag.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefixAutonomousFlag.setDescription('Autonomous address configuration flag. When true(1), indicates that this prefix can be used for autonomous address configuration (i.e. can be used to form a local interface address). If false(2), it is not used to autoconfigure a local interface address.')
juniIpv6ProfileNdPrefixPreferredLifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 20), Integer32().clone(604800)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefixPreferredLifetime.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefixPreferredLifetime.setDescription('It is the length of time in seconds that this prefix will remain preferred, i.e. time until deprecation. A value of 4,294,967,295 represents infinity. The address generated from a deprecated prefix should no longer be used as a source address in new communications, but packets received on such an interface are processed as expected.')
juniIpv6ProfileNdPrefixValidLifetime = MibTableColumn((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 21), Integer32().clone(2592000)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefixValidLifetime.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileNdPrefixValidLifetime.setDescription('It is the length of time in seconds that this prefix will remain valid, i.e. time until invalidation. A value of 4,294,967,295 represents infinity. The address generated from an invalidated prefix should not appear as the destination or source address of a packet.')
# Conformance section (...68.4): compliance statements and object groups.
# The un-suffixed compliance/group are obsolete; the *1 variants add the
# ND columns introduced in the 2007 revision.
juniIpv6ProfileMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4))
juniIpv6ProfileMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 1))
juniIpv6ProfileMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 2))
juniIpv6ProfileCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 1, 1)).setObjects(("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpv6ProfileCompliance = juniIpv6ProfileCompliance.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpv6ProfileCompliance.setDescription('Obsolete Compliance statement for systems supporting IPv6 configuration profiles. This statement became obsolete when added ND support.')
juniIpv6ProfileCompliance1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 1, 2)).setObjects(("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpv6ProfileCompliance1 = juniIpv6ProfileCompliance1.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileCompliance1.setDescription('Compliance statement for systems supporting IPv6 configuration profiles, incorporating support of ND on dynamical interface.')
juniIpv6ProfileGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 2, 1)).setObjects(("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileSetMap"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileRouterName"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileIpv6Addr"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileIpv6MaskLen"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileMtu"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileSrcAddrValidEnable"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileInheritNumString"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpv6ProfileGroup = juniIpv6ProfileGroup.setStatus('obsolete')
if mibBuilder.loadTexts: juniIpv6ProfileGroup.setDescription('An obsolete collection of objects providing management of IPv6 Profile functionality in a Juniper product. This statement became obsolete when added ND support.')
juniIpv6ProfileGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 2, 2)).setObjects(("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileSetMap"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileRouterName"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileIpv6Addr"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileIpv6MaskLen"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileMtu"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileSrcAddrValidEnable"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileInheritNumString"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdEnabled"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdManagedConfig"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdOtherConfig"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdSuppressRa"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdRaInterval"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdRaLifeTime"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdReachableTime"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdPrefix"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdPrefixLength"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdPrefixOnLinkFlag"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdPrefixAutonomousFlag"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdPrefixPreferredLifetime"), ("Juniper-IPV6-PROFILE-MIB", "juniIpv6ProfileNdPrefixValidLifetime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpv6ProfileGroup1 = juniIpv6ProfileGroup1.setStatus('current')
if mibBuilder.loadTexts: juniIpv6ProfileGroup1.setDescription('The basic collection of objects providing management of IPv6 Profile functionality in a Juniper product.')
# Export every symbol so other MIB modules can import them by name.
mibBuilder.exportSymbols("Juniper-IPV6-PROFILE-MIB", juniIpv6Profile=juniIpv6Profile, juniIpv6ProfileSrcAddrValidEnable=juniIpv6ProfileSrcAddrValidEnable, juniIpv6ProfileGroup=juniIpv6ProfileGroup, juniIpv6ProfileNdSuppressRa=juniIpv6ProfileNdSuppressRa, juniIpv6ProfileIpv6MaskLen=juniIpv6ProfileIpv6MaskLen, juniIpv6ProfileObjects=juniIpv6ProfileObjects, juniIpv6ProfileNdPrefixPreferredLifetime=juniIpv6ProfileNdPrefixPreferredLifetime, juniIpv6ProfileCompliance1=juniIpv6ProfileCompliance1, juniIpv6ProfileNdPrefixValidLifetime=juniIpv6ProfileNdPrefixValidLifetime, juniIpv6ProfileNdPrefixLength=juniIpv6ProfileNdPrefixLength, juniIpv6ProfileMIB=juniIpv6ProfileMIB, juniIpv6ProfileNdPrefix=juniIpv6ProfileNdPrefix, juniIpv6ProfileGroup1=juniIpv6ProfileGroup1, juniIpv6ProfileIpv6Addr=juniIpv6ProfileIpv6Addr, juniIpv6ProfileNdRaLifeTime=juniIpv6ProfileNdRaLifeTime, juniIpv6ProfileTable=juniIpv6ProfileTable, juniIpv6ProfileNdOtherConfig=juniIpv6ProfileNdOtherConfig, juniIpv6ProfileNdPrefixOnLinkFlag=juniIpv6ProfileNdPrefixOnLinkFlag, juniIpv6ProfileMtu=juniIpv6ProfileMtu, juniIpv6ProfileMIBCompliances=juniIpv6ProfileMIBCompliances, juniIpv6ProfileNdEnabled=juniIpv6ProfileNdEnabled, juniIpv6ProfileMIBConformance=juniIpv6ProfileMIBConformance, juniIpv6ProfileInheritNumString=juniIpv6ProfileInheritNumString, juniIpv6ProfileNdPrefixAutonomousFlag=juniIpv6ProfileNdPrefixAutonomousFlag, juniIpv6ProfileMIBGroups=juniIpv6ProfileMIBGroups, juniIpv6ProfileCompliance=juniIpv6ProfileCompliance, juniIpv6ProfileSetMap=juniIpv6ProfileSetMap, juniIpv6ProfileNdReachableTime=juniIpv6ProfileNdReachableTime, juniIpv6ProfileId=juniIpv6ProfileId, juniIpv6ProfileNdRaInterval=juniIpv6ProfileNdRaInterval, PYSNMP_MODULE_ID=juniIpv6ProfileMIB, juniIpv6ProfileRouterName=juniIpv6ProfileRouterName, juniIpv6ProfileEntry=juniIpv6ProfileEntry, juniIpv6ProfileNdManagedConfig=juniIpv6ProfileNdManagedConfig)
|
(octet_string, integer, object_identifier) = mibBuilder.importSymbols('ASN1', 'OctetString', 'Integer', 'ObjectIdentifier')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(constraints_intersection, value_range_constraint, constraints_union, value_size_constraint, single_value_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsIntersection', 'ValueRangeConstraint', 'ConstraintsUnion', 'ValueSizeConstraint', 'SingleValueConstraint')
(inet_address_i_pv6,) = mibBuilder.importSymbols('INET-ADDRESS-MIB', 'InetAddressIPv6')
(ipv6_address_prefix,) = mibBuilder.importSymbols('IPV6-TC', 'Ipv6AddressPrefix')
(juni_mibs,) = mibBuilder.importSymbols('Juniper-MIBs', 'juniMibs')
(juni_name, juni_set_map, juni_enable) = mibBuilder.importSymbols('Juniper-TC', 'JuniName', 'JuniSetMap', 'JuniEnable')
(notification_group, object_group, module_compliance) = mibBuilder.importSymbols('SNMPv2-CONF', 'NotificationGroup', 'ObjectGroup', 'ModuleCompliance')
(integer32, mib_scalar, mib_table, mib_table_row, mib_table_column, mib_identifier, counter64, module_identity, unsigned32, counter32, notification_type, bits, object_identity, gauge32, time_ticks, ip_address, iso) = mibBuilder.importSymbols('SNMPv2-SMI', 'Integer32', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'MibIdentifier', 'Counter64', 'ModuleIdentity', 'Unsigned32', 'Counter32', 'NotificationType', 'Bits', 'ObjectIdentity', 'Gauge32', 'TimeTicks', 'IpAddress', 'iso')
(display_string, textual_convention) = mibBuilder.importSymbols('SNMPv2-TC', 'DisplayString', 'TextualConvention')
juni_ipv6_profile_mib = module_identity((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68))
juniIpv6ProfileMIB.setRevisions(('2007-07-19 18:19', '2003-09-29 17:58'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts:
juniIpv6ProfileMIB.setRevisionsDescriptions(('Added ND support on dynamic interface.', 'Initial version of this MIB module.'))
if mibBuilder.loadTexts:
juniIpv6ProfileMIB.setLastUpdated('200707191819Z')
if mibBuilder.loadTexts:
juniIpv6ProfileMIB.setOrganization('Juniper Networks')
if mibBuilder.loadTexts:
juniIpv6ProfileMIB.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford MA 01886-3146 USA Tel: +1 978 589 5800 Email: [email protected]')
if mibBuilder.loadTexts:
juniIpv6ProfileMIB.setDescription('The IPv6 Profile MIB for the Juniper Networks enterprise.')
juni_ipv6_profile_objects = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1))
juni_ipv6_profile = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1))
juni_ipv6_profile_table = mib_table((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1))
if mibBuilder.loadTexts:
juniIpv6ProfileTable.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileTable.setDescription('The entries in this table describe profiles for configuring IP interfaces. Entries in this table are created/deleted as a side-effect of corresponding operations to the juniProfileNameTable in the Juniper-PROFILE-MIB.')
juni_ipv6_profile_entry = mib_table_row((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1)).setIndexNames((0, 'Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileId'))
if mibBuilder.loadTexts:
juniIpv6ProfileEntry.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileEntry.setDescription('A profile describing configuration of an IPv6 interface.')
juni_ipv6_profile_id = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 1), unsigned32())
if mibBuilder.loadTexts:
juniIpv6ProfileId.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileId.setDescription('The integer identifier associated with this profile. A value for this identifier is determined by locating or creating a profile name in the juniProfileNameTable.')
juni_ipv6_profile_set_map = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 2), juni_set_map()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
juniIpv6ProfileSetMap.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileSetMap.setDescription("A bitmap representing which objects in this entry have been explicitly configured. See the definition of the JuniSetMap TEXTUAL-CONVENTION for details of use. The INDEX object(s) and this object are excluded from representation (i.e. their bits are never set). When a SET request does not explicitly configure JuniSetMap, bits in JuniSetMap are set as a side-effect of configuring other profile attributes in the same entry. If, however, a SET request explicitly configures JuniSetMap, the explicitly configured value overrides 1) any previous bit settings, and 2) any simultaneous 'side-effect' settings that would otherwise occur. Once set, bits can only be cleared by explicitly configuring JuniSetMap.")
juni_ipv6_profile_router_name = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 3), juni_name()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
juniIpv6ProfileRouterName.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileRouterName.setDescription('The virtual router to which an IPv6 interface configured by this profile will be assigned, if other mechanisms do not otherwise specify a virtual router assignment.')
juni_ipv6_profile_ipv6_addr = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 4), inet_address_i_pv6().clone(hexValue='00000000000000000000000000000000')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
juniIpv6ProfileIpv6Addr.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileIpv6Addr.setDescription('An IPv6 address to be used by an IPv6 interface configured by this profile. This object will have a value of 0::0 for an unnumbered interface.')
juni_ipv6_profile_ipv6_mask_len = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 5), integer32().subtype(subtypeSpec=value_range_constraint(0, 128))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
juniIpv6ProfileIpv6MaskLen.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileIpv6MaskLen.setDescription('An IPv6 address mask length to be used by an IPv6 interface configured by this profile. This object will have a value of 0 for an unnumbered interface.')
juni_ipv6_profile_mtu = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 6), integer32().subtype(subtypeSpec=constraints_union(value_range_constraint(0, 0), value_range_constraint(1280, 10240)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
juniIpv6ProfileMtu.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileMtu.setDescription('The configured MTU size for this IPv6 network interface. If set to zero, the default MTU size, as determined by the underlying network media, is used.')
juni_ipv6_profile_src_addr_valid_enable = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 7), juni_enable().clone('disable')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
juniIpv6ProfileSrcAddrValidEnable.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileSrcAddrValidEnable.setDescription('Enable/disable whether source addresses in received IPv6 packets are validated. Validation is performed by looking up the source IPv6 address in the routing database and determining whether the packet arrived on the expected interface; if not, the packet is discarded.')
juni_ipv6_profile_inherit_num_string = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 8), display_string().subtype(subtypeSpec=value_size_constraint(0, 80))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
juniIpv6ProfileInheritNumString.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileInheritNumString.setDescription("The text identifier of the numbered interface, associated with the specified virtual router, whose IPv6 address is used as the source address when transmitting IPv6 packets on unnumbered remote access user links. Types/formats/examples for this string include: Loopback loopback <id> 'loopback 0' ATM Virtual Circuit atm <slot>/<port>.<distinguisher> 'atm 3/1.100' Ethernet { fastEthernet | gigabitEthernet } <slot>/<port> 'fastEthernet 3/0' 'gigabitEthernet 3/0' Ethernet VLAN { fastEthernet | gigabitEthernet } <slot>/<port>:<vlanID> 'fastEthernet 3/0:1000' 'gigabitEthernet 3/0:1000' Channelized Serial serial <slot>/<port>:<channelSpecifier>[/<channelSpecifier>]* 'serial 3/0:4' (T1/E1) 'serial 3/0:2/4' (T3/E3) 'serial 3/0:2/1/1/4' (OC3/OC12 - channelized DS3) 'serial 3/0:2/1/1/1/4' (OC3/OC12 - virtual tributaries) Other formats may be supported over time. An empty string indicates the referenced interface is unspecified, e.g., when this IPv6 interface is numbered.")
juni_ipv6_profile_nd_enabled = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 9), juni_enable().clone('disable')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
juniIpv6ProfileNdEnabled.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileNdEnabled.setDescription('Enable/disable ND for this IPv6 network interface.')
juni_ipv6_profile_nd_managed_config = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 10), juni_enable().clone('disable')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
juniIpv6ProfileNdManagedConfig.setStatus('current')
if mibBuilder.loadTexts:
juniIpv6ProfileNdManagedConfig.setDescription('Enable/disable ND managed config for this IPv6 network interface.')
juni_ipv6_profile_nd_other_config = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 11), juni_enable().clone('disable')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
juniIpv6ProfileNdOtherConfig.setStatus('current')
# NOTE(review): this autogenerated pysnmp module was mangled by a mechanical
# snake_case rename: objects were *assigned* under snake_case names while every
# later use (setStatus/setDescription and mibBuilder.exportSymbols) references
# the original camelCase names, which would raise NameError at import time.
# The assignment targets below are restored to the camelCase names actually
# used.  Factory callables (mib_table_column, juni_enable, ...) are left as-is:
# they are bound by the equally-renamed import section earlier in the file.
if mibBuilder.loadTexts:
    juniIpv6ProfileNdOtherConfig.setDescription('Enable/disable ND other config for this IPv6 network interface.')
juniIpv6ProfileNdSuppressRa = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 12), juni_enable().clone('disable')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdSuppressRa.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdSuppressRa.setDescription('Enable/disable ND suppress RA for this IPv6 network interface.')
juniIpv6ProfileNdRaInterval = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 13), integer32().subtype(subtypeSpec=value_range_constraint(3, 1800)).clone(200)).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdRaInterval.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdRaInterval.setDescription('The configured interval between IPv6 RA transmissions on the interface.')
juniIpv6ProfileNdRaLifeTime = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 14), integer32().subtype(subtypeSpec=value_range_constraint(0, 1800)).clone(1800)).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdRaLifeTime.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdRaLifeTime.setDescription('The configured RA lifetime for this IPv6 network interface.')
juniIpv6ProfileNdReachableTime = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 15), integer32()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdReachableTime.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdReachableTime.setDescription('The configured RA reachable time for this IPv6 network interface.')
juniIpv6ProfileNdPrefix = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 16), ipv6_address_prefix()).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefix.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefix.setDescription('The prefix associated with the this interface.')
juniIpv6ProfileNdPrefixLength = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 17), integer32().subtype(subtypeSpec=value_range_constraint(1, 128))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefixLength.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefixLength.setDescription('The length of the prefix (in bits).')
juniIpv6ProfileNdPrefixOnLinkFlag = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 18), juni_enable().clone('enable')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefixOnLinkFlag.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefixOnLinkFlag.setDescription("This object has the value 'true(1)', if this prefix can be used for on-link determination and the value 'false(2)' otherwise.")
juniIpv6ProfileNdPrefixAutonomousFlag = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 19), juni_enable().clone('enable')).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefixAutonomousFlag.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefixAutonomousFlag.setDescription('Autonomous address configuration flag. When true(1), indicates that this prefix can be used for autonomous address configuration (i.e. can be used to form a local interface address). If false(2), it is not used to autoconfigure a local interface address.')
juniIpv6ProfileNdPrefixPreferredLifetime = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 20), integer32().clone(604800)).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefixPreferredLifetime.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefixPreferredLifetime.setDescription('It is the length of time in seconds that this prefix will remain preferred, i.e. time until deprecation. A value of 4,294,967,295 represents infinity. The address generated from a deprecated prefix should no longer be used as a source address in new communications, but packets received on such an interface are processed as expected.')
juniIpv6ProfileNdPrefixValidLifetime = mib_table_column((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 1, 1, 1, 1, 21), integer32().clone(2592000)).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefixValidLifetime.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileNdPrefixValidLifetime.setDescription('It is the length of time in seconds that this prefix will remain valid, i.e. time until invalidation. A value of 4,294,967,295 represents infinity. The address generated from an invalidated prefix should not appear as the destination or source address of a packet.')
# Conformance section: compliance statements and object groups.
juniIpv6ProfileMIBConformance = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4))
juniIpv6ProfileMIBCompliances = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 1))
juniIpv6ProfileMIBGroups = mib_identifier((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 2))
juniIpv6ProfileCompliance = module_compliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 1, 1)).setObjects(('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileGroup'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpv6ProfileCompliance = juniIpv6ProfileCompliance.setStatus('obsolete')
if mibBuilder.loadTexts:
    juniIpv6ProfileCompliance.setDescription('Obsolete Compliance statement for systems supporting IPv6 configuration profiles. This statement became obsolete when added ND support.')
juniIpv6ProfileCompliance1 = module_compliance((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 1, 2)).setObjects(('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileGroup1'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpv6ProfileCompliance1 = juniIpv6ProfileCompliance1.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileCompliance1.setDescription('Compliance statement for systems supporting IPv6 configuration profiles, incorporating support of ND on dynamical interface.')
juniIpv6ProfileGroup = object_group((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 2, 1)).setObjects(('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileSetMap'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileRouterName'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileIpv6Addr'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileIpv6MaskLen'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileMtu'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileSrcAddrValidEnable'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileInheritNumString'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpv6ProfileGroup = juniIpv6ProfileGroup.setStatus('obsolete')
if mibBuilder.loadTexts:
    juniIpv6ProfileGroup.setDescription('An obsolete collection of objects providing management of IPv6 Profile functionality in a Juniper product. This statement became obsolete when added ND support.')
juniIpv6ProfileGroup1 = object_group((1, 3, 6, 1, 4, 1, 4874, 2, 2, 68, 4, 2, 2)).setObjects(('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileSetMap'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileRouterName'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileIpv6Addr'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileIpv6MaskLen'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileMtu'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileSrcAddrValidEnable'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileInheritNumString'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdEnabled'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdManagedConfig'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdOtherConfig'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdSuppressRa'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdRaInterval'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdRaLifeTime'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdReachableTime'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdPrefix'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdPrefixLength'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdPrefixOnLinkFlag'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdPrefixAutonomousFlag'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdPrefixPreferredLifetime'), ('Juniper-IPV6-PROFILE-MIB', 'juniIpv6ProfileNdPrefixValidLifetime'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniIpv6ProfileGroup1 = juniIpv6ProfileGroup1.setStatus('current')
if mibBuilder.loadTexts:
    juniIpv6ProfileGroup1.setDescription('The basic collection of objects providing management of IPv6 Profile functionality in a Juniper product.')
mibBuilder.exportSymbols('Juniper-IPV6-PROFILE-MIB', juniIpv6Profile=juniIpv6Profile, juniIpv6ProfileSrcAddrValidEnable=juniIpv6ProfileSrcAddrValidEnable, juniIpv6ProfileGroup=juniIpv6ProfileGroup, juniIpv6ProfileNdSuppressRa=juniIpv6ProfileNdSuppressRa, juniIpv6ProfileIpv6MaskLen=juniIpv6ProfileIpv6MaskLen, juniIpv6ProfileObjects=juniIpv6ProfileObjects, juniIpv6ProfileNdPrefixPreferredLifetime=juniIpv6ProfileNdPrefixPreferredLifetime, juniIpv6ProfileCompliance1=juniIpv6ProfileCompliance1, juniIpv6ProfileNdPrefixValidLifetime=juniIpv6ProfileNdPrefixValidLifetime, juniIpv6ProfileNdPrefixLength=juniIpv6ProfileNdPrefixLength, juniIpv6ProfileMIB=juniIpv6ProfileMIB, juniIpv6ProfileNdPrefix=juniIpv6ProfileNdPrefix, juniIpv6ProfileGroup1=juniIpv6ProfileGroup1, juniIpv6ProfileIpv6Addr=juniIpv6ProfileIpv6Addr, juniIpv6ProfileNdRaLifeTime=juniIpv6ProfileNdRaLifeTime, juniIpv6ProfileTable=juniIpv6ProfileTable, juniIpv6ProfileNdOtherConfig=juniIpv6ProfileNdOtherConfig, juniIpv6ProfileNdPrefixOnLinkFlag=juniIpv6ProfileNdPrefixOnLinkFlag, juniIpv6ProfileMtu=juniIpv6ProfileMtu, juniIpv6ProfileMIBCompliances=juniIpv6ProfileMIBCompliances, juniIpv6ProfileNdEnabled=juniIpv6ProfileNdEnabled, juniIpv6ProfileMIBConformance=juniIpv6ProfileMIBConformance, juniIpv6ProfileInheritNumString=juniIpv6ProfileInheritNumString, juniIpv6ProfileNdPrefixAutonomousFlag=juniIpv6ProfileNdPrefixAutonomousFlag, juniIpv6ProfileMIBGroups=juniIpv6ProfileMIBGroups, juniIpv6ProfileCompliance=juniIpv6ProfileCompliance, juniIpv6ProfileSetMap=juniIpv6ProfileSetMap, juniIpv6ProfileNdReachableTime=juniIpv6ProfileNdReachableTime, juniIpv6ProfileId=juniIpv6ProfileId, juniIpv6ProfileNdRaInterval=juniIpv6ProfileNdRaInterval, PYSNMP_MODULE_ID=juniIpv6ProfileMIB, juniIpv6ProfileRouterName=juniIpv6ProfileRouterName, juniIpv6ProfileEntry=juniIpv6ProfileEntry, juniIpv6ProfileNdManagedConfig=juniIpv6ProfileNdManagedConfig)
|
r'''
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
class TlsNotEnoughDataError(Exception):
    """Raised while parsing TLS data that is valid so far but incomplete."""
class TlsRecordIncompleteError(TlsNotEnoughDataError):
    """Error for when a TLS Record appears valid but not enough data is
    present to parse the record.

    Attributes:
        data_available: number of bytes currently available.
        record_size: total record size the header claims.
    """

    def __init__(self, data_available, record_size):
        # Pass a message to the base class so str()/logging output is
        # informative instead of empty; attributes are kept for callers
        # that inspect them programmatically.
        super().__init__(
            'TLS record incomplete: %d of %d bytes available'
            % (data_available, record_size))
        self.data_available = data_available
        self.record_size = record_size
class TlsMessageFragmentedError(TlsNotEnoughDataError):
    """Error for when not enough data is present to parse a TLS message
    because of fragmentation.

    Attributes:
        fragment_data: the partial message bytes seen so far.
        data_consumed: how many bytes were consumed from the input.
    """

    def __init__(self, fragment_data, data_consumed):
        # Pass a message to the base class so str()/logging output is
        # informative instead of empty; attributes are kept for callers
        # that inspect them programmatically.
        super().__init__(
            'TLS message fragmented; %d bytes consumed' % (data_consumed,))
        self.fragment_data = fragment_data
        self.data_consumed = data_consumed
|
"""
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class Tlsnotenoughdataerror(Exception):
    """Raised while parsing TLS data that is valid so far but incomplete."""
class Tlsrecordincompleteerror(TlsNotEnoughDataError):
    """Raised when a TLS record header looks valid but the full record body
    has not arrived yet.

    Attributes:
        data_available: number of bytes currently available.
        record_size: total record size the header claims.
    """

    def __init__(self, data_available, record_size):
        self.record_size = record_size
        self.data_available = data_available
class Tlsmessagefragmentederror(TlsNotEnoughDataError):
    """Raised when a TLS message cannot be parsed yet because it is split
    across fragments.

    Attributes:
        fragment_data: the partial message bytes seen so far.
        data_consumed: how many bytes were consumed from the input.
    """

    def __init__(self, fragment_data, data_consumed):
        self.data_consumed = data_consumed
        self.fragment_data = fragment_data
|
# Print a name line followed by block-letter initials, one row per print call.
_BANNER_ROWS = (
    "My name is Jessica Bonnie Ayomide",
    "JJJJJJJJJJJJJ BBBBBBBBBB A",
    " J B B A A",
    " J B B A A",
    " J B B A A",
    " J B B A A",
    " J BBBBBBBBBB AAAAAAAAAA",
    " J B B A A",
    " J B B A A",
    "J J B B A A",
    " J J B B A A",
    " JJJJJ BBBBBBBBBB A A",
)
for _row in _BANNER_ROWS:
    print(_row)
|
# Same banner, emitted with a single write instead of one print per row;
# the joined string plus print's trailing newline yields identical output.
print('\n'.join((
    'My name is Jessica Bonnie Ayomide',
    'JJJJJJJJJJJJJ BBBBBBBBBB A',
    ' J B B A A',
    ' J B B A A',
    ' J B B A A',
    ' J B B A A',
    ' J BBBBBBBBBB AAAAAAAAAA',
    ' J B B A A',
    ' J B B A A',
    'J J B B A A',
    ' J J B B A A',
    ' JJJJJ BBBBBBBBBB A A',
)))
|
#!/usr/bin/env python3
def main():
    """Read one integer per line from the file named by argv[1], sort the
    list in place with quicksort, and print the result."""
    # BUG FIX: the script (which begins at the shebang above) never imports
    # ``sys`` at module level, so ``sys.argv`` raised NameError; a local
    # import keeps the fix self-contained.
    import sys

    fname = sys.argv[1]
    with open(fname, 'r') as f:
        arr = [int(line.rstrip('\r\n')) for line in f]
    quicksort(arr, start=0, end=len(arr)-1)
    print('Sorted list is: ', arr)
    return
def quicksort(arr, start, end):
if end - start < 1:
return 0
b = start + 1
for i in range(start+1, end):
if arr[i] <= arr[start]:
arr[b], arr[i] = arr[i], arr[b]
b += 1
arr[start], arr[b-1] = arr[b-1], arr[start]
quicksort(arr, start, b-1)
quicksort(arr, b, end)
# Standard entry point: run main() only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    main()
|
def main():
    """Read one integer per line from the file named by argv[1], sort the
    list in place with quicksort, and print the result."""
    # BUG FIX: ``sys`` is never imported at module level in this file, so
    # ``sys.argv`` raised NameError; a local import keeps the fix contained.
    import sys

    fname = sys.argv[1]
    with open(fname, 'r') as f:
        arr = [int(line.rstrip('\r\n')) for line in f]
    quicksort(arr, start=0, end=len(arr) - 1)
    print('Sorted list is: ', arr)
    return
def quicksort(arr, start, end):
if end - start < 1:
return 0
b = start + 1
for i in range(start + 1, end):
if arr[i] <= arr[start]:
(arr[b], arr[i]) = (arr[i], arr[b])
b += 1
(arr[start], arr[b - 1]) = (arr[b - 1], arr[start])
quicksort(arr, start, b - 1)
quicksort(arr, b, end)
# Standard entry point: run main() only when executed as a script,
# not when this module is imported.
if __name__ == '__main__':
    main()
|
# AUTOGENERATED BY NBDEV! DO NOT EDIT!

__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Maps each exported symbol to the notebook that defines it.
index = {
    "test_eq": "00_core.ipynb",
    "listify": "00_core.ipynb",
    "test_in": "00_core.ipynb",
    "test_err": "00_core.ipynb",
    "configure_logging": "00_core.ipynb",
    "setup_dataframe_copy_logging": "00_core.ipynb",
    "n_total_series": "00_core.ipynb",
    "n_days_total": "00_core.ipynb",
    "raw_dir": "00_core.ipynb",
    "read_series_sample": "00_core.ipynb",
    "melt_sales_series": "00_core.ipynb",
    "extract_day_ids": "00_core.ipynb",
    "join_w_calendar": "00_core.ipynb",
    "join_w_prices": "00_core.ipynb",
    "to_parquet": "00_core.ipynb",
    "extract_id_columns": "00_core.ipynb",
    "get_submission_template_melt": "00_core.ipynb",
    "ParquetIterableDataset": "01_petastorm.ipynb",
    "prepare_data_on_disk": "02_pipeline.ipynb",
}

# Generated library modules.
modules = ["core.py", "petastorm.py", "pipeline.py"]

doc_url = "https://cluePrints.github.io/kaggle-m5-nbdev/"
git_url = "https://github.com/cluePrints/kaggle-m5-nbdev/tree/master/"


def custom_doc_links(name):
    """Always return None: this project defines no custom doc links."""
    return None
|
__all__ = ['index', 'modules', 'custom_doc_links', 'git_url']

# Maps each exported symbol to the notebook that defines it (nbdev index).
index = {
    'test_eq': '00_core.ipynb',
    'listify': '00_core.ipynb',
    'test_in': '00_core.ipynb',
    'test_err': '00_core.ipynb',
    'configure_logging': '00_core.ipynb',
    'setup_dataframe_copy_logging': '00_core.ipynb',
    'n_total_series': '00_core.ipynb',
    'n_days_total': '00_core.ipynb',
    'raw_dir': '00_core.ipynb',
    'read_series_sample': '00_core.ipynb',
    'melt_sales_series': '00_core.ipynb',
    'extract_day_ids': '00_core.ipynb',
    'join_w_calendar': '00_core.ipynb',
    'join_w_prices': '00_core.ipynb',
    'to_parquet': '00_core.ipynb',
    'extract_id_columns': '00_core.ipynb',
    'get_submission_template_melt': '00_core.ipynb',
    'ParquetIterableDataset': '01_petastorm.ipynb',
    'prepare_data_on_disk': '02_pipeline.ipynb',
}

modules = ['core.py', 'petastorm.py', 'pipeline.py']

doc_url = 'https://cluePrints.github.io/kaggle-m5-nbdev/'
git_url = 'https://github.com/cluePrints/kaggle-m5-nbdev/tree/master/'


def custom_doc_links(name):
    """Always return None: this project defines no custom doc links."""
    return None
|
# -*- coding: utf-8 -*-
"""@package Methods.Machine.SlotWind.comp_surface_wind
Slot Winding Computation of surface (Numerical) method
@date Created on Wed Jul 25 14:22:33 2018
@copyright (C) 2014-2015 EOMYS ENGINEERING.
@author pierre_b
"""
def comp_surface_wind(self):
    """Compute the Slot winding surface (by numerical computation).

    Caution, the bottom of the Slot is an Arc

    Parameters
    ----------
    self : SlotWind
        A SlotWind object

    Returns
    -------
    S: float
        Slot total surface [m**2]
    """
    # Build a single winding surface (one radial zone, one tangential zone)
    # and delegate the area computation to that surface object.
    winding_surfaces = self.build_geometry_wind(Nrad=1, Ntan=1)
    return winding_surfaces[0].comp_surface()
|
"""@package Methods.Machine.SlotWind.comp_surface_wind
Slot Winding Computation of surface (Numerical) method
@date Created on Wed Jul 25 14:22:33 2018
@copyright (C) 2014-2015 EOMYS ENGINEERING.
@author pierre_b
"""
def comp_surface_wind(self):
    """Compute the Slot winding surface (by numerical computation).

    Caution, the bottom of the Slot is an Arc

    Parameters
    ----------
    self : SlotWind
        A SlotWind object

    Returns
    -------
    S: float
        Slot total surface [m**2]
    """
    # One radial and one tangential zone yield a single surface whose own
    # comp_surface() gives the total winding area.
    return self.build_geometry_wind(Nrad=1, Ntan=1)[0].comp_surface()
|
# code
'''python'''
class Solution(object):
    """LeetCode 58: length of the last word in a string."""

    def lengthOfLastWord(self, s):
        """Return the length of the last space-delimited word of ``s``.

        Trailing spaces are stripped first; a blank string yields 0 because
        ``''.split(' ')`` produces ``['']``.
        """
        self.s = s  # the input string is also stored on the instance
        last_word = s.strip().split(' ')[-1]
        return len(last_word)
|
"""python"""
class Solution(object):
    """LeetCode 58: length of the last word in a string."""

    def length_of_last_word(self, s):
        """Return the length of the last space-delimited word of ``s``.

        Trailing spaces are stripped first; a blank string yields 0 because
        ``''.split(' ')`` produces ``['']``.
        """
        self.s = s  # the input string is also stored on the instance
        return len(s.strip().split(' ')[-1])
|
def clean_sentence(output, data_loader):
    """Convert a sequence of vocabulary indices into a sentence string.

    Decoding stops at the first end-of-sentence token; unknown-word tokens
    are dropped.  Every other token — including the start token, which this
    implementation does not filter — is joined with single spaces.

    Parameters
    ----------
    output : sequence of int
        Predicted word indices.
    data_loader :
        Loader whose ``dataset.vocab`` provides ``end_word``, ``unk_word``
        and the ``idx2word`` index-to-token mapping.

    Returns
    -------
    str
        The decoded sentence.
    """
    vocab = data_loader.dataset.vocab
    end_word = vocab.end_word
    unk_word = vocab.unk_word
    words = []
    # Iterate token indices directly instead of ``range(len(output))``;
    # the dead commented-out start_word filter and the unused start_word
    # local were removed.
    for word_idx in output:
        word = vocab.idx2word.get(word_idx)
        if word == end_word:
            break
        if word != unk_word:
            words.append(word)
    return " ".join(words)
|
def clean_sentence(output, data_loader):
    """Join decoded tokens into a sentence.

    Stops at the first end-of-sentence token and skips unknown-word tokens;
    start tokens are NOT filtered out.
    """
    vocab = data_loader.dataset.vocab
    start_word = vocab.start_word  # read but unused, kept for parity
    end_word = vocab.end_word
    unk_word = vocab.unk_word
    words = []
    for token_idx in output:
        token = vocab.idx2word.get(token_idx)
        if token == end_word:
            break
        if token != unk_word:
            words.append(token)
    return ' '.join(words)
|
#
# PySNMP MIB module TPLINK-SSH-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TPLINK-SSH-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:25:53 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ValueRangeConstraint, ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Counter64, ModuleIdentity, iso, Gauge32, Unsigned32, IpAddress, Bits, Counter32, NotificationType, ObjectIdentity, TimeTicks, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Counter64", "ModuleIdentity", "iso", "Gauge32", "Unsigned32", "IpAddress", "Bits", "Counter32", "NotificationType", "ObjectIdentity", "TimeTicks", "Integer32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
tplinkMgmt, = mibBuilder.importSymbols("TPLINK-MIB", "tplinkMgmt")
# Autogenerated pysnmp module for TP-Link's private SSH-configuration MIB.
# Layout: module identity, two OID subtrees (objects / notifications), then
# one read-write scalar per SSH setting, and finally the symbol export.
tplinkSshMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 11863, 6, 5))
tplinkSshMIB.setRevisions(('2012-12-13 09:30',))
# Revision descriptions are only supported on pysnmp builders newer than 4.4.0.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: tplinkSshMIB.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts: tplinkSshMIB.setLastUpdated('201212130930Z')
if mibBuilder.loadTexts: tplinkSshMIB.setOrganization('TPLINK')
if mibBuilder.loadTexts: tplinkSshMIB.setContactInfo('www.tplink.com.cn')
if mibBuilder.loadTexts: tplinkSshMIB.setDescription('Private MIB for SSH configuration.')
# OID subtrees for configuration objects and (empty here) notifications.
tplinkSshMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1))
tplinkSshNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 11863, 6, 5, 2))
# Global SSH on/off switch (0 = disable, 1 = enable).
tpSshEnable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshEnable.setStatus('current')
if mibBuilder.loadTexts: tpSshEnable.setDescription('0. disable 1. enable')
# Per-protocol-version enable flags.
tpSshProtocolV1Enable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshProtocolV1Enable.setStatus('current')
if mibBuilder.loadTexts: tpSshProtocolV1Enable.setDescription('0. disable 1. enable')
tpSshProtocolV2Enable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshProtocolV2Enable.setStatus('current')
if mibBuilder.loadTexts: tpSshProtocolV2Enable.setDescription('0. disable 1. enable')
# Session limits: quiet period (1-120 s) and max concurrent connections (1-5).
tpSshQuietPeriod = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 120))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshQuietPeriod.setStatus('current')
if mibBuilder.loadTexts: tpSshQuietPeriod.setDescription('quiet period(1-120 second)')
tpSshMaxConnections = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 5))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshMaxConnections.setStatus('current')
if mibBuilder.loadTexts: tpSshMaxConnections.setDescription('max connection(1-5)')
# Per-cipher enable flags (AES-128/192/256, Blowfish, CAST-128, 3DES).
tpSshEncryptAlgAES128Enable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshEncryptAlgAES128Enable.setStatus('current')
if mibBuilder.loadTexts: tpSshEncryptAlgAES128Enable.setDescription('0. disable 1. enable')
tpSshEncryptAlgAES192Enable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshEncryptAlgAES192Enable.setStatus('current')
if mibBuilder.loadTexts: tpSshEncryptAlgAES192Enable.setDescription('0. disable 1. enable')
tpSshEncryptAlgAES256Enable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshEncryptAlgAES256Enable.setStatus('current')
if mibBuilder.loadTexts: tpSshEncryptAlgAES256Enable.setDescription('0. disable 1. enable')
tpSshEncryptAlgBlowfishEnable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshEncryptAlgBlowfishEnable.setStatus('current')
if mibBuilder.loadTexts: tpSshEncryptAlgBlowfishEnable.setDescription('0. disable 1. enable')
tpSshEncryptAlgCast128Enable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshEncryptAlgCast128Enable.setStatus('current')
if mibBuilder.loadTexts: tpSshEncryptAlgCast128Enable.setDescription('0. disable 1. enable')
tpSshEncryptAlg3DESEnable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshEncryptAlg3DESEnable.setStatus('current')
if mibBuilder.loadTexts: tpSshEncryptAlg3DESEnable.setDescription('0. disable 1. enable')
# Integrity (MAC) algorithm enable flags.
tpSshInteAlgSHA1Enable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshInteAlgSHA1Enable.setStatus('current')
if mibBuilder.loadTexts: tpSshInteAlgSHA1Enable.setDescription('0. disable 1. enable')
tpSshInteAlgMD5Enable = MibScalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disable", 0), ("enable", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: tpSshInteAlgMD5Enable.setStatus('current')
if mibBuilder.loadTexts: tpSshInteAlgMD5Enable.setDescription('0. disable 1. enable')
# Export all defined objects so other MIB modules can import them by name.
mibBuilder.exportSymbols("TPLINK-SSH-MIB", tpSshEncryptAlgCast128Enable=tpSshEncryptAlgCast128Enable, tplinkSshMIBObjects=tplinkSshMIBObjects, tpSshEncryptAlgBlowfishEnable=tpSshEncryptAlgBlowfishEnable, tpSshEncryptAlgAES128Enable=tpSshEncryptAlgAES128Enable, tpSshProtocolV2Enable=tpSshProtocolV2Enable, tpSshMaxConnections=tpSshMaxConnections, tpSshInteAlgMD5Enable=tpSshInteAlgMD5Enable, tpSshProtocolV1Enable=tpSshProtocolV1Enable, tpSshEncryptAlg3DESEnable=tpSshEncryptAlg3DESEnable, PYSNMP_MODULE_ID=tplinkSshMIB, tplinkSshNotifications=tplinkSshNotifications, tpSshInteAlgSHA1Enable=tpSshInteAlgSHA1Enable, tplinkSshMIB=tplinkSshMIB, tpSshEncryptAlgAES192Enable=tpSshEncryptAlgAES192Enable, tpSshEncryptAlgAES256Enable=tpSshEncryptAlgAES256Enable, tpSshEnable=tpSshEnable, tpSshQuietPeriod=tpSshQuietPeriod)
|
# NOTE(review): this autogenerated pysnmp module was mangled by a mechanical
# snake_case rename: objects were *assigned* under snake_case names while every
# later use (setStatus/setDescription and mibBuilder.exportSymbols) references
# the original camelCase names, which would raise NameError at import time.
# The assignment targets below are restored to the camelCase names actually
# used.  The snake_case factory names (mib_scalar, integer32, ...) are kept:
# they are bound by the import tuples directly below.
(octet_string, integer, object_identifier) = mibBuilder.importSymbols('ASN1', 'OctetString', 'Integer', 'ObjectIdentifier')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(value_size_constraint, value_range_constraint, constraints_union, constraints_intersection, single_value_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ValueSizeConstraint', 'ValueRangeConstraint', 'ConstraintsUnion', 'ConstraintsIntersection', 'SingleValueConstraint')
(module_compliance, notification_group) = mibBuilder.importSymbols('SNMPv2-CONF', 'ModuleCompliance', 'NotificationGroup')
(mib_scalar, mib_table, mib_table_row, mib_table_column, mib_identifier, counter64, module_identity, iso, gauge32, unsigned32, ip_address, bits, counter32, notification_type, object_identity, time_ticks, integer32) = mibBuilder.importSymbols('SNMPv2-SMI', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'MibIdentifier', 'Counter64', 'ModuleIdentity', 'iso', 'Gauge32', 'Unsigned32', 'IpAddress', 'Bits', 'Counter32', 'NotificationType', 'ObjectIdentity', 'TimeTicks', 'Integer32')
(textual_convention, display_string) = mibBuilder.importSymbols('SNMPv2-TC', 'TextualConvention', 'DisplayString')
(tplink_mgmt,) = mibBuilder.importSymbols('TPLINK-MIB', 'tplinkMgmt')
tplinkSshMIB = module_identity((1, 3, 6, 1, 4, 1, 11863, 6, 5))
tplinkSshMIB.setRevisions(('2012-12-13 09:30',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts:
        tplinkSshMIB.setRevisionsDescriptions(('Initial version of this MIB module.',))
if mibBuilder.loadTexts:
    tplinkSshMIB.setLastUpdated('201212130930Z')
if mibBuilder.loadTexts:
    tplinkSshMIB.setOrganization('TPLINK')
if mibBuilder.loadTexts:
    tplinkSshMIB.setContactInfo('www.tplink.com.cn')
if mibBuilder.loadTexts:
    tplinkSshMIB.setDescription('Private MIB for SSH configuration.')
tplinkSshMIBObjects = mib_identifier((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1))
tplinkSshNotifications = mib_identifier((1, 3, 6, 1, 4, 1, 11863, 6, 5, 2))
tpSshEnable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 1), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshEnable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshEnable.setDescription('0. disable 1. enable')
tpSshProtocolV1Enable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 2), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshProtocolV1Enable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshProtocolV1Enable.setDescription('0. disable 1. enable')
tpSshProtocolV2Enable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 3), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshProtocolV2Enable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshProtocolV2Enable.setDescription('0. disable 1. enable')
tpSshQuietPeriod = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 4), integer32().subtype(subtypeSpec=value_range_constraint(1, 120))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshQuietPeriod.setStatus('current')
if mibBuilder.loadTexts:
    tpSshQuietPeriod.setDescription('quiet period(1-120 second)')
tpSshMaxConnections = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 5), integer32().subtype(subtypeSpec=value_range_constraint(1, 5))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshMaxConnections.setStatus('current')
if mibBuilder.loadTexts:
    tpSshMaxConnections.setDescription('max connection(1-5)')
tpSshEncryptAlgAES128Enable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 6), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshEncryptAlgAES128Enable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshEncryptAlgAES128Enable.setDescription('0. disable 1. enable')
tpSshEncryptAlgAES192Enable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 7), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshEncryptAlgAES192Enable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshEncryptAlgAES192Enable.setDescription('0. disable 1. enable')
tpSshEncryptAlgAES256Enable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 8), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshEncryptAlgAES256Enable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshEncryptAlgAES256Enable.setDescription('0. disable 1. enable')
tpSshEncryptAlgBlowfishEnable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 9), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshEncryptAlgBlowfishEnable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshEncryptAlgBlowfishEnable.setDescription('0. disable 1. enable')
tpSshEncryptAlgCast128Enable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 10), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshEncryptAlgCast128Enable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshEncryptAlgCast128Enable.setDescription('0. disable 1. enable')
tpSshEncryptAlg3DESEnable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 11), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshEncryptAlg3DESEnable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshEncryptAlg3DESEnable.setDescription('0. disable 1. enable')
tpSshInteAlgSHA1Enable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 12), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshInteAlgSHA1Enable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshInteAlgSHA1Enable.setDescription('0. disable 1. enable')
tpSshInteAlgMD5Enable = mib_scalar((1, 3, 6, 1, 4, 1, 11863, 6, 5, 1, 13), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(0, 1))).clone(namedValues=named_values(('disable', 0), ('enable', 1)))).setMaxAccess('readwrite')
if mibBuilder.loadTexts:
    tpSshInteAlgMD5Enable.setStatus('current')
if mibBuilder.loadTexts:
    tpSshInteAlgMD5Enable.setDescription('0. disable 1. enable')
mibBuilder.exportSymbols('TPLINK-SSH-MIB', tpSshEncryptAlgCast128Enable=tpSshEncryptAlgCast128Enable, tplinkSshMIBObjects=tplinkSshMIBObjects, tpSshEncryptAlgBlowfishEnable=tpSshEncryptAlgBlowfishEnable, tpSshEncryptAlgAES128Enable=tpSshEncryptAlgAES128Enable, tpSshProtocolV2Enable=tpSshProtocolV2Enable, tpSshMaxConnections=tpSshMaxConnections, tpSshInteAlgMD5Enable=tpSshInteAlgMD5Enable, tpSshProtocolV1Enable=tpSshProtocolV1Enable, tpSshEncryptAlg3DESEnable=tpSshEncryptAlg3DESEnable, PYSNMP_MODULE_ID=tplinkSshMIB, tplinkSshNotifications=tplinkSshNotifications, tpSshInteAlgSHA1Enable=tpSshInteAlgSHA1Enable, tplinkSshMIB=tplinkSshMIB, tpSshEncryptAlgAES192Enable=tpSshEncryptAlgAES192Enable, tpSshEncryptAlgAES256Enable=tpSshEncryptAlgAES256Enable, tpSshEnable=tpSshEnable, tpSshQuietPeriod=tpSshQuietPeriod)
|
# Margin requirement fractions used by max_margin below.
REGULAR_MARGIN_REQUIREMENT = 0.25
LEVERAGED_MARGIN_REQUIREMENT = 0.75


def max_margin(leveraged_value, regular_value, percentage_of_leveraged_drop,
               percentage_of_regular_drop, current_loan):
    """Combine post-drop position values into the maximum margin figure.

    Each position value is reduced by its assumed fractional drop, scaled by
    (1 - requirement), and the existing loan is subtracted; the total is then
    divided by 1 - (1 - REGULAR_MARGIN_REQUIREMENT) * (1 - regular drop).
    """
    # Position values after the assumed market drops.
    post_drop_leveraged = leveraged_value * (1 - percentage_of_leveraged_drop)
    post_drop_regular = regular_value * (1 - percentage_of_regular_drop)
    # Contribution of each bucket, scaled by one minus its requirement.
    leveraged_contribution = (1 - LEVERAGED_MARGIN_REQUIREMENT) * post_drop_leveraged
    regular_contribution = (1 - REGULAR_MARGIN_REQUIREMENT) * post_drop_regular
    numerator = leveraged_contribution + regular_contribution - current_loan
    denominator = 1 - (1 - REGULAR_MARGIN_REQUIREMENT) * (1 - percentage_of_regular_drop)
    return numerator / denominator
def main():
return max_margin(20000, 10000, 0.6, 0.5, 0)
if __name__ == '__main__':
print(main())
|
# Maintenance-margin requirements for the two asset classes.
regular_margin_requirement = 0.25
leveraged_margin_requirement = 0.75

def max_margin(leveraged_value, regular_value, percentage_of_leveraged_drop, percentage_of_regular_drop, current_loan):
    """Maximum additional margin loan available after the given price drops.

    BUG FIX: the body referenced LEVERAGED_MARGIN_REQUIREMENT /
    REGULAR_MARGIN_REQUIREMENT, which this snippet never defines; use the
    lowercase constants declared above (same values, so behavior for callers
    is unchanged).
    """
    leveraged_value_after_drop = leveraged_value * (1 - percentage_of_leveraged_drop)
    regular_value_after_drop = regular_value * (1 - percentage_of_regular_drop)
    leveraged_margin_impact = (1 - leveraged_margin_requirement) * leveraged_value_after_drop
    regular_margin_impact = (1 - regular_margin_requirement) * regular_value_after_drop
    return (leveraged_margin_impact + regular_margin_impact - current_loan) / (1 - (1 - regular_margin_requirement) * (1 - percentage_of_regular_drop))

def main():
    return max_margin(20000, 10000, 0.6, 0.5, 0)

if __name__ == '__main__':
    print(main())
|
class Solution:
    def __init__(self):
        pass

    # O(m*n) time and space dynamic programming.
    def print_lcs(self, str1, str2):
        """Return one longest common subsequence of str1 and str2 as a list
        of characters; returns 0 for an empty input (kept for backward
        compatibility with the original contract)."""
        m = len(str1)
        n = len(str2)
        if n == 0 or m == 0:
            return 0
        # R[i][j] = length of the LCS of str1[:i] and str2[:j].
        # BUG FIX: xrange was removed in Python 3; use range.
        R = [[None] * (n + 1) for i in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0 or j == 0:
                    R[i][j] = 0
                elif str1[i - 1] == str2[j - 1]:
                    R[i][j] = 1 + R[i - 1][j - 1]
                else:
                    R[i][j] = max(R[i - 1][j], R[i][j - 1])
        # Walk back from R[m][n] to recover one LCS.
        lcs = []
        i = m
        j = n
        while i > 0 and j > 0:
            if str1[i - 1] == str2[j - 1]:
                lcs.insert(0, str1[i - 1])
                i -= 1
                j -= 1
            elif R[i - 1][j] > R[i][j - 1]:
                i -= 1
            else:
                j -= 1
        return lcs
# Demo: print the longest common subsequence of two sample strings.
str1 = "AGGTAB"
str2 = "GXTXAYB"
print(Solution().print_lcs(str1, str2))
|
class Solution:
    def __init__(self):
        pass

    def print_lcs(self, str1, str2):
        """Return one longest common subsequence of str1 and str2 as a list
        of characters; returns 0 for an empty input."""
        m = len(str1)
        n = len(str2)
        if n == 0 or m == 0:
            return 0
        # BUG FIX: the DP table was bound as `r` but read as `R` (NameError),
        # and xrange was removed in Python 3.  r[i][j] = LCS length of
        # str1[:i] vs str2[:j].
        r = [[None] * (n + 1) for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0 or j == 0:
                    r[i][j] = 0
                elif str1[i - 1] == str2[j - 1]:
                    r[i][j] = 1 + r[i - 1][j - 1]
                else:
                    r[i][j] = max(r[i - 1][j], r[i][j - 1])
        # Backtrack through the table to reconstruct one LCS.
        lcs = []
        i = m
        j = n
        while i > 0 and j > 0:
            if str1[i - 1] == str2[j - 1]:
                lcs.insert(0, str1[i - 1])
                i -= 1
                j -= 1
            elif r[i - 1][j] > r[i][j - 1]:
                i -= 1
            else:
                j -= 1
        return lcs
str1 = 'AGGTAB'
str2 = 'GXTXAYB'
# BUG FIX: the class defined above is `Solution`; `solution` is undefined
# and raised NameError here.
print(Solution().print_lcs(str1, str2))
|
# Looks like Advent of Code 2021 day 7 part 2 (crab submarines) -- TODO confirm.
lines = None
with open('day07/input.txt') as f:
    lines = f.readlines()
line = lines[0]
# Input: a single comma-separated line of integer horizontal positions.
arr = list(map(lambda s: int(s), line.split(",")))
# cost[d] is the d-th triangular number (0, 1, 3, 6, ...): fuel to move d steps.
cost = []
prev = 0
q = 0
for i in range(2000):
    cost.append(prev + q)
    prev = prev + q
    q += 1
# Brute-force every candidate target position; keep the cheapest total.
# NOTE(review): shadows builtins `min` and `sum`; assumes positions < 1500
# and distances < 2000 -- verify against the input file.
min = 99999999999999
for pos in range(1500):
    sum = 0
    for x in arr:
        sum += cost[abs(x - pos)]
    if sum < min: min = sum
print(min)
|
# Brute-force "cheapest alignment position" solver (AoC-day-7 style input).
# BUG FIX: the original shadowed the builtins `min` and `sum` with loop
# accumulators; renamed them to `best`/`total` and the step counter to `step`.
lines = None
with open('day07/input.txt') as f:
    lines = f.readlines()
line = lines[0]
# Input: one comma-separated line of integer positions.
arr = [int(s) for s in line.split(',')]
# cost[d] = d-th triangular number = fuel needed to travel d steps.
cost = []
prev = 0
step = 0
for _ in range(2000):
    cost.append(prev + step)
    prev = prev + step
    step += 1
# Try every candidate position; assumes positions < 1500 and distances < 2000
# -- TODO confirm for the actual input file.
best = 99999999999999
for pos in range(1500):
    total = 0
    for x in arr:
        total += cost[abs(x - pos)]
    if total < best:
        best = total
print(best)
|
# hdlmake manifest: Vivado synthesis of the CM0 busy-wait demo for the KC705.
target = "xilinx"
action = "synthesis"

syn_tool = "vivado"
syn_device = "xc7k325t"
syn_grade = "-2"
syn_package = "ffg900"
syn_top = "cm0_busy_wait_top"
syn_project = "cm0_busy_wait_top"

modules = {"local": ["../../../top/kc705_busy_wait/verilog"]}
|
# hdlmake manifest for the KC705 busy-wait top-level (Vivado synthesis).
target = "xilinx"
action = "synthesis"
syn_device = "xc7k325t"
syn_grade = "-2"
syn_package = "ffg900"
syn_top = "cm0_busy_wait_top"
syn_project = "cm0_busy_wait_top"
syn_tool = "vivado"
modules = {
    "local": ["../../../top/kc705_busy_wait/verilog"],
}
|
class DataObject:
    """Base class whose subclasses load data through `_read_*` and
    `_parse_*` hook methods, discovered by name at construction time."""

    def __init__(self, read_data=True):
        # When requested, invoke all read hooks first, then all parse hooks.
        if read_data:
            for phase in ('read', 'parse'):
                self._run_methods(phase)

    def _run_methods(self, method_type):
        """Call every method whose name starts with `_<method_type>_`."""
        prefix = '_{}_'.format(method_type)
        for name in dir(self):
            if name.startswith(prefix):
                getattr(self, name)()

    def get_data(self):
        """Subclasses must override this to return their parsed data."""
        raise NotImplementedError
|
class Dataobject:
    """Base class: subclasses provide `_read_*` / `_parse_*` hook methods
    which are located via dir() and run during construction."""

    def __init__(self, read_data=True):
        if not read_data:
            return
        # Read hooks run before parse hooks.
        self._run_methods('read')
        self._run_methods('parse')

    def _run_methods(self, method_type):
        """Invoke every attribute whose name begins with `_<method_type>_`."""
        wanted = '_{}_'.format(method_type)
        matching = [m for m in dir(self) if m.startswith(wanted)]
        for name in matching:
            getattr(self, name)()

    def get_data(self):
        """Abstract accessor for the loaded data."""
        raise NotImplementedError
|
def sumUpNumbers(inputString):
    """Sum every maximal run of consecutive digit characters in inputString.

    e.g. "2 apples, 12 oranges" -> 2 + 12 = 14.  Returns 0 when the string
    contains no digits.
    """
    numbers = []
    curr = ""
    for ch in inputString:
        try:
            int(ch)  # raises ValueError for non-digit characters
        except ValueError:  # BUG FIX: was a bare `except`, which hid real errors
            if curr != "":
                numbers.append(curr)
                curr = ""
        else:
            curr += ch
    if curr != "":
        numbers.append(curr)
    return sum(int(x) for x in numbers)
|
def sum_up_numbers(inputString):
    """Sum every maximal run of consecutive digit characters in inputString.

    e.g. '2 apples, 12 oranges' -> 2 + 12 = 14; returns 0 if no digits occur.
    """
    numbers = []
    curr = ''
    for ch in inputString:
        try:
            int(ch)  # ValueError for anything int() cannot parse as a digit
        except ValueError:  # BUG FIX: narrowed from a bare `except`
            if curr != '':
                numbers.append(curr)
                curr = ''
        else:
            curr += ch
    if curr != '':
        numbers.append(curr)
    return sum(int(x) for x in numbers)
|
#!/usr/bin/python3
def recsum(n):
    """Recursively sum the integers 1..n; returns n itself when n <= 1."""
    if n <= 1:
        return n
    return n + recsum(n - 1)
# Prompt for a number and print the recursive sum; negatives are rejected.
n = int(input("Enter your number\t"))
if n >= 0:
    print("The sum is", recsum(n))
else:
    print("Enter a positive number")
|
def recsum(n):
    """Sum of 1..n computed recursively (n itself is returned when n <= 1)."""
    return n if n <= 1 else recsum(n - 1) + n
# Read n from the user; only non-negative values are summed.
n = int(input('Enter your number\t'))
if n >= 0:
    print('The sum is', recsum(n))
else:
    print('Enter a positive number')
|
class Solution(object):
    def lengthOfLongestSubstring(self, s):
        """
        :type s: str
        :rtype: int

        Sliding window: d maps each character to the index of its most
        recent occurrence; `left` is the index just before the current
        window's start.
        """
        d = {}
        left = -1
        right = 0
        best = 0  # renamed from `max`, which shadowed the builtin
        if len(s) < 2:
            return len(s)
        while right < len(s) - 1:
            d[s[right]] = right
            right += 1
            # BUG FIX: dict.has_key() was removed in Python 3; use `in`.
            if s[right] in d and d[s[right]] > left:
                left = d[s[right]]
            if right - left > best:
                best = right - left
        return best
|
class Solution(object):
    def length_of_longest_substring(self, s):
        """
        :type s: str
        :rtype: int

        Sliding-window scan; d remembers each character's latest index and
        `left` marks the index just before the current window.
        """
        d = {}
        left = -1
        right = 0
        best = 0  # renamed from `max`, which shadowed the builtin
        if len(s) < 2:
            return len(s)
        while right < len(s) - 1:
            d[s[right]] = right
            right += 1
            # BUG FIX: dict.has_key() does not exist in Python 3; use `in`.
            if s[right] in d and d[s[right]] > left:
                left = d[s[right]]
            if right - left > best:
                best = right - left
        return best
|
# -*- coding: utf-8 -*-
"""
@Datetime: 2019/1/2
@Author: Zhang Yafei
"""
|
"""
@Datetime: 2019/1/2
@Author: Zhang Yafei
"""
|
# Read a sphere radius from stdin and print its volume to three decimals.
raio = float(input())
pi = 3.14159
VOLUME = (4 / 3) * pi * (raio**3)
print("VOLUME = {:.3f}".format(VOLUME))
|
# Sphere-volume calculator.
# BUG FIX: the result was bound as `volume` but printed as `VOLUME`, so the
# print either raised NameError or emitted a stale value; use one name.
raio = float(input())
pi = 3.14159
volume = 4 / 3 * pi * raio ** 3
print('VOLUME = {:.3f}'.format(volume))
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
class AuthMethod(object):
    """FlatBuffers-generated enum of authentication-method codes
    (namespace `proto`).

    NOTE(review): the names match WAMP authentication methods -- confirm
    against the .fbs schema; values must stay in sync with it, so do not
    renumber.
    """
    ANONYMOUS = 0
    COOKIE = 1
    TLS = 2
    TICKET = 3
    CRA = 4
    SCRAM = 5
    CRYPTOSIGN = 6
|
class Authmethod(object):
    """Enum-style class of authentication-method codes.

    NOTE(review): generated FlatBuffers enums conventionally use UPPER_CASE
    constant names; these lowercase names deviate from that -- confirm what
    callers expect before renaming.
    """
    anonymous = 0
    cookie = 1
    tls = 2
    ticket = 3
    cra = 4
    scram = 5
    cryptosign = 6
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class BaseError(Exception):
  """Base error for all test runner errors."""

  def __init__(self, message, is_infra_error=False):
    super(BaseError, self).__init__(message)
    # BUG FIX: BaseException.message was removed in Python 3, so __eq__
    # raised AttributeError; store the message explicitly (harmless on
    # Python 2, required on Python 3).
    self.message = message
    self._is_infra_error = is_infra_error

  def __eq__(self, other):
    return (self.message == other.message
            and self.is_infra_error == other.is_infra_error)

  def __ne__(self, other):
    return not self == other

  # Defining __eq__ alone sets __hash__ to None on Python 3; keep the
  # identity-based hash the Python 2 code effectively had.
  __hash__ = Exception.__hash__

  @property
  def is_infra_error(self):
    """Property to indicate if error was caused by an infrastructure issue."""
    return self._is_infra_error
|
class Baseerror(Exception):
    """Base error for all test runner errors."""

    def __init__(self, message, is_infra_error=False):
        # BUG FIX: super() referenced `BaseError`, a different class, which
        # raises TypeError on construction; use this class's own name.
        super(Baseerror, self).__init__(message)
        # BaseException.message does not exist on Python 3; store it so
        # __eq__ works.
        self.message = message
        self._is_infra_error = is_infra_error

    def __eq__(self, other):
        return self.message == other.message and self.is_infra_error == other.is_infra_error

    def __ne__(self, other):
        return not self == other

    # Keep instances hashable on Python 3 (defining __eq__ alone would set
    # __hash__ to None).
    __hash__ = Exception.__hash__

    @property
    def is_infra_error(self):
        """Property to indicate if error was caused by an infrastructure issue."""
        return self._is_infra_error
|
def for_e():
    """Print a 6x4 letter-E glyph; each cell is a character plus a space."""
    for row in range(6):
        for col in range(4):
            filled = (row == 2
                      or (row == 1 and col % 3 != 0)
                      or (row == 4 and col > 0)
                      or (col == 0 and row == 3))
            print("*" if filled else " ", end=" ")
        print()
def while_e():
    """Print the same 6x4 letter-E glyph as for_e, using while loops."""
    row = 0
    while row < 6:
        col = 0
        while col < 4:
            filled = (row == 2
                      or (row == 1 and col % 3 != 0)
                      or (row == 4 and col > 0)
                      or (col == 0 and row == 3))
            print("*" if filled else " ", end=" ")
            col += 1
        print()
        row += 1
|
def for_e():
    """Render a 6-row by 4-column 'E' made of '*' cells (cell = char + space)."""
    for r in range(6):
        cells = []
        for c in range(4):
            on = r == 2 or (r == 1 and c % 3 != 0) or (r == 4 and c > 0) or (c == 0 and r == 3)
            cells.append('*' if on else ' ')
        for cell in cells:
            print(cell, end=' ')
        print()
def while_e():
    """While-loop variant of for_e: prints the 6x4 'E' glyph."""
    r = 0
    while r < 6:
        c = 0
        while c < 4:
            on = r == 2 or (r == 1 and c % 3 != 0) or (r == 4 and c > 0) or (c == 0 and r == 3)
            ch = '*' if on else ' '
            print(ch, end=' ')
            c += 1
        r += 1
        print()
|
"""
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
EventMatch object. Describes a single matched event for EventMatcher.
"""
class EventMatch(object):  # pylint: disable=too-few-public-methods
    """Value object handed to EventMatcher callbacks for one matched event."""

    def __init__(self, ref, event_data, match):
        """
        :param ref: reference object
        :param event_data: original event data which matches
        :param match: re.MatchObject or string depend on EventMatcher configuration
        """
        self.ref, self.event_data, self.match = ref, event_data, match
|
"""
Copyright 2017 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
EventMatch object. Describes a single matched event for EventMatcher.
"""
class Eventmatch(object):
    """Callback payload for EventMatcher: bundles the reference object, the
    raw event data, and the match result for one matched event."""

    def __init__(self, ref, event_data, match):
        """
        :param ref: reference object
        :param event_data: original event data which matches
        :param match: re.MatchObject or string depend on EventMatcher configuration
        """
        self.ref = ref
        self.match = match
        self.event_data = event_data
|
def selection_sort(elements):
    """Selection-sort *elements* in place (ascending) and return the list."""
    length = len(elements)
    for i in range(length - 1):
        # min() returns the first index holding the smallest remaining value,
        # matching the strict-less scan of a classic selection sort.
        smallest = min(range(i, length), key=elements.__getitem__)
        elements[i], elements[smallest] = elements[smallest], elements[i]
    return elements


ele = [23, 35, 6, 42, 1, 4, 6, 97, 10]
print(selection_sort(ele))
|
def selection_sort(elements):
    """In-place ascending selection sort; returns the same list object."""
    n = len(elements)
    for filled in range(n - 1):
        best = filled
        for candidate in range(filled + 1, n):
            if elements[candidate] < elements[best]:
                best = candidate
        elements[filled], elements[best] = elements[best], elements[filled]
    return elements

ele = [23, 35, 6, 42, 1, 4, 6, 97, 10]
print(selection_sort(ele))
|
# Prints the multiplication tables (tabuada) for 1 through 10, one per line.
tab = 1
while tab <= 10:
    print("Tabuada do", tab, ":", end="\t")
    i = 1
    while i <= 10:
        print(tab * i, end="\t")
        i += 1
    print()
    tab += 1
|
# Multiplication tables for 1..10; columns are tab-separated.
tab = 1
while tab <= 10:
    print('Tabuada do', tab, ':', end='\t')
    i = 1
    while i <= 10:
        print(i * tab, end='\t')
        i += 1
    print()
    tab += 1
|
load("@bazel_skylib//lib:shell.bzl", "shell")
def kubebuilder_manifests(name, srcs, config_root, **kwargs):
    """Genrule producing <name>.yaml by running kustomize over config_root.

    The config tree is copied (dereferencing symlinks via cp -aL) into a
    temp dir so kustomize runs against a writable copy of
    "<config_root>/default".
    """
    native.genrule(
        name = name,
        srcs = srcs,
        outs = [name + ".yaml"],
        cmd = """
tmp=$$(mktemp --directory)
cp -aL "%s/." "$$tmp"
$(location @io_k8s_sigs_kustomize_kustomize_v4//:v4) build "$$tmp/default" > $@
rm -r "$$tmp"
    """ % config_root,
        tools = [
            "@io_k8s_sigs_kustomize_kustomize_v4//:v4",
        ],
        **kwargs
    )
def _ginkgo_test_impl(ctx):
    """Rule impl: emits a bash launcher that runs the precompiled go_test
    binary through the ginkgo CLI, forwarding any extra arguments."""
    wrapper = ctx.actions.declare_file(ctx.label.name)
    ctx.actions.write(
        output = wrapper,
        content = """#!/usr/bin/env bash
set -e
exec {ginkgo} {ginkgo_args} {go_test} -- "$@"
""".format(
            ginkgo = shell.quote(ctx.executable._ginkgo.short_path),
            ginkgo_args = " ".join([shell.quote(arg) for arg in ctx.attr.ginkgo_args]),
            # Ginkgo requires the precompiled binary end with ".test".
            go_test = shell.quote(ctx.executable.go_test.short_path + ".test"),
        ),
        is_executable = True,
    )
    return [DefaultInfo(
        executable = wrapper,
        runfiles = ctx.runfiles(
            files = ctx.files.data,
            # Expose the go_test binary under the ".test" name the wrapper uses.
            symlinks = {ctx.executable.go_test.short_path + ".test": ctx.executable.go_test},
            transitive_files = depset([], transitive = [ctx.attr._ginkgo.default_runfiles.files, ctx.attr.go_test.default_runfiles.files]),
        ),
    )]
# Test rule: runs a precompiled go_test binary through the ginkgo CLI.
ginkgo_test = rule(
    implementation = _ginkgo_test_impl,
    attrs = {
        # Runtime data files made available to the test.
        "data": attr.label_list(allow_files = True),
        # The precompiled go_test binary to execute.
        "go_test": attr.label(executable = True, cfg = "target"),
        # Extra command-line arguments passed to ginkgo.
        "ginkgo_args": attr.string_list(),
        # Implicit dependency on the ginkgo CLI itself.
        "_ginkgo": attr.label(default = "@com_github_onsi_ginkgo//ginkgo", executable = True, cfg = "target"),
    },
    executable = True,
    test = True,
)
|
load('@bazel_skylib//lib:shell.bzl', 'shell')
def kubebuilder_manifests(name, srcs, config_root, **kwargs):
    """Genrule that runs kustomize over *config_root* to emit <name>.yaml."""
    # Copies the kustomize config tree into a temp dir (dereferencing
    # symlinks) and builds the "default" overlay into the single output.
    native.genrule(name=name, srcs=srcs, outs=[name + '.yaml'], cmd='\ntmp=$$(mktemp --directory)\ncp -aL "%s/." "$$tmp"\n$(location @io_k8s_sigs_kustomize_kustomize_v4//:v4) build "$$tmp/default" > $@\nrm -r "$$tmp"\n ' % config_root, tools=['@io_k8s_sigs_kustomize_kustomize_v4//:v4'], **kwargs)
def _ginkgo_test_impl(ctx):
    """Rule impl: wraps the precompiled go_test binary in a bash launcher
    that executes it through the ginkgo CLI."""
    wrapper = ctx.actions.declare_file(ctx.label.name)
    ctx.actions.write(
        output = wrapper,
        content = '#!/usr/bin/env bash\nset -e\nexec {ginkgo} {ginkgo_args} {go_test} -- "$@"\n'.format(
            ginkgo = shell.quote(ctx.executable._ginkgo.short_path),
            ginkgo_args = ' '.join([shell.quote(arg) for arg in ctx.attr.ginkgo_args]),
            # Ginkgo requires the precompiled binary to end with ".test".
            go_test = shell.quote(ctx.executable.go_test.short_path + '.test'),
        ),
        is_executable = True,
    )
    # BUG FIX: the provider is the Starlark builtin `DefaultInfo`; the
    # lowercase `default_info` is undefined and fails at analysis time.
    return [DefaultInfo(
        executable = wrapper,
        runfiles = ctx.runfiles(
            files = ctx.files.data,
            symlinks = {ctx.executable.go_test.short_path + '.test': ctx.executable.go_test},
            transitive_files = depset([], transitive = [ctx.attr._ginkgo.default_runfiles.files, ctx.attr.go_test.default_runfiles.files]),
        ),
    )]
# Test rule wiring _ginkgo_test_impl; `go_test` supplies the compiled test
# binary, `ginkgo_args` are forwarded to the ginkgo CLI.
ginkgo_test = rule(implementation=_ginkgo_test_impl, attrs={'data': attr.label_list(allow_files=True), 'go_test': attr.label(executable=True, cfg='target'), 'ginkgo_args': attr.string_list(), '_ginkgo': attr.label(default='@com_github_onsi_ginkgo//ginkgo', executable=True, cfg='target')}, executable=True, test=True)
|
def chk_p5m(n):
    """Return 0 for multiples of 5 and for numbers with no divisor in
    2..n-1 (i.e. primes); return n unchanged otherwise (1 maps to 1)."""
    if n % 5 == 0:
        return 0
    if n == 1:
        return n
    for divisor in range(2, n):
        if n % divisor == 0:
            return n
    return 0


def fab(n):
    """Map the first n Fibonacci numbers (1, 1, 2, 3, ...) through chk_p5m."""
    prev, cur = 0, 1
    transformed = []
    for _ in range(n):
        prev, cur = cur, prev + cur
        transformed.append(chk_p5m(prev))
    return transformed
# NOTE: fab() above builds the sequence with a walrus assignment inside a
# list comprehension -- deliberately terse.  Reads n from stdin and prints
# the transformed Fibonacci values space-separated.
print(*fab(int(input())))
|
def chk_p5m(n):
    """Return 0 if n is a multiple of 5 or has no divisor in 2..n-1
    (a prime); otherwise return n itself.  1 is returned unchanged."""
    if n % 5 == 0:
        return 0
    if n == 1:
        return 1
    for divisor in range(2, n):
        if n % divisor == 0:
            return n
    return 0
def fab(n):
    """First n Fibonacci numbers (1, 1, 2, 3, ...) passed through chk_p5m."""
    prev, cur = 0, 1
    transformed = []
    for _ in range(n):
        prev, cur = cur, prev + cur
        transformed.append(chk_p5m(prev))
    return transformed
# Read n from stdin and print fab(n) space-separated.
print(*fab(int(input())))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Environment Base Class
__author__: Conor Heins, Alexander Tschantz, Brennan Klein
"""
class Env(object):
    """Abstract environment base class: defines the agent-facing interface
    (reset/step/render/sample_action) plus optional generative-model
    accessors that raise unless a subclass provides a model."""

    def reset(self, state=None):
        # Concrete environments must implement their own reset logic.
        raise NotImplementedError

    def step(self, action):
        # Concrete environments must implement their own transition logic.
        raise NotImplementedError

    def render(self):
        # Rendering is optional; the default is a no-op.
        pass

    def sample_action(self):
        # Action sampling is optional; the default is a no-op.
        pass

    def get_likelihood_dist(self):
        message = "<{}> does not provide a model specification".format(type(self).__name__)
        raise ValueError(message)

    def get_transition_dist(self):
        message = "<{}> does not provide a model specification".format(type(self).__name__)
        raise ValueError(message)

    def get_uniform_posterior(self):
        message = "<{}> does not provide a model specification".format(type(self).__name__)
        raise ValueError(message)

    def get_rand_likelihood_dist(self):
        message = "<{}> does not provide a model specification".format(type(self).__name__)
        raise ValueError(message)

    def get_rand_transition_dist(self):
        message = "<{}> does not provide a model specification".format(type(self).__name__)
        raise ValueError(message)

    def __str__(self):
        return "<{} instance>".format(type(self).__name__)
|
""" Environment Base Class
__author__: Conor Heins, Alexander Tschantz, Brennan Klein
"""
class Env(object):
    """Abstract environment interface (reset/step/render/sample_action plus
    optional generative-model accessors)."""

    def reset(self, state=None):
        raise NotImplementedError

    def step(self, action):
        raise NotImplementedError

    def render(self):
        pass

    def sample_action(self):
        pass

    # BUG FIX: `value_error` is undefined; the builtin exception is ValueError.
    def get_likelihood_dist(self):
        raise ValueError('<{}> does not provide a model specification'.format(type(self).__name__))

    def get_transition_dist(self):
        raise ValueError('<{}> does not provide a model specification'.format(type(self).__name__))

    def get_uniform_posterior(self):
        raise ValueError('<{}> does not provide a model specification'.format(type(self).__name__))

    def get_rand_likelihood_dist(self):
        raise ValueError('<{}> does not provide a model specification'.format(type(self).__name__))

    def get_rand_transition_dist(self):
        raise ValueError('<{}> does not provide a model specification'.format(type(self).__name__))

    def __str__(self):
        return '<{} instance>'.format(type(self).__name__)
|
class Solution:
    def largestValsFromLabels(self, values, labels, num_wanted, use_limit):
        """Greedily take items in decreasing value order -- at most
        use_limit per label and num_wanted overall -- and return the sum of
        the chosen values."""
        used = {label: 0 for label in set(labels)}
        total = 0
        remaining = num_wanted
        for value, label in sorted(zip(values, labels), reverse=True):
            if remaining == 0:
                break
            if used[label] < use_limit:
                total += value
                remaining -= 1
                used[label] += 1
        return total
|
class Solution:
    def largest_vals_from_labels(self, values, labels, num_wanted, use_limit):
        """Pick items greedily by decreasing value, capped at use_limit per
        label and num_wanted in total; return the value sum."""
        pairs = sorted(zip(values, labels), reverse=True)
        picked_per_label = dict.fromkeys(labels, 0)
        total = 0
        for value, label in pairs:
            if num_wanted == 0:
                break
            if picked_per_label[label] + 1 <= use_limit:
                total += value
                num_wanted -= 1
                picked_per_label[label] += 1
        return total
|
"""
Package contains:
Database Class
Decoder Class
Cleaner Class
MyHTMLParser Class
"""
|
"""
Package contains:
Database Class
Decoder Class
Cleaner Class
MyHTMLParser Class
"""
|
class Human():
    """Simple person type; `sum` is a class-level attribute shared by all
    instances (note: the name shadows the builtin inside the class body)."""

    sum = 0

    def __init__(self, name, age):
        self.name, self.age = name, age

    def get_name(self):
        # Prints (rather than returns) the name, matching the original API.
        print(self.name)

    def do_homework(self):
        print('parent method')
|
class Human:
    """Person with a name and an age; `sum` is a shared class attribute."""

    sum = 0

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def get_name(self):
        """Print the stored name (no return value)."""
        print(self.name)

    def do_homework(self):
        """Placeholder behavior overridden by subclasses."""
        print('parent method')
|
# Maps original exception messages to their replacement text.
d = {"no": "yes"}


class CustomError:
    """Decorator: re-raise any exception from the wrapped callable with its
    message translated through the module-level map ``d``."""

    def __init__(self, fun):
        self.fun = fun

    def __call__(self, *args, **kwargs):
        try:
            return self.fun(*args, **kwargs)
        except Exception as exc:
            print(exc)
            raise Exception(d.get(str(exc)))


@CustomError
def a():
    raise Exception("no")
|
# Maps original exception messages to their replacement text.
d = {'no': 'yes'}

class Customerror:
    """Decorator that re-raises exceptions with the message translated via d."""

    def __init__(self, fun):
        self.fun = fun

    def __call__(self, *args, **kwargs):
        try:
            return self.fun(*args, **kwargs)
        except Exception as e:
            print(e)
            # BUG FIX: `exception` is undefined; the builtin is Exception.
            raise Exception(d.get(str(e)))

# BUG FIX (consistency): decorate with the class defined in this snippet
# (`Customerror`), and raise the builtin Exception instead of the undefined
# lowercase `exception`.
@Customerror
def a():
    raise Exception('no')
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Documentation',
'category': 'Website',
'summary': 'Forum, Documentation',
'description': """
Documentation based on question and pertinent answers of Forum
""",
'depends': [
'website_forum'
],
'data': [
'data/doc_data.xml',
'security/ir.model.access.csv',
'views/doc.xml',
'views/website_doc.xml',
],
'demo': [
'data/doc_demo.xml',
],
}
|
# Odoo module manifest (single-expression form): name/category/summary,
# dependency list, data files loaded on install, and demo fixtures.
{'name': 'Documentation', 'category': 'Website', 'summary': 'Forum, Documentation', 'description': '\nDocumentation based on question and pertinent answers of Forum\n ', 'depends': ['website_forum'], 'data': ['data/doc_data.xml', 'security/ir.model.access.csv', 'views/doc.xml', 'views/website_doc.xml'], 'demo': ['data/doc_demo.xml']}
|
# Databricks notebook source
# MAGIC %md
# MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/databricks icon.png?raw=true" width=100/>
# MAGIC <img src="/files/flight/Megacorp.png?raw=true" width=200/>
# MAGIC # Democratizing MegaCorp's Data
# MAGIC
# MAGIC ## MegaCorp's current challenges
# MAGIC <ul>
# MAGIC <li/>Hard to manage and scale infrastructure
# MAGIC <li/>Multiple sources of truth because of siloed data
# MAGIC <li/>Data Management and Data quality issues
# MAGIC <li/>Sub-optimal performance
# MAGIC <li/>Limited collaboration between teams
# MAGIC </ul>
# COMMAND ----------
# MAGIC %md
# MAGIC ##Databricks can help!
# MAGIC ####A unified Simple, Open, Collaborative Platform for your BI to AI needs
# MAGIC
# MAGIC <img src="https://databricks.com/wp-content/uploads/2021/10/Databricks-lakehouse-platform-2.png" width=600>
# MAGIC <img src="https://databricks.com/wp-content/uploads/2021/09/Platform-image-4.svg">
# COMMAND ----------
# This creates the "team_name" field displayed at the top of the notebook.
# (The second argument is only the widget's default/placeholder text.)
dbutils.widgets.text("team_name", "Enter your team's name")
# COMMAND ----------
# Note that we have factored out the setup processing into a different notebook, which we call here.
# As a flight school student, you will probably want to look at the setup notebook.
# Even though you'll want to look at it, we separated it out in order to demonstrate a best practice...
# ... you can use this technique to keep your demos shorter, and avoid boring your audience with housekeeping.
# In addition, you can save demo time by running this initial setup command before you begin your demo.
# This cell should run in a few minutes or less
# Fetch the team name entered in the notebook widget above.
team_name = dbutils.widgets.get("team_name")
# The setup notebook returns one space-separated string; split() yields
# [local_path, dbfs_path, database_name], unpacked positionally below.
setup_responses = dbutils.notebook.run("./includes/flight_school_assignment_1_setup", 0, {"team_name": team_name}).split()
local_data_path = setup_responses[0]
dbfs_data_path = setup_responses[1]
database_name = setup_responses[2]
print(f"Path to be used for Local Files: {local_data_path}")
print(f"Path to be used for DBFS Files: {dbfs_data_path}")
print(f"Database Name: {database_name}")
# COMMAND ----------
# Let's set the default database name so we don't have to specify it on every query
# (subsequent SQL cells then run against this team-specific database).
spark.sql(f"USE {database_name}")
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC #### Let's talk Databricks' Delta Lake
# MAGIC
# MAGIC #####Delta lake is an open-source project that enables building a Lakehouse Architecture on top of existing storage systems such as S3, ADLS, GCS, and HDFS.
# MAGIC
# MAGIC Delta Lake brings __*Performance*__ and __*Reliability*__ to Data Lakes
# MAGIC
# MAGIC Why did Delta Lake have to be invented? Let's take a look...
# MAGIC
# MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/projects_failing.png?raw=true" width=1000/>
# MAGIC
# MAGIC As the graphic above shows, Big Data Lake projects have a very high failure rate. In fact, Gartner Group estimates that 85% of these projects fail (see https://www.infoworld.com/article/3393467/4-reasons-big-data-projects-failand-4-ways-to-succeed.html ). *Why* is the failure rate so high?
# MAGIC
# MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/projects_failing_reasons.png?raw=true" width=1000/>
# MAGIC
# MAGIC The graphic above shows the main __*reliability*__ issues with data lakes. Unlike relational databases, typical data lakes are not capable of transactional (ACID) behavior. This leads to a number of reliability issues:
# MAGIC
# MAGIC - When a job fails, incomplete work is not rolled back, as it would be in a relational database. Data may be left in an inconsistent state. This issue is extremely difficult to deal with in production.
# MAGIC
# MAGIC - Data lakes typically cannot enforce schema. This is often touted as a "feature" called "schema-on-read," because it allows flexibility at data ingest time. However, when downstream jobs fail trying to read corrupt data, we have a very difficult recovery problem. It is often difficult just to find the source application that caused the problem... which makes fixing the problem even harder!
# MAGIC
# MAGIC - Relational databases allow multiple concurrent users, and ensure that each user gets a consistent view of data. Half-completed transactions never show up in the result sets of other concurrent users. This is not true in a typical data lake. Therefore, it is almost impossible to have a concurrent mix of read jobs and write jobs. This becomes an even bigger problem with streaming data, because streams typically don't pause to let other jobs run!
# MAGIC
# MAGIC Next, let's look at the key __*performance issues*__ with data lakes...
# MAGIC
# MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/projects_failing_reasons_1.png?raw=true" width=1000/>
# MAGIC
# MAGIC - We have already noted that data lakes cannot provide a consistent view of data to concurrent users. This is a reliability problem, but it is also a __*performance*__ problem because if we must run jobs one at a time, our production time window becomes extremely limited.
# MAGIC
# MAGIC - Most data lake engineers have come face-to-face with the "small-file problem." Data is typically ingested into a data lake in batches. Each batch typically becomes a separate physical file in a directory that defines a table in the lake. Over time, the number of physical files can grow to be very large. When this happens, performance suffers because opening and closing these files is a time-consuming operation.
# MAGIC
# MAGIC - Experienced relational database architects may be surprised to learn that Big Data usually cannot be indexed in the same way as relational databases. The indexes become too large to be manageable and performant. Instead, we "partition" data by putting it into sub-directories. Each partition can represent a column (or a composite set of columns) in the table. This lets us avoid scanning the entire data set... *if* our queries are based on the partition column. However, in the real world, analysts are running a wide range of queries which may or may not be based on the partition column. In these scenarios, there is no benefit to partitioning. In addition, partitioning breaks down if we choose a partition column with extremely high cardinality.
# MAGIC
# MAGIC - Data lakes typically live in cloud storage (e.g., S3 on AWS, ADLS on Azure), and these storage devices are quite slow compared to SSD disk drives. Most data lakes have no capability to cache data on faster devices, and this fact has a major impact on performance.
# MAGIC
# MAGIC __*Delta Lake was built to solve these reliability and performance problems.*__ First, let's consider how Delta Lake addresses *reliability* issues...
# MAGIC
# MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/delta_reliability.png?raw=true" width=1000/>
# MAGIC
# MAGIC Note the Key Features in the graphic above. We'll be diving into all of these capabilities as we go through the Workshop:
# MAGIC
# MAGIC - __ACID Transactions:__ Delta Lake ACID compliance ensures that half-completed transactions are never persisted in the Lake, and concurrent users never see other users' in-flight transactions.
# MAGIC
# MAGIC - __Mutations:__ Experienced relational database architects may be surprised to learn that most data lakes do not support updates and deletes. These lakes concern themselves only with data ingest, which makes error correction and backfill very difficult. In contrast, Delta Lake provides full support for Inserts, Updates, and Deletes.
# MAGIC
# MAGIC - __Schema Enforcement:__ Delta Lake provides full support for schema enforcement at write time, greatly increasing data reliability.
# MAGIC
# MAGIC - __Unified Batch and Streaming:__ Streaming data is becoming an essential capability for all enterprises. We'll see how Delta Lake supports both batch and streaming modes, and in fact blurs the line between them, enabling architects to design systems that use both batch and streaming capabilities simultaneously.
# MAGIC
# MAGIC - __Time Travel:__ unlike most data lakes, Delta Lake enables queries of data *as it existed* at a specific point in time. This has important ramifications for reliability, error recovery, and synchronization with other systems, as we shall see later in this Workshop.
# MAGIC
# MAGIC We have seen how Delta Lake enhances reliability. Next, let's see how Delta Lake optimizes __*performance*__...
# MAGIC
# MAGIC <img src="https://github.com/billkellett/flight-school-resources/blob/master/images/delta_performance.png?raw=true" width=1000/>
# MAGIC
# MAGIC Again, we'll be diving into all these capabilities throughout the Workshop. We'll be concentrating especially on features that are only available in Databricks' distribution of Delta Lake...
# MAGIC
# MAGIC - __Compaction:__ Delta Lake provides sophisticated capabilities to solve the "small-file problem" by compacting small files into larger units.
# MAGIC
# MAGIC - __Caching:__ Delta Lake transparently caches data on the SSD drives of worker nodes in a Spark cluster, greatly improving performance.
# MAGIC
# MAGIC - __Data Skipping:__ this Delta Lake feature goes far beyond the limits of mere partitioning.
# MAGIC
# MAGIC - __Z-Ordering:__ this is a brilliant alternative to traditional indexing, and further enhances Delta Lake performance.
# COMMAND ----------
# MAGIC %md
# MAGIC <img src="/files/flight/Proposed_Architecture.png?raw=true" width=1200/>
# COMMAND ----------
# Read the downloaded historical data into a dataframe
# This is MegaCorp data regarding power plant device performance. It pre-dates our new IOT effort, but we want to save this data and use it in queries.
# header/inferSchema: first CSV row is column names; column types are inferred.
dataPath = f"dbfs:/FileStore/flight/{team_name}/assignment_1_ingest.csv"
df = spark.read.option("header","true").option("inferSchema","true").csv(dataPath)
#display(df)
# Read the downloaded backfill data into a dataframe
# This is some backfill data that we'll need to merge into the main historical data.
backfillDataPath = f"dbfs:/FileStore/flight/{team_name}/assignment_1_backfill.csv"
df_backfill = spark.read.option("header","true").option("inferSchema","true").csv(backfillDataPath)
#display(df_backfill)
# Create a temporary view on the dataframes to enable SQL
df.createOrReplaceTempView("historical_bronze_vw")
df_backfill.createOrReplaceTempView("historical_bronze_backfill_vw")
# COMMAND ----------
# MAGIC %sql
# MAGIC
# MAGIC -- Create a Delta Lake table for the main bronze table
# MAGIC
# MAGIC DROP TABLE IF EXISTS sensor_readings_historical_bronze;
# MAGIC
# MAGIC CREATE TABLE sensor_readings_historical_bronze
# MAGIC AS SELECT * FROM historical_bronze_vw;
# COMMAND ----------
# MAGIC %sql
# MAGIC
# MAGIC -- Let's take a peek at our new bronze table
# MAGIC
# MAGIC SELECT * FROM sensor_readings_historical_bronze
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's count the records in the Bronze table
# MAGIC
# MAGIC SELECT COUNT(*) FROM sensor_readings_historical_bronze
# COMMAND ----------
# MAGIC %sql
# MAGIC
# MAGIC -- Analysing data? No problem! Let's take a look
# MAGIC
# MAGIC SELECT
# MAGIC count(*) as count, device_operational_status
# MAGIC FROM sensor_readings_historical_bronze
# MAGIC GROUP BY device_operational_status
# MAGIC ORDER BY count asc;
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Now let's make a query that accepts run-time parameters.
# MAGIC -- NOTE that we have set default values so that a default query will return results on this data
# MAGIC
# MAGIC CREATE WIDGET DROPDOWN PARAM_END_SECOND
# MAGIC DEFAULT '57'
# MAGIC CHOICES SELECT DISTINCT SECOND(reading_time) AS end_second FROM sensor_readings_historical_bronze ORDER BY end_second ASC;
# MAGIC CREATE WIDGET DROPDOWN PARAM_START_SECOND
# MAGIC DEFAULT '54'
# MAGIC CHOICES SELECT DISTINCT SECOND(reading_time) AS start_second FROM sensor_readings_historical_bronze ORDER BY start_second ASC;
# MAGIC CREATE WIDGET DROPDOWN PARAM_MINUTE
# MAGIC DEFAULT '18'
# MAGIC CHOICES SELECT DISTINCT MINUTE(reading_time) AS minute FROM sensor_readings_historical_bronze ORDER BY minute ASC;
# MAGIC CREATE WIDGET DROPDOWN PARAM_HOUR
# MAGIC DEFAULT '10'
# MAGIC CHOICES SELECT DISTINCT HOUR(reading_time) AS hour FROM sensor_readings_historical_bronze ORDER BY hour ASC;
# MAGIC CREATE WIDGET DROPDOWN PARAM_DAY
# MAGIC DEFAULT '23'
# MAGIC CHOICES SELECT DISTINCT DAY(reading_time) AS day FROM sensor_readings_historical_bronze ORDER BY day ASC;
# MAGIC CREATE WIDGET DROPDOWN PARAM_MONTH
# MAGIC DEFAULT '2'
# MAGIC CHOICES SELECT DISTINCT MONTH(reading_time) AS month FROM sensor_readings_historical_bronze ORDER BY month ASC;
# MAGIC CREATE WIDGET DROPDOWN PARAM_YEAR
# MAGIC DEFAULT '2015'
# MAGIC CHOICES SELECT DISTINCT YEAR(reading_time) AS year FROM sensor_readings_historical_bronze ORDER BY year ASC;
# MAGIC CREATE WIDGET DROPDOWN PARAM_DEVICE_ID
# MAGIC DEFAULT '7G007R'
# MAGIC CHOICES SELECT DISTINCT device_id FROM sensor_readings_historical_bronze ORDER BY device_id ASC;
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's make a query that shows another meaningful graphical view of the table
# MAGIC -- We'll parameterize this query so a Business Analyst can examine fine-grained device performance issues
# MAGIC -- Experiment with different graphical views
# MAGIC
# MAGIC SELECT
# MAGIC reading_time,
# MAGIC reading_1,
# MAGIC reading_2,
# MAGIC reading_3
# MAGIC FROM sensor_readings_historical_bronze
# MAGIC WHERE
# MAGIC device_id = getArgument("PARAM_DEVICE_ID")
# MAGIC AND
# MAGIC YEAR(reading_time) = getArgument("PARAM_YEAR")
# MAGIC AND
# MAGIC MONTH(reading_time) = getArgument("PARAM_MONTH")
# MAGIC AND
# MAGIC DAY(reading_time) = getArgument("PARAM_DAY")
# MAGIC AND
# MAGIC HOUR(reading_time) = getArgument("PARAM_HOUR")
# MAGIC AND
# MAGIC MINUTE(reading_time) = getArgument("PARAM_MINUTE")
# MAGIC AND
# MAGIC SECOND(reading_time) BETWEEN getArgument("PARAM_START_SECOND")
# MAGIC AND getArgument("PARAM_END_SECOND")
# MAGIC ORDER BY reading_time ASC
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's clean up that messy collection of widgets!
# MAGIC
# MAGIC REMOVE WIDGET PARAM_DEVICE_ID;
# MAGIC REMOVE WIDGET PARAM_YEAR;
# MAGIC REMOVE WIDGET PARAM_MONTH;
# MAGIC REMOVE WIDGET PARAM_DAY;
# MAGIC REMOVE WIDGET PARAM_HOUR;
# MAGIC REMOVE WIDGET PARAM_MINUTE;
# MAGIC REMOVE WIDGET PARAM_START_SECOND;
# MAGIC REMOVE WIDGET PARAM_END_SECOND;
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's take a peek at the backfill data
# MAGIC
# MAGIC SELECT * FROM historical_bronze_backfill_vw
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's count the records in the backfill data
# MAGIC
# MAGIC SELECT COUNT(*) FROM historical_bronze_backfill_vw
# COMMAND ----------
# MAGIC %md
# MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300>
# MAGIC
# MAGIC ## Let's talk about the Medallion Architecture and how it can help ensure data quality
# MAGIC <img src="https://databricks.com/wp-content/uploads/2021/05/Bronze-Silver-Gold-Tables.png" width=600>
# MAGIC
# MAGIC
# MAGIC
# MAGIC MegaCorp has informed us that the Bronze historical data has a few issues. Let's deal with them and create a clean Silver table.
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's create a Silver table. We'll start with the Bronze data, then make several improvements
# MAGIC
# MAGIC DROP TABLE IF EXISTS sensor_readings_historical_silver;
# MAGIC
# MAGIC CREATE TABLE sensor_readings_historical_silver
# MAGIC AS SELECT * FROM historical_bronze_vw;
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's take a peek at our new Silver table
# MAGIC
# MAGIC SELECT * FROM sensor_readings_historical_silver
# MAGIC ORDER BY reading_time ASC
# COMMAND ----------
# MAGIC %md
# MAGIC #### Let's rectify the bad sensor readings in our data
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's merge in the Bronze backfill data
# MAGIC -- MERGE INTO is one of the most important differentiators for Delta Lake
# MAGIC -- The entire backfill batch will be treated as an atomic transaction,
# MAGIC -- and we can do both inserts and updates within a single batch.
# MAGIC
# MAGIC MERGE INTO sensor_readings_historical_silver AS h
# MAGIC USING historical_bronze_backfill_vw AS b
# MAGIC ON
# MAGIC h.id = b.id
# MAGIC WHEN MATCHED THEN UPDATE SET *
# MAGIC WHEN NOT MATCHED THEN INSERT *;
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Verify that the upserts worked correctly.
# MAGIC -- Newly inserted records have dates of 2015-02-21 (and id value beginning with 'ZZZ')
# MAGIC -- Updated records have id's in the backfill data that do NOT begin with 'ZZZ'.
# MAGIC -- Check a few of these, and make sure that a tiny value was added to reading_1.
# MAGIC -- In order to check, you might try something similar to...
# MAGIC -- %sql
# MAGIC select a.id, a.reading_1 as reading_1_silver, b.reading_1 as reading_1_bronze
# MAGIC from sensor_readings_historical_silver a
# MAGIC inner join sensor_readings_historical_bronze b
# MAGIC on a.id = b.id
# MAGIC where a.reading_1 <> b.reading_1
# COMMAND ----------
# MAGIC %sql
# MAGIC -- MegaCorp just informed us of some dirty data. Occasionally they would receive garbled data.
# MAGIC -- In those cases, they would put 999.99 in the readings.
# MAGIC -- Let's find these records
# MAGIC
# MAGIC SELECT *
# MAGIC FROM sensor_readings_historical_silver
# MAGIC WHERE reading_1 = 999.99
# COMMAND ----------
# MAGIC %sql
# MAGIC -- We want to fix these bogus readings. Here's the idea...
# MAGIC -- - Use a SQL window function to order the readings by time within each device
# MAGIC -- - Whenever there is a 999.99 reading, replace it with the AVERAGE of the PREVIOUS and FOLLOWING readings.
# MAGIC -- HINTS:
# MAGIC -- Window functions use an "OVER" clause... OVER (PARTITION BY ... ORDER BY )
# MAGIC -- Look up the doc for SQL functions LAG() and LEAD()
# MAGIC
# MAGIC -- We'll create a table of these interpolated readings, then later we'll merge it into the Silver table.
# MAGIC
# MAGIC DROP TABLE IF EXISTS sensor_readings_historical_interpolations;
# MAGIC
# MAGIC CREATE TABLE sensor_readings_historical_interpolations AS (
# MAGIC WITH lags_and_leads AS (
# MAGIC SELECT
# MAGIC id,
# MAGIC reading_time,
# MAGIC device_type,
# MAGIC device_id,
# MAGIC device_operational_status,
# MAGIC reading_1,
# MAGIC LAG(reading_1, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_1_lag,
# MAGIC LEAD(reading_1, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_1_lead,
# MAGIC reading_2,
# MAGIC LAG(reading_2, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_2_lag,
# MAGIC LEAD(reading_2, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_2_lead,
# MAGIC reading_3,
# MAGIC LAG(reading_3, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_3_lag,
# MAGIC LEAD(reading_3, 1, 0) OVER (PARTITION BY device_id ORDER BY reading_time ASC, id ASC) AS reading_3_lead
# MAGIC FROM sensor_readings_historical_silver
# MAGIC )
# MAGIC SELECT
# MAGIC id,
# MAGIC reading_time,
# MAGIC device_type,
# MAGIC device_id,
# MAGIC device_operational_status,
# MAGIC ((reading_1_lag + reading_1_lead) / 2) AS reading_1,
# MAGIC ((reading_2_lag + reading_2_lead) / 2) AS reading_2,
# MAGIC ((reading_3_lag + reading_3_lead) / 2) AS reading_3
# MAGIC FROM lags_and_leads
# MAGIC WHERE reading_1 = 999.99
# MAGIC ORDER BY id ASC
# MAGIC )
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's see how many interpolations we have. There should be 367 rows.
# MAGIC
# MAGIC SELECT COUNT(*) FROM sensor_readings_historical_interpolations
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Now use MERGE INTO to update the historical table
# MAGIC MERGE INTO sensor_readings_historical_silver AS s
# MAGIC USING sensor_readings_historical_interpolations AS i
# MAGIC ON
# MAGIC s.id = i.id
# MAGIC WHEN MATCHED THEN UPDATE SET *
# MAGIC WHEN NOT MATCHED THEN INSERT *;
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Now make sure we got rid of all the bogus readings.
# MAGIC -- Gee, this is fast. Why? What feature in Delta Lake is making this so speedy?
# MAGIC
# MAGIC SELECT count(*)
# MAGIC FROM sensor_readings_historical_silver
# MAGIC WHERE reading_1 = 999.99
# COMMAND ----------
# MAGIC %md
# MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300>
# MAGIC ####Time Travel - Go back to the last known stable state of your data
# COMMAND ----------
# MAGIC %sql
# MAGIC -- List all the versions of the table that are available to us
# MAGIC
# MAGIC DESCRIBE HISTORY sensor_readings_historical_silver
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Ah, version 1 should have the 999.99 values
# MAGIC
# MAGIC SELECT
# MAGIC *
# MAGIC FROM
# MAGIC sensor_readings_historical_silver
# MAGIC VERSION
# MAGIC AS OF 1
# MAGIC WHERE
# MAGIC reading_1 = 999.99;
# COMMAND ----------
# Show built-in documentation for the Databricks file-system utilities
dbutils.fs.help()
# COMMAND ----------
# Preview the first bytes of the raw ingest CSV straight from DBFS
dbutils.fs.head(f"dbfs:/FileStore/flight/{team_name}/assignment_1_ingest.csv")
# COMMAND ----------
# MAGIC %md
# MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300>
# MAGIC ###Handling Schema Evolution
# COMMAND ----------
# Read the downloaded historical data into a dataframe
# This is MegaCorp data regarding power plant device performance. It pre-dates our new IOT effort, but we want to save this data and use it in queries.
# NOTE(review): this CSV presumably carries an extra `reading_4` column
# (the schema-evolution queries below select it) — confirm against the file.
dataPath = f"dbfs:/FileStore/flight/{team_name}/sensor_new_schema.csv"
df = spark.read.option("header","true").option("inferSchema","true").csv(dataPath)
display(df)
# Create a temporary view on the dataframes to enable SQL
df.createOrReplaceTempView("new_schema_bronze_vw")
# COMMAND ----------
# MAGIC %sql
# MAGIC INSERT INTO sensor_readings_historical_bronze
# MAGIC SELECT
# MAGIC *
# MAGIC FROM
# MAGIC new_schema_bronze_vw;
# COMMAND ----------
# MAGIC %sql
# MAGIC set spark.databricks.delta.schema.autoMerge.enabled=true;
# MAGIC
# MAGIC INSERT INTO sensor_readings_historical_bronze
# MAGIC SELECT
# MAGIC *
# MAGIC FROM
# MAGIC new_schema_bronze_vw;
# COMMAND ----------
# MAGIC %sql
# MAGIC SELECT
# MAGIC *
# MAGIC FROM
# MAGIC sensor_readings_historical_bronze
# MAGIC WHERE
# MAGIC reading_4 IS NOT NULL;
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Here is an example of a Gold table
# MAGIC DROP TABLE IF EXISTS sensor_readings_historical_gold_stats;
# MAGIC
# MAGIC CREATE TABLE sensor_readings_historical_gold_stats AS
# MAGIC SELECT
# MAGIC device_id
# MAGIC , avg(reading_1) as avg_1
# MAGIC , avg(reading_2) as avg_2
# MAGIC , avg(reading_3) as avg_3
# MAGIC , min(reading_1) as min_1
# MAGIC , min(reading_2) as min_2
# MAGIC , min(reading_3) as min_3
# MAGIC , max(reading_1) as max_1
# MAGIC , max(reading_2) as max_2
# MAGIC , max(reading_3) as max_3
# MAGIC FROM
# MAGIC sensor_readings_historical_silver
# MAGIC GROUP BY
# MAGIC device_id
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Range of sensor readings: minimum, maximum, and the average.
# MAGIC SELECT
# MAGIC device_id
# MAGIC , min_1
# MAGIC , max_1
# MAGIC , avg_1
# MAGIC , min_2
# MAGIC , max_2
# MAGIC , avg_2
# MAGIC , min_3
# MAGIC , max_3
# MAGIC , avg_3
# MAGIC FROM
# MAGIC sensor_readings_historical_gold_stats
# COMMAND ----------
# MAGIC %md
# MAGIC <img src="https://databricks.com/wp-content/uploads/2021/08/photon-icon.svg">
# MAGIC ###All that is great! But what about performance??
# MAGIC #### Databricks' Runtime has been consistently benchmarked an order of magnitude faster compared to OSS and other vendors' Spark, as well as various other offerings
# MAGIC #### Photon takes the performance to the next level
# MAGIC
# MAGIC Please see the following link for further details.
# MAGIC https://databricks.com/product/photon
# MAGIC https://databricks.com/blog/2017/07/12/benchmarking-big-data-sql-platforms-in-the-cloud.html
# MAGIC https://databricks.com/blog/2021/11/02/databricks-sets-official-data-warehousing-performance-record.html
# MAGIC https://pages.databricks.com/Benchmarking-Big-Data-Platforms.html
# COMMAND ----------
# MAGIC %md
# MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300>
# MAGIC ####Delta Lake features for enhanced performance
# MAGIC
# MAGIC Let's begin with __*partition*__
# COMMAND ----------
# MAGIC %sql
# MAGIC -- DESCRIBE EXTENDED will give us some partition information, and will also tell us the location of the data
# MAGIC -- Hmmm, looks like we are not partitioned. What does that mean?
# MAGIC
# MAGIC DESCRIBE EXTENDED sensor_readings_historical_silver
# COMMAND ----------
# Let's look at the physical file layout in a non-partitioned table
# `database_name` comes from the setup cell; the table lives under the default Hive warehouse path.
dbutils.fs.ls(f"dbfs:/user/hive/warehouse/{database_name}.db/sensor_readings_historical_silver")
# As you can see, the data is just broken into a set of files, without regard to the meaning of the data
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's create a Silver table partitioned by Device.
# MAGIC -- Create a new table, so we can compare new and old
# MAGIC
# MAGIC DROP TABLE IF EXISTS sensor_readings_historical_silver_by_device;
# MAGIC
# MAGIC CREATE TABLE sensor_readings_historical_silver_by_device
# MAGIC PARTITIONED BY (device_id)
# MAGIC AS SELECT * FROM sensor_readings_historical_silver
# COMMAND ----------
# MAGIC %sql
# MAGIC -- We can see partition information
# MAGIC
# MAGIC DESCRIBE EXTENDED sensor_readings_historical_silver_by_device
# COMMAND ----------
# Now we have subdirectories for each device, with physical files inside them
# Will that speed up queries?
# The listing should show one `device_id=<value>` subdirectory per partition
# (the table above was created with PARTITIONED BY (device_id)).
dbutils.fs.ls(f"dbfs:/user/hive/warehouse/{database_name}.db/sensor_readings_historical_silver_by_device")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's create a Silver table partitioned by BOTH Date AND Hour.
# MAGIC -- Note that Delta cannot partition by expressions, so I have to explicitly create the partition columns
# MAGIC -- HINT: Use the DATE() function to extract date from a timestamp, and use the HOUR() function to extract hour from a timestamp
# MAGIC
# MAGIC DROP TABLE IF EXISTS sensor_readings_historical_silver_by_hour;
# MAGIC
# MAGIC CREATE TABLE sensor_readings_historical_silver_by_hour
# MAGIC PARTITIONED BY (reading_date, reading_hour)
# MAGIC AS SELECT
# MAGIC *
# MAGIC , DATE(reading_time) as reading_date
# MAGIC , HOUR(reading_time) as reading_hour
# MAGIC FROM
# MAGIC sensor_readings_historical_silver
# COMMAND ----------
# NOTE how the hour directories are nested within the date directories
# (partition directories nest in PARTITIONED BY order: reading_date, then reading_hour)
dbutils.fs.ls(f"dbfs:/user/hive/warehouse/{database_name}.db/sensor_readings_historical_silver_by_hour/reading_date=2015-02-24")
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's create a Silver table partitioned by Date AND Hour AND Minute.
# MAGIC -- Note that Delta cannot partition by expressions, so I have to explicitly create the partition columns
# MAGIC
# MAGIC DROP TABLE IF EXISTS sensor_readings_historical_silver_by_hour_and_minute;
# MAGIC
# MAGIC CREATE TABLE sensor_readings_historical_silver_by_hour_and_minute
# MAGIC PARTITIONED BY (reading_date, reading_hour, reading_minute)
# MAGIC AS
# MAGIC SELECT
# MAGIC *
# MAGIC , DATE(reading_time) as reading_date
# MAGIC , HOUR(reading_time) as reading_hour
# MAGIC , MINUTE(reading_time) as reading_minute
# MAGIC FROM
# MAGIC sensor_readings_historical_silver
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Let's take a peek at our minute-partitioned table
# MAGIC
# MAGIC SELECT
# MAGIC *
# MAGIC FROM
# MAGIC sensor_readings_historical_silver_by_hour_and_minute
# MAGIC LIMIT 100
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Now let's take some timings that compare our partitioned Silver tables against the unpartitioned Silver table
# MAGIC -- Here is an example "baseline" query against the unpartitioned Silver table
# MAGIC -- (run these queries several times to get a rough average)
# MAGIC
# MAGIC SELECT
# MAGIC *
# MAGIC FROM
# MAGIC sensor_readings_historical_silver
# MAGIC WHERE
# MAGIC DATE(reading_time) = '2015-02-24'
# MAGIC AND HOUR(reading_time) = '14'
# MAGIC AND MINUTE(reading_time) = '2'
# COMMAND ----------
# MAGIC %sql
# MAGIC -- Now compare the time for the same query against a partitioned table
# MAGIC -- Think and discuss... Did both data skipping and partitioning play a part here? How could you combine data skipping and partitioning to make queries even more performant?
# MAGIC
# MAGIC SELECT
# MAGIC *
# MAGIC FROM
# MAGIC sensor_readings_historical_silver_by_hour_and_minute
# MAGIC WHERE
# MAGIC reading_date = '2015-02-24'
# MAGIC AND reading_hour = '14'
# MAGIC AND reading_minute = '2'
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300>
# MAGIC ####Delta Lake Caching
# COMMAND ----------
# MAGIC %sql
# MAGIC CACHE SELECT * FROM sensor_readings_historical_silver
# COMMAND ----------
# MAGIC %md
# MAGIC #Delta Live Tables
# MAGIC ##Reliable data engineering made easy
# MAGIC
# MAGIC Delta Live Tables (DLT) makes it easy to build and manage reliable data pipelines that deliver high quality data on Delta Lake. DLT helps data engineering teams simplify ETL development and management with declarative pipeline development, automatic data testing, and deep visibility for monitoring and recovery.
# MAGIC
# MAGIC Here's what Delta Live Tables do for you.
# MAGIC
# MAGIC ###More easily build and maintain data pipelines
# MAGIC <img src="https://databricks.com/wp-content/uploads/2021/09/Live-Tables-Pipeline.png">
# MAGIC
# MAGIC ---
# MAGIC ###Automatic Testing
# MAGIC <img src="https://databricks.com/wp-content/uploads/2021/05/Bronze-Silver-Gold-Tables.png">
# MAGIC
# MAGIC ---
# MAGIC ###Deep visibility for monitoring and easy recovery
# MAGIC <img src="https://databricks.com/wp-content/uploads/2021/05/Pipeline-Graph.png">
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC <img src="https://docs.delta.io/latest/_static/delta-lake-logo.png" width=300>
# MAGIC Read more about Delta Lake - https://delta.io/
# MAGIC Read more about Delta Live Tables - https://databricks.com/product/delta-live-tables
# MAGIC Read more about Caching - https://docs.databricks.com/delta/optimizations/delta-cache.html
# MAGIC Read more about ZOrdering - https://docs.databricks.com/delta/optimizations/file-mgmt.html
# COMMAND ----------
# MAGIC %sql
# MAGIC drop table if exists dummy1;
# MAGIC create table dummy1 as select * from sensor_readings_historical_bronze limit 5;
# COMMAND ----------
# MAGIC %sql
# MAGIC describe history dummy1
# COMMAND ----------
# Inspect the files backing the `dummy1` Delta table (data files plus _delta_log/)
dbutils.fs.ls(f"dbfs:/user/hive/warehouse/dummy1")
# COMMAND ----------
# MAGIC %sql
# MAGIC select * from dummy1 version as of 17
# COMMAND ----------
# MAGIC %sql
# MAGIC DESCRIBE HISTORY DUMMY1
# COMMAND ----------
# MAGIC %sql
# MAGIC alter table dummy1 set tblproperties(delta.logRetentionDuration="interval 0 hours")
# COMMAND ----------
# MAGIC %sql
# MAGIC show tblproperties dummy1
# COMMAND ----------
# MAGIC %sql
# MAGIC set spark.databricks.delta.retentionDurationCheck.enabled = false;
# MAGIC
# MAGIC VACUUM dummy1 RETAIN 0 HOURS
# COMMAND ----------
# MAGIC %sql
# MAGIC update dummy1 set reading_1=126 where id="34fb2d8a-5829-4036-adea-a08ccc2c260c"
# COMMAND ----------
# MAGIC %sql
# MAGIC select * from dummy1
# COMMAND ----------
# MAGIC %sql
# MAGIC optimize dummy1
# COMMAND ----------
# List the table files again after OPTIMIZE; old part files presumably remain
# alongside the compacted ones until VACUUM runs — confirm in the listing.
dbutils.fs.ls(f"dbfs:/user/hive/warehouse//dummy1")
# COMMAND ----------
# Inspect the Delta transaction-log directory
dbutils.fs.ls(f"dbfs:/user/hive/warehouse/dummy1/_delta_log")
# COMMAND ----------
# Read a single transaction-log entry as JSON to inspect the commit's actions
df = spark.read.json("dbfs:/user/hive/warehouse/dummy1/_delta_log/00000000000000000010.json")
# COMMAND ----------
# MAGIC %sql
# MAGIC set spark.databricks.delta.formatCheck.enabled=false
# COMMAND ----------
# MAGIC %scala
# MAGIC
# MAGIC val df = spark.read.parquet("dbfs:/user/hive/warehouse/dummy1/part-00000-7f2a6d7c-1d44-47e1-a37c-da877c15e860-c000.snappy.parquet")
# MAGIC display(df)
# COMMAND ----------
# MAGIC %scala
# MAGIC
# MAGIC val df = spark.read.parquet("dbfs:/user/hive/warehouse/dummy1/_delta_log/00000000000000000020.checkpoint.parquet")
# MAGIC display(df)
# COMMAND ----------
# Read one transaction-log entry; each numbered JSON file records a single commit
df = spark.read.json("dbfs:/user/hive/warehouse//dummy1/_delta_log/00000000000000000028.json")
display(df)
##Out[59]: [FileInfo(path='dbfs:/user/hive/warehouse/dummy1/_delta_log/', name='_delta_log/', size=0),
## FileInfo(path='dbfs:/user/hive/warehouse/dummy1/part-00000-7d18212c-7240-4194-94d2-a34f56195076-c000.snappy.parquet', name='part-00000-7d18212c-7240-4194-94d2-a34f56195076-c000.snappy.parquet', size=3292),
## FileInfo(path='dbfs:/user/hive/warehouse/dummy1/part-00000-7f2a6d7c-1d44-47e1-a37c-da877c15e860-c000.snappy.parquet', name='part-00000-7f2a6d7c-1d44-47e1-a37c-da877c15e860-c000.snappy.parquet', size=3293),
## FileInfo(path='dbfs:/user/hive/warehouse/dummy1/part-00000-940cde1d-5373-4986-aa73-cd099ae021d7-c000.snappy.parquet', name='part-00000-940cde1d-5373-4986-aa73-cd099ae021d7-c000.snappy.parquet', size=3293)]
# COMMAND ----------
# CAUTION: deleting a transaction-log file is destructive — presumably done here
# only to demonstrate what breaks (e.g. time travel to early versions); confirm intent.
dbutils.fs.rm(f"dbfs:/user/hive/warehouse//dummy1/_delta_log/00000000000000000000.json")
# COMMAND ----------
# MAGIC %sql
# MAGIC describe formatted dummy1
# COMMAND ----------
# MAGIC %sql
# MAGIC VACUUM dummy1 retain 0 hours
# COMMAND ----------
|
# Setup: prompt for the team name and run the shared setup notebook, which
# returns (local path, DBFS path, database name) as a space-separated string.
dbutils.widgets.text('team_name', "Enter your team's name")
team_name = dbutils.widgets.get('team_name')
setup_responses = dbutils.notebook.run('./includes/flight_school_assignment_1_setup', 0, {'team_name': team_name}).split()
local_data_path = setup_responses[0]
dbfs_data_path = setup_responses[1]
database_name = setup_responses[2]
print(f'Path to be used for Local Files: {local_data_path}')
print(f'Path to be used for DBFS Files: {dbfs_data_path}')
print(f'Database Name: {database_name}')
spark.sql(f'USE {database_name}')

# Ingest the historical sensor data.
data_path = f'dbfs:/FileStore/flight/{team_name}/assignment_1_ingest.csv'
# BUG FIX: the reader previously referenced `dataPath`, which is undefined in
# this script (the variable above is `data_path`) and raised a NameError.
df = spark.read.option('header', 'true').option('inferSchema', 'true').csv(data_path)

# Ingest the backfill data.
backfill_data_path = f'dbfs:/FileStore/flight/{team_name}/assignment_1_backfill.csv'
# BUG FIX: previously referenced undefined `backfillDataPath`.
df_backfill = spark.read.option('header', 'true').option('inferSchema', 'true').csv(backfill_data_path)

# Session-scoped temp views so the data can be queried with SQL.
df.createOrReplaceTempView('historical_bronze_vw')
df_backfill.createOrReplaceTempView('historical_bronze_backfill_vw')

dbutils.fs.help()
dbutils.fs.head(f'dbfs:/FileStore/flight/{team_name}/assignment_1_ingest.csv')

# Ingest the new-schema sensor data (adds columns relative to the original schema).
data_path = f'dbfs:/FileStore/flight/{team_name}/sensor_new_schema.csv'
# BUG FIX: previously referenced undefined `dataPath`.
df = spark.read.option('header', 'true').option('inferSchema', 'true').csv(data_path)
display(df)
df.createOrReplaceTempView('new_schema_bronze_vw')

# Inspect the physical layout of the various tables.
dbutils.fs.ls(f'dbfs:/user/hive/warehouse/{database_name}.db/sensor_readings_historical_silver')
dbutils.fs.ls(f'dbfs:/user/hive/warehouse/{database_name}.db/sensor_readings_historical_silver_by_device')
dbutils.fs.ls(f'dbfs:/user/hive/warehouse/{database_name}.db/sensor_readings_historical_silver_by_hour/reading_date=2015-02-24')
dbutils.fs.ls(f'dbfs:/user/hive/warehouse/dummy1')
dbutils.fs.ls(f'dbfs:/user/hive/warehouse//dummy1')
dbutils.fs.ls(f'dbfs:/user/hive/warehouse/dummy1/_delta_log')

# Read individual Delta transaction-log entries (one JSON file per commit).
df = spark.read.json('dbfs:/user/hive/warehouse/dummy1/_delta_log/00000000000000000010.json')
df = spark.read.json('dbfs:/user/hive/warehouse//dummy1/_delta_log/00000000000000000028.json')
display(df)
# CAUTION: removing a transaction-log file breaks access to earlier table versions.
dbutils.fs.rm(f'dbfs:/user/hive/warehouse//dummy1/_delta_log/00000000000000000000.json')
|
# binary search
def binary(srchlist, srch):
    """Return the index of ``srch`` in ``srchlist``, or -1 if absent.

    ``srchlist`` must be sorted in ascending order. Iterative binary
    search: O(log n) time, O(1) space.
    """
    first = 0
    last = len(srchlist) - 1
    while first <= last:
        # BUG FIX: `(first + last) / 2` is true division in Python 3 and
        # yields a float, which raises TypeError when used as a list
        # index. Floor division keeps `mid` an int.
        mid = (first + last) // 2
        if srch > srchlist[mid]:
            first = mid + 1
        elif srch < srchlist[mid]:
            last = mid - 1
        else:
            return mid
    return -1
|
def binary(srchlist, srch):
    """Return the index of ``srch`` in the ascending-sorted ``srchlist``,
    or -1 when the element is not present.
    """
    lo = 0
    hi = len(srchlist) - 1
    while lo <= hi:
        # BUG FIX: `/` is true division in Python 3 and produced a float
        # index (TypeError on list subscription); use floor division.
        mid = (lo + hi) // 2
        if srch > srchlist[mid]:
            lo = mid + 1
        elif srch < srchlist[mid]:
            hi = mid - 1
        else:
            return mid
    return -1
|
#Longest Collatz Sequence
#Solving for Project Euler.Net Problem 14.
#Given n -> n/2 (n is even)
# n -> 3n + 1 (n is odd)
#
#Which starting number, under one million, produces the longest chain?
#
#By Alex Murshak
def collatz(n):
    """Return the number of Collatz steps needed to reduce ``n`` to 1.

    Rule: n -> n // 2 when n is even, n -> 3n + 1 when n is odd.
    ``collatz(1)`` is 0.
    """
    count = 0
    while n > 1:
        if n % 2 == 0:
            # Floor division keeps n an int; true division (`n / 2`)
            # promotes n to float and loses precision for large chains.
            n //= 2
        else:
            n = 3 * n + 1
        count += 1
    return count
# Scan every starting number below one million and remember the one that
# produces the longest Collatz chain (Project Euler problem 14).
C_large = 0
I_large = 0
for i in range(1,1000000,1):
    C = collatz(i)
    if C> C_large:
        C_large = C
        I_large = i
# Print the starting number with the longest chain.
print(I_large)
|
def collatz(n):
    """Return the length of the Collatz chain starting at ``n``.

    Each step maps n -> n // 2 (even) or n -> 3n + 1 (odd); the count of
    steps until n reaches 1 is returned (0 for ``n <= 1``).
    """
    count = 0
    while n > 1:
        if n % 2 == 0:
            # BUG-PRONE: `n / 2` would turn n into a float; keep integers.
            n //= 2
        else:
            n = 3 * n + 1
        count += 1
    return count
# Track the starting number (below one million) with the longest Collatz chain.
c_large = 0
i_large = 0
for i in range(1, 1000000, 1):
    c = collatz(i)
    # BUG FIX: the comparison, assignment, and print previously used the
    # undefined names `C`, `C_large`, and `I_large` (NameError at runtime);
    # the variables defined in this script are lowercase.
    if c > c_large:
        c_large = c
        i_large = i
print(i_large)
|
class Solution:
    def romanToInt(self, s: str) -> int:
        """Convert a Roman numeral string to its integer value."""
        values = {
            "I": 1, "V": 5, "X": 10, "L": 50,
            "C": 100, "D": 500, "M": 1000,
        }
        # Rewrite each subtractive pair as an additive run so that every
        # remaining character can simply be summed.
        expansions = (
            ("IV", "IIII"), ("IX", "VIIII"),
            ("XL", "XXXX"), ("XC", "LXXXX"),
            ("CD", "CCCC"), ("CM", "DCCCC"),
        )
        for pair, run in expansions:
            s = s.replace(pair, run)
        return sum(values[ch] for ch in s)
|
class Solution:
    def roman_to_int(self, s: str) -> int:
        """Convert a Roman numeral string ``s`` to its integer value."""
        value_of = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
        # Expand subtractive notation into purely additive runs first;
        # the total is then just the sum of per-character values.
        for subtractive, additive in (
            ('IV', 'IIII'), ('IX', 'VIIII'),
            ('XL', 'XXXX'), ('XC', 'LXXXX'),
            ('CD', 'CCCC'), ('CM', 'DCCCC'),
        ):
            s = s.replace(subtractive, additive)
        return sum(value_of[c] for c in s)
|
# Easter bread calculator: given a budget and the price of 1 kg of flour,
# work out how many loaves can be baked, how many colored eggs remain,
# and how much money is left over.
budget = float(input())
flour_price = float(input())

# Derived ingredient prices: a pack of eggs costs 75% of the flour price;
# 250 ml of milk costs a quarter of the flour price increased by 25%.
eggs_pack_price = flour_price * 0.75
milk_price = (flour_price + (flour_price * 0.25)) / 4
loaf_price = eggs_pack_price + flour_price + milk_price

loaf_count = int(budget // loaf_price)

colored_eggs = 0
for loaf_no in range(1, loaf_count + 1):
    colored_eggs += 3
    # On every third loaf, (loaf_no - 2) of the colored eggs are lost.
    if loaf_no % 3 == 0:
        colored_eggs -= loaf_no - 2

money_left = budget - (loaf_count * loaf_price)
print(f"You made {loaf_count} loaves of Easter bread! Now you have {colored_eggs} eggs and {money_left:.2f}BGN left.")
|
# Reads a budget and a flour price from stdin, then reports how many
# loaves of Easter bread fit the budget and how many colored eggs remain.
budget = float(input())
flour = float(input())

egg_pack = flour * 0.75                 # a pack of eggs is 75% of the flour price
milk = (flour + flour * 0.25) / 4       # 250 ml of milk: quarter of flour price + 25%
loaf = egg_pack + flour + milk

loaves = int(budget // loaf)

eggs = 0
current = 1
while current <= loaves:
    eggs += 3
    if current % 3 == 0:
        # Every third loaf costs (current - 2) of the colored eggs.
        eggs -= current - 2
    current += 1

leftover = budget - loaves * loaf
print(f'You made {loaves} loaves of Easter bread! Now you have {eggs} eggs and {leftover:.2f}BGN left.')
|
def min_value(gameState):
    """Return the utility of *gameState* for player 1 when the game is
    over; otherwise the minimum, over all legal actions, of the
    max_value of the resulting state.

    NOTE: utility is always evaluated from player 1's perspective
    (``utility(0)``), never the active player's.
    """
    if gameState.terminal_test():
        return gameState.utility(0)
    # `default` mirrors the original accumulator's initial value when
    # there are no legal actions.
    return min(
        (max_value(gameState.result(a)) for a in gameState.actions()),
        default=float("inf"),
    )
def max_value(gameState):
    """Return the utility of *gameState* for player 1 when the game is
    over; otherwise the maximum, over all legal actions, of the
    min_value of the resulting state.

    NOTE: utility is always evaluated from player 1's perspective
    (``utility(0)``), never the active player's.
    """
    if gameState.terminal_test():
        return gameState.utility(0)
    # `default` mirrors the original accumulator's initial value when
    # there are no legal actions.
    return max(
        (min_value(gameState.result(a)) for a in gameState.actions()),
        default=float("-inf"),
    )
|
def min_value(gameState):
    """Minimizing player's turn: return the game-state utility (always
    from player 1's point of view, i.e. ``utility(0)``) at a terminal
    state, else the smallest max_value among all successor states.
    """
    if gameState.terminal_test():
        return gameState.utility(0)
    successors = [max_value(gameState.result(a)) for a in gameState.actions()]
    # No legal actions -> fall back to the original +infinity sentinel.
    return min(successors) if successors else float('inf')
def max_value(gameState):
    """Maximizing player's turn: return the game-state utility (always
    from player 1's point of view, i.e. ``utility(0)``) at a terminal
    state, else the largest min_value among all successor states.
    """
    if gameState.terminal_test():
        return gameState.utility(0)
    successors = [min_value(gameState.result(a)) for a in gameState.actions()]
    # No legal actions -> fall back to the original -infinity sentinel.
    return max(successors) if successors else float('-inf')
|
#!/usr/bin/env python3
"""
Exercise 20: Word Count
Mimic the Un*x "wc" command to count lines, words, and characters.
"""
def wc(filename):
    """Print character, word, distinct-word, and line counts for *filename*,
    mimicking the Un*x ``wc`` command (plus a distinct-word tally).
    """
    lines = 0
    words = 0
    characters = 0
    distinct_words = set()
    with open(filename) as f:
        for line in f:
            tokens = line.split()
            lines += 1
            words += len(tokens)
            # len(line) includes the trailing newline, like wc's char count.
            characters += len(line)
            distinct_words.update(tokens)
    print(f'Characters: {characters}.')
    print(f'Words: {words}.')
    print(f'Distinct Words: {len(distinct_words)}.')
    print(f'Lines: {lines}.')
if __name__ == '__main__':
    # Demo run against two sample files; assumes both exist in the
    # current working directory.
    wc('wcfile.txt')
    print()
    wc('rev_eli.txt')
"""
Exercise 20: Word Count
Mimic the Un*x "wc" command to count lines, words, and characters.
"""
def wc(filename):
    """Report counts for *filename* in the spirit of Un*x ``wc``:
    characters, words, distinct words, and lines, printed to stdout.
    """
    lines, words, characters = 0, 0, 0
    distinct_words = set()
    with open(filename) as handle:
        for line in handle:
            parts = line.split()
            lines += 1
            words += len(parts)
            characters += len(line)  # counts the newline too, as wc does
            distinct_words.update(parts)
    print(f'Characters: {characters}.')
    print(f'Words: {words}.')
    print(f'Distinct Words: {len(distinct_words)}.')
    print(f'Lines: {lines}.')
if __name__ == '__main__':
    # Demo run against two sample files; assumes both exist in the
    # current working directory.
    wc('wcfile.txt')
    print()
    wc('rev_eli.txt')
|
# O(n) time | O(h) space - where n is the number of nodes in the Binary Tree
# and h is the height of the Binary Tree
def nodeDepths(root, depth=0):
    """Return the sum of the depths of every node in the binary tree
    rooted at *root*, where *root* itself sits at *depth*.

    O(n) time and O(h) space for the explicit stack, where n is the
    number of nodes and h is the height of the tree.
    """
    total = 0
    stack = [(root, depth)]
    while stack:
        node, d = stack.pop()
        if node is None:
            continue
        total += d
        stack.append((node.left, d + 1))
        stack.append((node.right, d + 1))
    return total
# This is the class of the input binary tree.
class BinaryTree:
    """Node of a binary tree holding *value* and optional left/right children."""

    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

    def __repr__(self):
        # Debugging aid; children are elided to keep output compact.
        return f"{type(self).__name__}({self.value!r})"
|
def node_depths(root, depth=0):
    """Sum the depths of all nodes in the tree rooted at *root*, with
    *root* itself at *depth*. An empty tree contributes 0.
    """
    if root is None:
        return 0
    left_sum = node_depths(root.left, depth + 1)
    right_sum = node_depths(root.right, depth + 1)
    return depth + left_sum + right_sum
class Binarytree:
    """Node of a binary tree holding *value* and optional left/right children."""

    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

    def __repr__(self):
        # Debugging aid; children are elided to keep output compact.
        return f"{type(self).__name__}({self.value!r})"
|
# NOTE(review): auto-generated IronPython/.NET stub describing
# System.Windows.Media.Animation.KeySpline (WPF). Every method body is a
# deliberate `pass` placeholder -- the real implementation lives in the CLR.
# Member names, signatures, and docstrings form the generated contract, so
# the code below is left byte-for-byte untouched.
class KeySpline(Freezable,ISealable,IFormattable):
 """
 This class is used by a spline key frame to define animation progress.
 KeySpline(controlPoint1: Point,controlPoint2: Point)
 KeySpline()
 KeySpline(x1: float,y1: float,x2: float,y2: float)
 """
 def CloneCore(self,*args):
  """
  CloneCore(self: KeySpline,sourceFreezable: Freezable)
   Makes this instance a deep copy of the specified System.Windows.Media.Animation.KeySpline. When
    copying dependency properties,this method copies resource references and data bindings (but
    they might no longer resolve) but not animations or their current values.
  sourceFreezable: The System.Windows.Media.Animation.KeySpline to clone.
  """
  pass
 def CloneCurrentValueCore(self,*args):
  """
  CloneCurrentValueCore(self: KeySpline,sourceFreezable: Freezable)
   Makes this instance a modifiable deep copy of the specified
    System.Windows.Media.Animation.KeySpline using current property values. Resource references,
    data bindings,and animations are not copied,but their current values are.
  sourceFreezable: The System.Windows.Media.Animation.KeySpline to clone.
  """
  pass
 def CreateInstance(self,*args):
  """
  CreateInstance(self: Freezable) -> Freezable
   Initializes a new instance of the System.Windows.Freezable class.
  Returns: The new instance.
  """
  pass
 def CreateInstanceCore(self,*args):
  """
  CreateInstanceCore(self: KeySpline) -> Freezable
   Creates a new instance of System.Windows.Media.Animation.KeySpline.
  Returns: A new instance of System.Windows.Media.Animation.KeySpline.
  """
  pass
 def FreezeCore(self,*args):
  """
  FreezeCore(self: Freezable,isChecking: bool) -> bool
   Makes the System.Windows.Freezable object unmodifiable or tests whether it can be made
    unmodifiable.
  isChecking: true to return an indication of whether the object can be frozen (without actually freezing it);
     false to actually freeze the object.
  Returns: If isChecking is true,this method returns true if the System.Windows.Freezable can be made
     unmodifiable,or false if it cannot be made unmodifiable. If isChecking is false,this method
     returns true if the if the specified System.Windows.Freezable is now unmodifiable,or false if
     it cannot be made unmodifiable.
  """
  pass
 def GetAsFrozenCore(self,*args):
  """
  GetAsFrozenCore(self: KeySpline,sourceFreezable: Freezable)
   Makes this instance a clone of the specified System.Windows.Media.Animation.KeySpline object.
  sourceFreezable: The System.Windows.Media.Animation.KeySpline object to clone.
  """
  pass
 def GetCurrentValueAsFrozenCore(self,*args):
  """
  GetCurrentValueAsFrozenCore(self: KeySpline,sourceFreezable: Freezable)
   Makes this instance a frozen clone of the specified System.Windows.Media.Animation.KeySpline.
    Resource references,data bindings,and animations are not copied,but their current values are.
  sourceFreezable: The System.Windows.Media.Animation.KeySpline to copy and freeze.
  """
  pass
 def GetSplineProgress(self,linearProgress):
  """
  GetSplineProgress(self: KeySpline,linearProgress: float) -> float
   Calculates spline progress from a supplied linear progress.
  linearProgress: The linear progress to evaluate.
  Returns: The calculated spline progress.
  """
  pass
 def OnChanged(self,*args):
  """
  OnChanged(self: KeySpline)
   Called when the current System.Windows.Media.Animation.KeySpline object is modified.
  """
  pass
 def OnFreezablePropertyChanged(self,*args):
  """
  OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
   This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
    intended to be used directly from your code.
  oldValue: The previous value of the data member.
  newValue: The current value of the data member.
  property: The property that changed.
  OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
   Ensures that appropriate context pointers are established for a
    System.Windows.DependencyObjectType data member that has just been set.
  oldValue: The previous value of the data member.
  newValue: The current value of the data member.
  """
  pass
 def OnPropertyChanged(self,*args):
  """
  OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
   Overrides the System.Windows.DependencyObject implementation of
    System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
    gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
    dependency property of type System.Windows.Freezable.
  e: Event data that contains information about which property changed,and its old and new values.
  """
  pass
 def ReadPreamble(self,*args):
  """
  ReadPreamble(self: Freezable)
   Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
    System.Windows.Freezable must call this method at the beginning of any API that reads data
    members that are not dependency properties.
  """
  pass
 def ShouldSerializeProperty(self,*args):
  """
  ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
   Returns a value that indicates whether serialization processes should serialize the value for
    the provided dependency property.
  dp: The identifier for the dependency property that should be serialized.
  Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
  """
  pass
 def ToString(self,formatProvider=None):
  """
  ToString(self: KeySpline,formatProvider: IFormatProvider) -> str
   Creates a string representation of this System.Windows.Media.Animation.KeySpline based on the
    supplied System.IFormatProvider.
  formatProvider: The format provider to use. If provider is null,the current culture is used.
  Returns: A string representation of this instance of System.Windows.Media.Animation.KeySpline.
  ToString(self: KeySpline) -> str
   Creates a string representation of this instance of System.Windows.Media.Animation.KeySpline
    based on the current culture.
  Returns: A string representation of this System.Windows.Media.Animation.KeySpline.
  """
  pass
 def WritePostscript(self,*args):
  """
  WritePostscript(self: Freezable)
   Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
    its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
    should call this method at the end of any API that modifies class members that are not stored as
    dependency properties.
  """
  pass
 def WritePreamble(self,*args):
  """
  WritePreamble(self: Freezable)
   Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
    valid threading context. System.Windows.Freezable inheritors should call this method at the
    beginning of any API that writes to data members that are not dependency properties.
  """
  pass
 def __format__(self,*args):
  """ __format__(formattable: IFormattable,format: str) -> str """
  pass
 def __init__(self,*args):
  """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
  pass
 @staticmethod
 def __new__(self,*__args):
  """
  __new__(cls: type)
  __new__(cls: type,x1: float,y1: float,x2: float,y2: float)
  __new__(cls: type,controlPoint1: Point,controlPoint2: Point)
  """
  pass
 def __str__(self,*args):
  pass
 ControlPoint1=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """The first control point used to define a Bezier curve that describes a System.Windows.Media.Animation.KeySpline.
Get: ControlPoint1(self: KeySpline) -> Point
Set: ControlPoint1(self: KeySpline)=value
"""
 ControlPoint2=property(lambda self: object(),lambda self,v: None,lambda self: None)
 """The second control point used to define a Bezier curve that describes a System.Windows.Media.Animation.KeySpline.
Get: ControlPoint2(self: KeySpline) -> Point
Set: ControlPoint2(self: KeySpline)=value
"""
|
# NOTE(review): snake_case re-formatting of the generated KeySpline stub
# above; bodies are intentional `pass` placeholders for CLR-implemented
# members (see System.Windows.Media.Animation.KeySpline). The generated
# docstrings/signatures are the contract, so the code is left untouched.
class Keyspline(Freezable, ISealable, IFormattable):
    """
    This class is used by a spline key frame to define animation progress.
    KeySpline(controlPoint1: Point,controlPoint2: Point)
    KeySpline()
    KeySpline(x1: float,y1: float,x2: float,y2: float)
    """
    def clone_core(self, *args):
        """
        CloneCore(self: KeySpline,sourceFreezable: Freezable)
        Makes this instance a deep copy of the specified System.Windows.Media.Animation.KeySpline. When
        copying dependency properties,this method copies resource references and data bindings (but
        they might no longer resolve) but not animations or their current values.
        sourceFreezable: The System.Windows.Media.Animation.KeySpline to clone.
        """
        pass
    def clone_current_value_core(self, *args):
        """
        CloneCurrentValueCore(self: KeySpline,sourceFreezable: Freezable)
        Makes this instance a modifiable deep copy of the specified
        System.Windows.Media.Animation.KeySpline using current property values. Resource references,
        data bindings,and animations are not copied,but their current values are.
        sourceFreezable: The System.Windows.Media.Animation.KeySpline to clone.
        """
        pass
    def create_instance(self, *args):
        """
        CreateInstance(self: Freezable) -> Freezable
        Initializes a new instance of the System.Windows.Freezable class.
        Returns: The new instance.
        """
        pass
    def create_instance_core(self, *args):
        """
        CreateInstanceCore(self: KeySpline) -> Freezable
        Creates a new instance of System.Windows.Media.Animation.KeySpline.
        Returns: A new instance of System.Windows.Media.Animation.KeySpline.
        """
        pass
    def freeze_core(self, *args):
        """
        FreezeCore(self: Freezable,isChecking: bool) -> bool
        Makes the System.Windows.Freezable object unmodifiable or tests whether it can be made
        unmodifiable.
        isChecking: true to return an indication of whether the object can be frozen (without actually freezing it);
        false to actually freeze the object.
        Returns: If isChecking is true,this method returns true if the System.Windows.Freezable can be made
        unmodifiable,or false if it cannot be made unmodifiable. If isChecking is false,this method
        returns true if the if the specified System.Windows.Freezable is now unmodifiable,or false if
        it cannot be made unmodifiable.
        """
        pass
    def get_as_frozen_core(self, *args):
        """
        GetAsFrozenCore(self: KeySpline,sourceFreezable: Freezable)
        Makes this instance a clone of the specified System.Windows.Media.Animation.KeySpline object.
        sourceFreezable: The System.Windows.Media.Animation.KeySpline object to clone.
        """
        pass
    def get_current_value_as_frozen_core(self, *args):
        """
        GetCurrentValueAsFrozenCore(self: KeySpline,sourceFreezable: Freezable)
        Makes this instance a frozen clone of the specified System.Windows.Media.Animation.KeySpline.
        Resource references,data bindings,and animations are not copied,but their current values are.
        sourceFreezable: The System.Windows.Media.Animation.KeySpline to copy and freeze.
        """
        pass
    def get_spline_progress(self, linearProgress):
        """
        GetSplineProgress(self: KeySpline,linearProgress: float) -> float
        Calculates spline progress from a supplied linear progress.
        linearProgress: The linear progress to evaluate.
        Returns: The calculated spline progress.
        """
        pass
    def on_changed(self, *args):
        """
        OnChanged(self: KeySpline)
        Called when the current System.Windows.Media.Animation.KeySpline object is modified.
        """
        pass
    def on_freezable_property_changed(self, *args):
        """
        OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject,property: DependencyProperty)
        This member supports the Windows Presentation Foundation (WPF) infrastructure and is not
        intended to be used directly from your code.
        oldValue: The previous value of the data member.
        newValue: The current value of the data member.
        property: The property that changed.
        OnFreezablePropertyChanged(self: Freezable,oldValue: DependencyObject,newValue: DependencyObject)
        Ensures that appropriate context pointers are established for a
        System.Windows.DependencyObjectType data member that has just been set.
        oldValue: The previous value of the data member.
        newValue: The current value of the data member.
        """
        pass
    def on_property_changed(self, *args):
        """
        OnPropertyChanged(self: Freezable,e: DependencyPropertyChangedEventArgs)
        Overrides the System.Windows.DependencyObject implementation of
        System.Windows.DependencyObject.OnPropertyChanged(System.Windows.DependencyPropertyChangedEventAr
        gs) to also invoke any System.Windows.Freezable.Changed handlers in response to a changing
        dependency property of type System.Windows.Freezable.
        e: Event data that contains information about which property changed,and its old and new values.
        """
        pass
    def read_preamble(self, *args):
        """
        ReadPreamble(self: Freezable)
        Ensures that the System.Windows.Freezable is being accessed from a valid thread. Inheritors of
        System.Windows.Freezable must call this method at the beginning of any API that reads data
        members that are not dependency properties.
        """
        pass
    def should_serialize_property(self, *args):
        """
        ShouldSerializeProperty(self: DependencyObject,dp: DependencyProperty) -> bool
        Returns a value that indicates whether serialization processes should serialize the value for
        the provided dependency property.
        dp: The identifier for the dependency property that should be serialized.
        Returns: true if the dependency property that is supplied should be value-serialized; otherwise,false.
        """
        pass
    def to_string(self, formatProvider=None):
        """
        ToString(self: KeySpline,formatProvider: IFormatProvider) -> str
        Creates a string representation of this System.Windows.Media.Animation.KeySpline based on the
        supplied System.IFormatProvider.
        formatProvider: The format provider to use. If provider is null,the current culture is used.
        Returns: A string representation of this instance of System.Windows.Media.Animation.KeySpline.
        ToString(self: KeySpline) -> str
        Creates a string representation of this instance of System.Windows.Media.Animation.KeySpline
        based on the current culture.
        Returns: A string representation of this System.Windows.Media.Animation.KeySpline.
        """
        pass
    def write_postscript(self, *args):
        """
        WritePostscript(self: Freezable)
        Raises the System.Windows.Freezable.Changed event for the System.Windows.Freezable and invokes
        its System.Windows.Freezable.OnChanged method. Classes that derive from System.Windows.Freezable
        should call this method at the end of any API that modifies class members that are not stored as
        dependency properties.
        """
        pass
    def write_preamble(self, *args):
        """
        WritePreamble(self: Freezable)
        Verifies that the System.Windows.Freezable is not frozen and that it is being accessed from a
        valid threading context. System.Windows.Freezable inheritors should call this method at the
        beginning of any API that writes to data members that are not dependency properties.
        """
        pass
    def __format__(self, *args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod
    def __new__(self, *__args):
        """
        __new__(cls: type)
        __new__(cls: type,x1: float,y1: float,x2: float,y2: float)
        __new__(cls: type,controlPoint1: Point,controlPoint2: Point)
        """
        pass
    def __str__(self, *args):
        pass
    control_point1 = property(lambda self: object(), lambda self, v: None, lambda self: None)
    'The first control point used to define a Bezier curve that describes a System.Windows.Media.Animation.KeySpline.\n\n\n\nGet: ControlPoint1(self: KeySpline) -> Point\n\n\n\nSet: ControlPoint1(self: KeySpline)=value\n\n'
    control_point2 = property(lambda self: object(), lambda self, v: None, lambda self: None)
    'The second control point used to define a Bezier curve that describes a System.Windows.Media.Animation.KeySpline.\n\n\n\nGet: ControlPoint2(self: KeySpline) -> Point\n\n\n\nSet: ControlPoint2(self: KeySpline)=value\n\n'
|
# Lesson 1 - Hello world and entry points
#
# All languages have an "entry point"
# An entry point is where the program begins execution
# "Main" is a common keyword used to specify the entry point
#
# Common C style entry points:
# int main()
# {
# return 0;
# }
# or
# void main()
# {
# }
# Hello World is a common and simple IO program to test a language
# and teach very basic concepts like console I/O
def helloWorld():
    """Print the classic greeting used to sanity-check console output."""
    greeting = "Hello World"
    print(greeting)


if __name__ == '__main__':
    helloWorld()
|
def hello_world():
    """Entry point: emit the canonical greeting on stdout."""
    print('{0} {1}'.format('Hello', 'World'))


if __name__ == '__main__':
    hello_world()
|
PASSWD = '12345'


def password_required(func):
    """Decorator: run `func` only if the user types the correct password."""
    def wrapper():
        password = input('Cual es el passwd ? ')
        if password != PASSWD:
            print('error')
            return None
        return func()
    return wrapper


def p_decorate(func):
    """Wrap func's string result in <p> tags."""
    def func_wrapper(name):
        return f"<p>{func(name)}</p>"
    return func_wrapper


def strong_decorate(func):
    """Wrap func's string result in <strong> tags."""
    def func_wrapper(name):
        return f"<strong>{func(name)}</strong>"
    return func_wrapper


def div_decorate(func):
    """Wrap func's string result in <div> tags."""
    def func_wrapper(name):
        return f"<div>{func(name)}</div>"
    return func_wrapper


def upper_dec(func):
    """Decorator: upper-case whatever string func returns."""
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs).upper()
    return wrapper


@upper_dec
def get_my_name(name):
    return f"My name is {name}"


@div_decorate
@p_decorate
@strong_decorate
def get_text(name):
    return f"lorem ipsum, {name} dolor sit amet"


a = get_text


@password_required
def needs_password():
    print('la contra esta correcta!!')
########################################################
## test general arguments "*args,**kwargs"
def test_valor_kwargs(**kwargs):
if kwargs is not None:
for key, value in kwargs.items():
print('%s == %s' %(key,value))
print(type(kwargs))
def test_valor_args(n_arg, *args):
print('primer valor normal: ', n_arg)
for arg in args:
print('este es un valor de *args: ',arg)
print(type(args))
def test_valor_kwargs_args(*args, **kwargs):
print(type(kwargs))
print(kwargs)
print('----------')
print(type(args))
print(args)
################################################################################
### example decorators in classes
def p_decorate_cla(func):
    """Decorator usable on methods: wrap the returned string in <p> tags."""
    def func_wrapper(*args, **kwargs):
        inner = func(*args, **kwargs)
        return f"<p>{inner}</p>"
    return func_wrapper


class Person():
    """Example class whose full-name getter is wrapped by p_decorate_cla."""

    def __init__(self):
        self.name = "John"
        self.family = "Doe"

    @p_decorate_cla
    def get_fullname(self):
        return f"{self.name} {self.family}"
if __name__ == '__main__':
    # Demo: print the fully decorated text; the other sample calls
    # (kwargs/args demos, Person, needs_password) are left disabled.
    rendered = a('johan')
    print(rendered)
|
passwd = '12345'


def password_required(func):
    """Decorator: only invoke `func` after the user enters the right password.

    Prompts via input(); on a wrong password prints 'error' and returns None.
    """
    def wrapper():
        password = input('Cual es el passwd ? ')
        # BUG FIX: this previously compared against the undefined name
        # PASSWD, raising NameError on every call; the constant here is
        # the lowercase module-level `passwd`.
        if password == passwd:
            return func()
        else:
            print('error')
    return wrapper


def p_decorate(func):
    """Wrap func's string result in <p> tags."""
    def func_wrapper(name):
        return '<p>{0}</p>'.format(func(name))
    return func_wrapper


def strong_decorate(func):
    """Wrap func's string result in <strong> tags."""
    def func_wrapper(name):
        return '<strong>{0}</strong>'.format(func(name))
    return func_wrapper


def div_decorate(func):
    """Wrap func's string result in <div> tags."""
    def func_wrapper(name):
        return '<div>{0}</div>'.format(func(name))
    return func_wrapper


def upper_dec(func):
    """Decorator: upper-case the string returned by func."""
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        return result.upper()
    return wrapper


@upper_dec
def get_my_name(name):
    return 'My name is {0}'.format(name)


# Decorators apply bottom-up: strong first, then p, then div.
@div_decorate
@p_decorate
@strong_decorate
def get_text(name):
    return 'lorem ipsum, {0} dolor sit amet'.format(name)


a = get_text


@password_required
def needs_password():
    print('la contra esta correcta!!')
def test_valor_kwargs(**kwargs):
if kwargs is not None:
for (key, value) in kwargs.items():
print('%s == %s' % (key, value))
print(type(kwargs))
def test_valor_args(n_arg, *args):
print('primer valor normal: ', n_arg)
for arg in args:
print('este es un valor de *args: ', arg)
print(type(args))
def test_valor_kwargs_args(*args, **kwargs):
print(type(kwargs))
print(kwargs)
print('----------')
print(type(args))
print(args)
def p_decorate_cla(func):
    """Method-friendly decorator: wraps the string result in <p> tags."""
    def func_wrapper(*args, **kwargs):
        return '<p>' + func(*args, **kwargs) + '</p>'
    return func_wrapper


class Person:
    """Sample class demonstrating a decorated instance method."""

    def __init__(self):
        self.name = 'John'
        self.family = 'Doe'

    @p_decorate_cla
    def get_fullname(self):
        return ' '.join((self.name, self.family))
if __name__ == '__main__':
    # Render and display the fully decorated sample text.
    rendered = a('johan')
    print(rendered)
|
def euclid(n, m):
    """Greatest common divisor of n and m via Euclid's algorithm.

    Assumes positive integers -- ZeroDivisionError if the smaller
    argument is 0 (same as the original behavior).
    """
    if n > m:
        n, m = m, n
    # Invariant: n <= m; repeatedly replace (n, m) with (m mod n, n).
    while m % n:
        n, m = m % n, n
    return n
|
def euclid(n, m):
    """Return gcd(n, m) using the recursive form of Euclid's algorithm."""
    if n > m:
        n, m = m, n
    remainder = m % n
    return n if remainder == 0 else euclid(remainder, n)
|
"""Loads the gflags library"""
# Sanitize a dependency so that it works correctly from code that includes
# Apollo as a submodule.
def clean_dep(dep):
    """Sanitize a dependency label so it resolves correctly when Apollo
    is consumed as a submodule."""
    return str(Label(dep))


def repo():
    """Register the locally installed gflags headers as the
    com_github_gflags_gflags repository."""
    native.new_local_repository(
        name = "com_github_gflags_gflags",
        build_file = clean_dep("//third_party/gflags:gflags.BUILD"),
        path = "/usr/local/include/gflags",
    )
    # Alternative: fetch gflags 2.2.2 from GitHub instead of relying on
    # the locally installed copy.
    #http_archive(
    #    name = "com_github_gflags_gflags",
    #    build_file = "gflags.BUILD",
    #    sha256 = "34af2f15cf7367513b352bdcd2493ab14ce43692d2dcd9dfc499492966c64dcf",
    #    strip_prefix = "gflags-2.2.2",
    #    urls = ["https://github.com/gflags/gflags/archive/v2.2.2.tar.gz"],
    #)
|
"""Loads the gflags library"""
def clean_dep(dep):
    """Sanitize a dependency so it works when Apollo is a submodule.

    BUG FIX: the Bazel builtin `Label` was miswritten as lowercase
    `label`, which is undefined in Starlark and fails at load time.
    """
    return str(Label(dep))


def repo():
    """Register locally installed gflags as com_github_gflags_gflags."""
    native.new_local_repository(
        name = 'com_github_gflags_gflags',
        build_file = clean_dep('//third_party/gflags:gflags.BUILD'),
        path = '/usr/local/include/gflags',
    )
|
def bisection_search(arr: list, n: int) -> bool:
    """Recursive binary search: True iff n occurs in the sorted list arr.

    Note: like the original, an empty list raises IndexError.
    """
    mid = len(arr) // 2
    if arr[mid] == n:
        return True
    if len(arr) < 2:
        return False
    half = arr[:mid] if arr[mid] > n else arr[mid:]
    return bisection_search(half, n)


print(bisection_search([1, 2, 3, 4, 5, 6, 7, 8, 9, 11], 12))
|
def bisection_search(arr: list, n: int) -> bool:
    """Return True iff `n` is present in the sorted list `arr` (recursive)."""
    mid = len(arr) // 2
    found = arr[mid] == n
    if found or len(arr) < 2:
        return found
    lower, upper = arr[:mid], arr[mid:]
    return bisection_search(lower if arr[mid] > n else upper, n)


print(bisection_search([1, 2, 3, 4, 5, 6, 7, 8, 9, 11], 12))
|
l1 = int(input('Digite o lado 1 '))
l2 = int(input('Digite o lado 2 '))
l3 = int(input('Digite o lado 3 '))
# Triangle inequality: each side must be shorter than the sum of the others.
forms_triangle = l1 + l2 > l3 and l1 + l3 > l2 and l2 + l3 > l1
if forms_triangle:
    print('O triangulo PODE ser formado')
else:
    print('O triangulo NAO PODE ser formado')
|
l1 = int(input('Digite o lado 1 '))
l2 = int(input('Digite o lado 2 '))
l3 = int(input('Digite o lado 3 '))
# De Morgan of the triangle inequality: report failure when any side is
# at least the sum of the other two.
if l3 >= l1 + l2 or l2 >= l1 + l3 or l1 >= l2 + l3:
    print('O triangulo NAO PODE ser formado')
else:
    print('O triangulo PODE ser formado')
|
'''
02 - Creating two-factor
Let's continue looking at the student_data dataset of students
in secondary school. Here, we want to answer the following question:
does a student's first semester grade ("G1") tend to correlate with
their final grade ("G3")?
There are many aspects of a student's life that could result in a higher
or lower final grade in the class. For example, some students receive extra
educational support from their school ("schoolsup") or from their family
("famsup"), which could result in higher grades. Let's try to control for
these two factors by creating subplots based on whether the student received
extra educational support from their school or family.
Seaborn has been imported as sns and matplotlib.pyplot has been imported as plt.
Instructions 1/3
- Use relplot() to create a scatter plot with "G1" on the x-axis and "G3" on the
  y-axis, using the student_data DataFrame.
'''
# Scatter plot of first-semester grade (G1) against final grade (G3).
sns.relplot(x="G1", y="G3", data=student_data, kind="scatter")
plt.show()
'''
Instructions 2/3
- Create column subplots based on whether the student received support from the
  school ("schoolsup"), ordered so that "yes" comes before "no".
'''
# Same plot, split into columns by school support (yes before no).
sns.relplot(x="G1", y="G3", data=student_data, kind="scatter",
            col="schoolsup", col_order=["yes", "no"])
plt.show()
'''
Instructions 3/3
- Add row subplots based on whether the student received support from the family
  ("famsup"), ordered so that "yes" comes before "no". This will result in subplots
  based on two factors.
'''
# Two-factor grid: columns by school support, rows by family support.
sns.relplot(x="G1", y="G3", data=student_data, kind="scatter",
            col="schoolsup", col_order=["yes", "no"],
            row="famsup", row_order=["yes", "no"])
plt.show()
|
"""
02 - Creating two-factor
Let's continue looking at the student_data dataset of students
in secondary school. Here, we want to answer the following question:
does a student's first semester grade ("G1") tend to correlate with
their final grade ("G3")?
There are many aspects of a student's life that could result in a higher
or lower final grade in the class. For example, some students receive extra
educational support from their school ("schoolsup") or from their family
("famsup"), which could result in higher grades. Let's try to control for
these two factors by creating subplots based on whether the student received
extra educational support from their school or family.
Seaborn has been imported as sns and matplotlib.pyplot has been imported as plt.
Instructions 1/3
- Use relplot() to create a scatter plot with "G1" on the x-axis and "G3" on the
y-axis, using the student_data DataFrame.
"""
sns.relplot(x='G1', y='G3', data=student_data, kind='scatter')
plt.show()
'\nInstructions 2/3\n- Create column subplots based on whether the student received support from the \n school ("schoolsup"), ordered so that "yes" comes before "no".\n'
sns.relplot(x='G1', y='G3', data=student_data, kind='scatter', col='schoolsup', col_order=['yes', 'no'])
plt.show()
'\nInstructions 3/3\n\n- Add row subplots based on whether the student received support from the family \n ("famsup"), ordered so that "yes" comes before "no". This will result in subplots \n based on two factors.\n'
sns.relplot(x='G1', y='G3', data=student_data, kind='scatter', col='schoolsup', col_order=['yes', 'no'], row='famsup', row_order=['yes', 'no'])
plt.show()
|
def f(i):
    """Shift the integer up by two."""
    return i + 2


def g(i):
    """True for integers strictly greater than 1000."""
    return i > 1000


def applyF_filterG(L, f, g):
    """
    Assumes L is a list of integers
    Assume functions f and g are defined for you.
    f takes in an integer, applies a function, returns another integer
    g takes in an integer, applies a Boolean function,
    returns either True or False
    Mutates L such that, for each element i originally in L, L contains
    i if g(f(i)) returns True, and no other elements
    Returns the largest element in the mutated L or -1 if the list is empty
    """
    # Iterate over a snapshot so removals don't skip elements.
    for element in L[:]:
        if not g(f(element)):
            L.remove(element)
    if not L:
        return -1
    print(L)
    return max(L)


L = [55550, 2500, 545645, 66554, 6000]
print(applyF_filterG(L, f, g))
|
def f(i):
    """Return i + 2."""
    return i + 2


def g(i):
    """Return True when i exceeds 1000."""
    return i > 1000


def apply_f_filter_g(L, f, g):
    """
    Assumes L is a list of integers
    Assume functions f and g are defined for you.
    f takes in an integer, applies a function, returns another integer
    g takes in an integer, applies a Boolean function,
    returns either True or False
    Mutates L such that, for each element i originally in L, L contains
    i if g(f(i)) returns True, and no other elements
    Returns the largest element in the mutated L or -1 if the list is empty
    """
    # BUG FIX: the snapshot used to be bound to `l1` while the loop read
    # the undefined name `L1`, raising NameError on every call. Use one
    # consistent name for the copy we iterate while mutating L.
    snapshot = L[:]
    for element in snapshot:
        if not g(f(element)):
            L.remove(element)
    if not L:
        return -1
    print(L)
    return max(L)


l = [55550, 2500, 545645, 66554, 6000]
# BUG FIX: the list above is bound to `l`, but the call previously passed
# the undefined name `L` (NameError at module load).
print(apply_f_filter_g(l, f, g))
|
#
# PySNMP MIB module CISCO-VOICE-DNIS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-VOICE-DNIS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:03:03 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): machine-generated by pysmi from the CISCO-VOICE-DNIS-MIB
# ASN.1 source (see header comment above); regenerate from the MIB rather
# than hand-editing these definitions. `mibBuilder` is presumably injected
# into the module namespace by the PySNMP MIB loader at import time --
# TODO confirm against the loading environment.
# --- Symbols imported from prerequisite MIB modules ---
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ObjectGroup, NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "NotificationGroup", "ModuleCompliance")
iso, NotificationType, Integer32, Counter64, Gauge32, Unsigned32, MibIdentifier, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, TimeTicks, ModuleIdentity, Bits, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "NotificationType", "Integer32", "Counter64", "Gauge32", "Unsigned32", "MibIdentifier", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "TimeTicks", "ModuleIdentity", "Bits", "IpAddress")
DisplayString, TextualConvention, RowStatus, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "RowStatus", "TruthValue")
# --- Module identity: OID 1.3.6.1.4.1.9.9.219 under ciscoMgmt ---
ciscoVoiceDnisMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 219))
if mibBuilder.loadTexts: ciscoVoiceDnisMIB.setLastUpdated('200205010000Z')
if mibBuilder.loadTexts: ciscoVoiceDnisMIB.setOrganization('Cisco Systems, Inc.')
# --- Textual conventions ---
# DnisMapname: display string naming a DNIS map (0..32 octets).
class DnisMapname(TextualConvention, OctetString):
    status = 'current'
    displayHint = '32a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 32)
# CvE164String: 1..32-octet string; presumably an E.164 number -- TODO
# confirm against the MIB description.
class CvE164String(TextualConvention, OctetString):
    status = 'current'
    displayHint = '32a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(1, 32)
# --- Managed objects ---
cvDnisMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 1))
cvDnisMap = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1))
# cvDnisMappingTable: rows indexed by cvDnisMappingName.
cvDnisMappingTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1), )
if mibBuilder.loadTexts: cvDnisMappingTable.setStatus('current')
cvDnisMappingEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1), ).setIndexNames((1, "CISCO-VOICE-DNIS-MIB", "cvDnisMappingName"))
if mibBuilder.loadTexts: cvDnisMappingEntry.setStatus('current')
cvDnisMappingName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 1), DnisMapname().subtype(subtypeSpec=ValueSizeConstraint(1, 32)))
if mibBuilder.loadTexts: cvDnisMappingName.setStatus('current')
cvDnisMappingUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 2), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvDnisMappingUrl.setStatus('current')
cvDnisMappingRefresh = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("idle", 1), ("refresh", 2))).clone('idle')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvDnisMappingRefresh.setStatus('current')
cvDnisMappingUrlAccessError = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvDnisMappingUrlAccessError.setStatus('current')
cvDnisMappingStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvDnisMappingStatus.setStatus('current')
# cvDnisNodeTable: rows indexed by (cvDnisMappingName, cvDnisNumber).
cvDnisNodeTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2), )
if mibBuilder.loadTexts: cvDnisNodeTable.setStatus('current')
cvDnisNodeEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-VOICE-DNIS-MIB", "cvDnisMappingName"), (1, "CISCO-VOICE-DNIS-MIB", "cvDnisNumber"))
if mibBuilder.loadTexts: cvDnisNodeEntry.setStatus('current')
cvDnisNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 1), CvE164String())
if mibBuilder.loadTexts: cvDnisNumber.setStatus('current')
cvDnisNodeUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 2), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvDnisNodeUrl.setStatus('current')
cvDnisNodeModifiable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 3), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cvDnisNodeModifiable.setStatus('current')
cvDnisNodeStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cvDnisNodeStatus.setStatus('current')
# --- Notifications ---
cvDnisMIBNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 2))
cvDnisMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 2, 0))
cvDnisMappingUrlInaccessible = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 219, 2, 0, 1)).setObjects(("CISCO-VOICE-DNIS-MIB", "cvDnisMappingUrl"), ("CISCO-VOICE-DNIS-MIB", "cvDnisMappingUrlAccessError"))
if mibBuilder.loadTexts: cvDnisMappingUrlInaccessible.setStatus('current')
# --- Conformance ---
cvDnisMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 3))
cvDnisMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 1))
cvDnisMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 2))
cvDnisMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 1, 1)).setObjects(("CISCO-VOICE-DNIS-MIB", "cvDnisGroup"), ("CISCO-VOICE-DNIS-MIB", "cvDnisNotificationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cvDnisMIBCompliance = cvDnisMIBCompliance.setStatus('current')
cvDnisGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 2, 1)).setObjects(("CISCO-VOICE-DNIS-MIB", "cvDnisMappingUrl"), ("CISCO-VOICE-DNIS-MIB", "cvDnisMappingRefresh"), ("CISCO-VOICE-DNIS-MIB", "cvDnisMappingUrlAccessError"), ("CISCO-VOICE-DNIS-MIB", "cvDnisMappingStatus"), ("CISCO-VOICE-DNIS-MIB", "cvDnisNodeUrl"), ("CISCO-VOICE-DNIS-MIB", "cvDnisNodeModifiable"), ("CISCO-VOICE-DNIS-MIB", "cvDnisNodeStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cvDnisGroup = cvDnisGroup.setStatus('current')
cvDnisNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 2, 2)).setObjects(("CISCO-VOICE-DNIS-MIB", "cvDnisMappingUrlInaccessible"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cvDnisNotificationGroup = cvDnisNotificationGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-VOICE-DNIS-MIB", cvDnisNodeUrl=cvDnisNodeUrl, cvDnisMappingStatus=cvDnisMappingStatus, cvDnisMIBNotificationPrefix=cvDnisMIBNotificationPrefix, cvDnisNumber=cvDnisNumber, cvDnisMappingEntry=cvDnisMappingEntry, cvDnisMIBGroups=cvDnisMIBGroups, cvDnisNodeTable=cvDnisNodeTable, cvDnisGroup=cvDnisGroup, cvDnisMappingTable=cvDnisMappingTable, cvDnisMappingUrlInaccessible=cvDnisMappingUrlInaccessible, cvDnisMIBObjects=cvDnisMIBObjects, cvDnisMappingRefresh=cvDnisMappingRefresh, cvDnisMappingUrl=cvDnisMappingUrl, CvE164String=CvE164String, cvDnisMappingUrlAccessError=cvDnisMappingUrlAccessError, DnisMapname=DnisMapname, ciscoVoiceDnisMIB=ciscoVoiceDnisMIB, cvDnisMap=cvDnisMap, cvDnisMIBConformance=cvDnisMIBConformance, cvDnisMIBCompliances=cvDnisMIBCompliances, cvDnisMIBCompliance=cvDnisMIBCompliance, cvDnisNodeModifiable=cvDnisNodeModifiable, cvDnisNodeStatus=cvDnisNodeStatus, cvDnisMappingName=cvDnisMappingName, cvDnisNotificationGroup=cvDnisNotificationGroup, cvDnisMIBNotifications=cvDnisMIBNotifications, cvDnisNodeEntry=cvDnisNodeEntry, PYSNMP_MODULE_ID=ciscoVoiceDnisMIB)
|
# NOTE(review): this section is an auto-converted (snake_case) copy of the
# CISCO-VOICE-DNIS-MIB definitions above. The conversion renamed the bindings
# but left many references pointing at the old camelCase names, so the new
# snake_case objects were never configured. All references below now use the
# snake_case bindings declared in this section.
(object_identifier, octet_string, integer) = mibBuilder.importSymbols('ASN1', 'ObjectIdentifier', 'OctetString', 'Integer')
(named_values,) = mibBuilder.importSymbols('ASN1-ENUMERATION', 'NamedValues')
(constraints_intersection, constraints_union, single_value_constraint, value_size_constraint, value_range_constraint) = mibBuilder.importSymbols('ASN1-REFINEMENT', 'ConstraintsIntersection', 'ConstraintsUnion', 'SingleValueConstraint', 'ValueSizeConstraint', 'ValueRangeConstraint')
(cisco_mgmt,) = mibBuilder.importSymbols('CISCO-SMI', 'ciscoMgmt')
(object_group, notification_group, module_compliance) = mibBuilder.importSymbols('SNMPv2-CONF', 'ObjectGroup', 'NotificationGroup', 'ModuleCompliance')
(iso, notification_type, integer32, counter64, gauge32, unsigned32, mib_identifier, counter32, mib_scalar, mib_table, mib_table_row, mib_table_column, object_identity, time_ticks, module_identity, bits, ip_address) = mibBuilder.importSymbols('SNMPv2-SMI', 'iso', 'NotificationType', 'Integer32', 'Counter64', 'Gauge32', 'Unsigned32', 'MibIdentifier', 'Counter32', 'MibScalar', 'MibTable', 'MibTableRow', 'MibTableColumn', 'ObjectIdentity', 'TimeTicks', 'ModuleIdentity', 'Bits', 'IpAddress')
(display_string, textual_convention, row_status, truth_value) = mibBuilder.importSymbols('SNMPv2-TC', 'DisplayString', 'TextualConvention', 'RowStatus', 'TruthValue')
cisco_voice_dnis_mib = module_identity((1, 3, 6, 1, 4, 1, 9, 9, 219))
if mibBuilder.loadTexts:
    cisco_voice_dnis_mib.setLastUpdated('200205010000Z')
if mibBuilder.loadTexts:
    cisco_voice_dnis_mib.setOrganization('Cisco Systems, Inc.')

class Dnismapname(textual_convention, octet_string):
    # Textual convention for DNIS map names (0..32 octets).
    # NOTE(review): pysnmp conventionally reads camelCase class attributes
    # (displayHint/subtypeSpec); confirm these lowercased attribute names
    # against the consuming tooling.
    status = 'current'
    display_hint = '32a'
    subtype_spec = octet_string.subtypeSpec + value_size_constraint(0, 32)

class Cve164String(textual_convention, octet_string):
    # Textual convention for E.164 number strings (1..32 octets).
    status = 'current'
    display_hint = '32a'
    subtype_spec = octet_string.subtypeSpec + value_size_constraint(1, 32)
cv_dnis_mib_objects = mib_identifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 1))
cv_dnis_map = mib_identifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1))
cv_dnis_mapping_table = mib_table((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1))
if mibBuilder.loadTexts:
    cv_dnis_mapping_table.setStatus('current')
cv_dnis_mapping_entry = mib_table_row((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1)).setIndexNames((1, 'CISCO-VOICE-DNIS-MIB', 'cvDnisMappingName'))
if mibBuilder.loadTexts:
    cv_dnis_mapping_entry.setStatus('current')
cv_dnis_mapping_name = mib_table_column((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 1), Dnismapname().subtype(subtypeSpec=value_size_constraint(1, 32)))
if mibBuilder.loadTexts:
    cv_dnis_mapping_name.setStatus('current')
cv_dnis_mapping_url = mib_table_column((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 2), display_string()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
    cv_dnis_mapping_url.setStatus('current')
cv_dnis_mapping_refresh = mib_table_column((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 3), integer32().subtype(subtypeSpec=constraints_union(single_value_constraint(1, 2))).clone(namedValues=named_values(('idle', 1), ('refresh', 2))).clone('idle')).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
    cv_dnis_mapping_refresh.setStatus('current')
cv_dnis_mapping_url_access_error = mib_table_column((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 4), display_string()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
    cv_dnis_mapping_url_access_error.setStatus('current')
cv_dnis_mapping_status = mib_table_column((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 1, 1, 5), row_status()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
    cv_dnis_mapping_status.setStatus('current')
cv_dnis_node_table = mib_table((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2))
if mibBuilder.loadTexts:
    cv_dnis_node_table.setStatus('current')
cv_dnis_node_entry = mib_table_row((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1)).setIndexNames((0, 'CISCO-VOICE-DNIS-MIB', 'cvDnisMappingName'), (1, 'CISCO-VOICE-DNIS-MIB', 'cvDnisNumber'))
if mibBuilder.loadTexts:
    cv_dnis_node_entry.setStatus('current')
cv_dnis_number = mib_table_column((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 1), Cve164String())
if mibBuilder.loadTexts:
    cv_dnis_number.setStatus('current')
cv_dnis_node_url = mib_table_column((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 2), display_string()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
    cv_dnis_node_url.setStatus('current')
cv_dnis_node_modifiable = mib_table_column((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 3), truth_value()).setMaxAccess('readonly')
if mibBuilder.loadTexts:
    cv_dnis_node_modifiable.setStatus('current')
cv_dnis_node_status = mib_table_column((1, 3, 6, 1, 4, 1, 9, 9, 219, 1, 1, 2, 1, 4), row_status()).setMaxAccess('readcreate')
if mibBuilder.loadTexts:
    cv_dnis_node_status.setStatus('current')
cv_dnis_mib_notification_prefix = mib_identifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 2))
cv_dnis_mib_notifications = mib_identifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 2, 0))
cv_dnis_mapping_url_inaccessible = notification_type((1, 3, 6, 1, 4, 1, 9, 9, 219, 2, 0, 1)).setObjects(('CISCO-VOICE-DNIS-MIB', 'cvDnisMappingUrl'), ('CISCO-VOICE-DNIS-MIB', 'cvDnisMappingUrlAccessError'))
if mibBuilder.loadTexts:
    cv_dnis_mapping_url_inaccessible.setStatus('current')
cv_dnis_mib_conformance = mib_identifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 3))
cv_dnis_mib_compliances = mib_identifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 1))
cv_dnis_mib_groups = mib_identifier((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 2))
cv_dnis_mib_compliance = module_compliance((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 1, 1)).setObjects(('CISCO-VOICE-DNIS-MIB', 'cvDnisGroup'), ('CISCO-VOICE-DNIS-MIB', 'cvDnisNotificationGroup'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cv_dnis_mib_compliance = cv_dnis_mib_compliance.setStatus('current')
cv_dnis_group = object_group((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 2, 1)).setObjects(('CISCO-VOICE-DNIS-MIB', 'cvDnisMappingUrl'), ('CISCO-VOICE-DNIS-MIB', 'cvDnisMappingRefresh'), ('CISCO-VOICE-DNIS-MIB', 'cvDnisMappingUrlAccessError'), ('CISCO-VOICE-DNIS-MIB', 'cvDnisMappingStatus'), ('CISCO-VOICE-DNIS-MIB', 'cvDnisNodeUrl'), ('CISCO-VOICE-DNIS-MIB', 'cvDnisNodeModifiable'), ('CISCO-VOICE-DNIS-MIB', 'cvDnisNodeStatus'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cv_dnis_group = cv_dnis_group.setStatus('current')
cv_dnis_notification_group = notification_group((1, 3, 6, 1, 4, 1, 9, 9, 219, 3, 2, 2)).setObjects(('CISCO-VOICE-DNIS-MIB', 'cvDnisMappingUrlInaccessible'))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cv_dnis_notification_group = cv_dnis_notification_group.setStatus('current')
mibBuilder.exportSymbols('CISCO-VOICE-DNIS-MIB', cvDnisNodeUrl=cv_dnis_node_url, cvDnisMappingStatus=cv_dnis_mapping_status, cvDnisMIBNotificationPrefix=cv_dnis_mib_notification_prefix, cvDnisNumber=cv_dnis_number, cvDnisMappingEntry=cv_dnis_mapping_entry, cvDnisMIBGroups=cv_dnis_mib_groups, cvDnisNodeTable=cv_dnis_node_table, cvDnisGroup=cv_dnis_group, cvDnisMappingTable=cv_dnis_mapping_table, cvDnisMappingUrlInaccessible=cv_dnis_mapping_url_inaccessible, cvDnisMIBObjects=cv_dnis_mib_objects, cvDnisMappingRefresh=cv_dnis_mapping_refresh, cvDnisMappingUrl=cv_dnis_mapping_url, CvE164String=Cve164String, cvDnisMappingUrlAccessError=cv_dnis_mapping_url_access_error, DnisMapname=Dnismapname, ciscoVoiceDnisMIB=cisco_voice_dnis_mib, cvDnisMap=cv_dnis_map, cvDnisMIBConformance=cv_dnis_mib_conformance, cvDnisMIBCompliances=cv_dnis_mib_compliances, cvDnisMIBCompliance=cv_dnis_mib_compliance, cvDnisNodeModifiable=cv_dnis_node_modifiable, cvDnisNodeStatus=cv_dnis_node_status, cvDnisMappingName=cv_dnis_mapping_name, cvDnisNotificationGroup=cv_dnis_notification_group, cvDnisMIBNotifications=cv_dnis_mib_notifications, cvDnisNodeEntry=cv_dnis_node_entry, PYSNMP_MODULE_ID=cisco_voice_dnis_mib)
|
"""
oops
"""
print(0, 1 == 1, 0)
|
"""
oops
"""
print(0, 1 == 1, 0)
|
# acronymsBuilder.py
# A program to build acronyms from a phrase
# by Tung Nguyen
def main():
    """Prompt for a phrase and print the acronym built from its initials."""
    print("This program builds acronyms.")
    print()
    phrase = input("Enter a phrase: ")
    # Uppercased first letter of every whitespace-separated word.
    initials = "".join(word[0].upper() for word in phrase.split())
    print("The acronym is " + initials)

main()
|
def main():
    """Prompt for a phrase and print the acronym built from its initials."""
    print('This program builds acronyms.')
    print()
    sentence = input('Enter a phrase: ')
    list_word = sentence.split()
    acronym = ''
    # BUG FIX: the loop iterated `listWord` (the pre-rename camelCase name,
    # undefined here) instead of the renamed `list_word`.
    for x in list_word:
        acronym += x[0].upper()
    print('The acronym is ' + acronym)
main()
|
# x y x y
#r1 = [[0, 0], [5, 5]]
r1 = [[-5, -5], [-2, -2]]
r2 = [[7, 1], [1, 8]]
r3 = [[-1.2, 4], [3.7, 1.1]]
def rect_intersection_area(r1, r2, r3):
    """Return the area of the region common to three axis-aligned rectangles.

    Each rectangle is [[x1, y1], [x2, y2]] with the two corners in any order.
    Returns 0 when the three rectangles share no common region. Prints the
    computed intersection edges for debugging.
    """
    r1_p1, r1_p2 = r1
    r2_p1, r2_p2 = r2
    r3_p1, r3_p2 = r3
    # Left edge of the intersection: max of the three left (min-x) edges.
    left_x_p = max(min(r2_p1[0], r2_p2[0]), min(r1_p1[0], r1_p2[0]), min(r3_p1[0], r3_p2[0]))
    # Right edge: min of the three right (max-x) edges.
    # BUG FIX: the r3 term used min(); a rectangle's right edge is max() of its x values.
    right_x_p = min(max(r2_p1[0], r2_p2[0]), max(r1_p1[0], r1_p2[0]), max(r3_p1[0], r3_p2[0]))
    # "Top" here is the max of the three low-y edges (names kept from the original).
    top_y_p = max(min(r2_p1[1], r2_p2[1]), min(r1_p1[1], r1_p2[1]), min(r3_p1[1], r3_p2[1]))
    # "Bottom" is the min of the three high-y edges.
    # BUG FIX: same min() -> max() correction for the r3 term.
    bottom_y_p = min(max(r2_p1[1], r2_p2[1]), max(r1_p1[1], r1_p2[1]), max(r3_p1[1], r3_p2[1]))
    print('left_x_p:', left_x_p)
    print('right_x_p:', right_x_p)
    print('top_y_p:', top_y_p)
    print('bottom_y_p:', bottom_y_p)
    if right_x_p < left_x_p:
        # no overlap on the x axis
        return 0
    if bottom_y_p < top_y_p:
        # no overlap on the y axis
        return 0
    return (right_x_p - left_x_p) * (bottom_y_p - top_y_p)
#def is_within()
print(rect_intersection_area(r1, r2, r3))
|
r1 = [[-5, -5], [-2, -2]]
r2 = [[7, 1], [1, 8]]
r3 = [[-1.2, 4], [3.7, 1.1]]
def rect_intersection_area(r1, r2, r3):
    """Return the area common to three axis-aligned rectangles, or 0 when
    they share no region. Each rectangle is [[x1, y1], [x2, y2]] with the
    corners in any order. Prints the intersection edges for debugging.
    """
    (r1_p1, r1_p2) = r1
    (r2_p1, r2_p2) = r2
    (r3_p1, r3_p2) = r3
    left_x_p = max(min(r2_p1[0], r2_p2[0]), min(r1_p1[0], r1_p2[0]), min(r3_p1[0], r3_p2[0]))
    # BUG FIX: the r3 term used min(); a rectangle's right edge is max() of its x values.
    right_x_p = min(max(r2_p1[0], r2_p2[0]), max(r1_p1[0], r1_p2[0]), max(r3_p1[0], r3_p2[0]))
    top_y_p = max(min(r2_p1[1], r2_p2[1]), min(r1_p1[1], r1_p2[1]), min(r3_p1[1], r3_p2[1]))
    # BUG FIX: same min() -> max() correction for the r3 term.
    bottom_y_p = min(max(r2_p1[1], r2_p2[1]), max(r1_p1[1], r1_p2[1]), max(r3_p1[1], r3_p2[1]))
    print('left_x_p:', left_x_p)
    print('right_x_p:', right_x_p)
    print('top_y_p:', top_y_p)
    print('bottom_y_p:', bottom_y_p)
    if right_x_p < left_x_p:
        return 0
    if bottom_y_p < top_y_p:
        return 0
    return (right_x_p - left_x_p) * (bottom_y_p - top_y_p)
print(rect_intersection_area(r1, r2, r3))
|
# https://atcoder.jp/contests/abs/tasks/practice_1
def resolve():
    """Read an int, two ints and a string from stdin; print their sum and the string."""
    first = int(input())
    second, third = map(int, input().split())
    text = input()
    print("{} {}".format(first + second + third, text))
|
def resolve():
    """Read an int, two ints and a string from stdin; print their sum and the string."""
    first = int(input())
    second, third = map(int, input().split())
    text = input()
    print('{} {}'.format(first + second + third, text))
|
class IpConfiguration:
    """Simple holder for a caller-supplied IP configuration options object."""

    # Arbitrary options payload; stored as-is.
    options: object

    def __init__(self, options: object) -> None:
        """Store *options* on the instance."""
        self.options = options
|
class Ipconfiguration:
    """Simple holder for a caller-supplied IP configuration options object."""

    # Arbitrary options payload; stored as-is.
    options: object

    def __init__(self, options: object) -> None:
        """Store *options* on the instance."""
        self.options = options
|
#!/usr/bin/env python3
# Compares two lexicon files providing several stats
# @author Cristian TG
# @since 2021/04/15
# Please change the value of these variables:
LEXICON_1 = 'lexicon1.txt'  # path of the first lexicon file to compare
LEXICON_2 = 'lexicon2.txt'  # path of the second lexicon file to compare
SHOW_DETAILS = True  # when True, the set contents are printed, not just counts
DISAMBIGUATION_SYMBOL = '#'  # substring marking a phone as a disambiguation symbol
###############################################################
###############################################################
#import os
def getLexicon(lexicon, path):
    """Load a tab-separated lexicon file into *lexicon* and return it.

    Each line holds a head word, a tab, then a space-separated phone string;
    only the first two tab-separated fields of each line are used.
    """
    with open(path) as source:
        for entry in source:
            fields = entry.split('\t')
            lexicon[fields[0]] = fields[1].replace("\n", "").split(" ")
    return lexicon
def getWords(lexicon):
    """Return the set of head words (keys) of *lexicon*."""
    return set(lexicon)
def getCharacters(lexicon):
    """Return the set of characters used by the head words of *lexicon*."""
    return {ch for word in lexicon for ch in word}
def getPhonemes(lexicon, disambiguation_symbol=None):
    """Split the phone inventory of *lexicon* into phonemes and disambiguation symbols.

    lexicon: mapping word -> list of phones.
    disambiguation_symbol: substring that marks a phone as a disambiguation
        symbol. Defaults to the module-level DISAMBIGUATION_SYMBOL, preserving
        the behavior of existing callers.

    Returns (phonemes, disambiguation) as two sets.
    """
    if disambiguation_symbol is None:
        disambiguation_symbol = DISAMBIGUATION_SYMBOL
    phonemes = set()
    disambiguation = set()
    for word in lexicon:
        for phone in lexicon[word]:
            if disambiguation_symbol not in phone:
                phonemes.add(phone)
            else:
                disambiguation.add(phone)
    return phonemes, disambiguation
#############################################################
# Load both lexicons and derive the word / character / phoneme inventories.
lexicon1 = getLexicon({}, LEXICON_1)
lexicon2 = getLexicon({}, LEXICON_2)
words_l1 = getWords(lexicon1)
words_l2 = getWords(lexicon2)
characters_l1 = getCharacters(lexicon1)
characters_l2 = getCharacters(lexicon2)
phonemes_l1, disambiguation_l1 = getPhonemes(lexicon1)
phonemes_l2, disambiguation_l2 = getPhonemes(lexicon2)

def _show_details(value):
    """Print *value* only when SHOW_DETAILS is enabled."""
    if SHOW_DETAILS:
        print(value)

print("\nLEXICON_1", LEXICON_1, "LEXICON_2", LEXICON_2)
print("- Number of words:", len(words_l1), len(words_l2), len(words_l1) - len(words_l2))
print("\n- Number of common words:", len(words_l1 & words_l2))
_show_details(words_l1 & words_l2)
print("- Number of words included in 1 (not in 2):", len(words_l1 - words_l2))
_show_details(words_l1 - words_l2)
print("- Number of words included in 2 (not in 1):", len(words_l2 - words_l1))
_show_details(words_l2 - words_l1)
print("\n- Number of characters:", len(characters_l1), len(characters_l2), len(characters_l1) - len(characters_l2))
print("- Number of common characters:", len(characters_l1 & characters_l2))
_show_details(characters_l1 & characters_l2)
print("- Number of characters included in 1 (not in 2):", len(characters_l1 - characters_l2))
_show_details(characters_l1 - characters_l2)
print("- Number of characters included in 2 (not in 1):", len(characters_l2 - characters_l1))
_show_details(characters_l2 - characters_l1)
print("\n- Number of phonemes:", len(phonemes_l1), len(phonemes_l2), len(phonemes_l1) - len(phonemes_l2))
print("- Number of common phonemes:", len(phonemes_l1 & phonemes_l2))
_show_details(phonemes_l1 & phonemes_l2)
print("- Number of phonemes included in 1 (not in 2):", len(phonemes_l1 - phonemes_l2))
_show_details(phonemes_l1 - phonemes_l2)
print("- Number of phonemes included in 2 (not in 1):", len(phonemes_l2 - phonemes_l1))
_show_details(phonemes_l2 - phonemes_l1)
print("\n- Number of disambiguation symbols:", len(disambiguation_l1), len(disambiguation_l2), len(disambiguation_l1) - len(disambiguation_l2))
print("- Number of common disambiguation symbols:", len(disambiguation_l1 & disambiguation_l2))
_show_details(disambiguation_l1 & disambiguation_l2)
print("- Number of disambiguation symbols included in 1 (not in 2):", len(disambiguation_l1 - disambiguation_l2))
_show_details(disambiguation_l1 - disambiguation_l2)
print("- Number of disambiguation symbols included in 2 (not in 1):", len(disambiguation_l2 - disambiguation_l1))
_show_details(disambiguation_l2 - disambiguation_l1)
|
# NOTE(review): the conversion lowercased these constants, but every later
# reference in this section still reads the UPPER_SNAKE_CASE names, leaving
# these bindings dead. Restore the conventional (PEP 8) constant names so the
# references resolve to the values defined here.
LEXICON_1 = 'lexicon1.txt'  # path of the first lexicon file to compare
LEXICON_2 = 'lexicon2.txt'  # path of the second lexicon file to compare
SHOW_DETAILS = True  # when True, set contents are printed, not just counts
DISAMBIGUATION_SYMBOL = '#'  # substring marking a disambiguation symbol
def get_lexicon(lexicon, path):
    """Read *path* (lines of "word<TAB>phone phone ...") into *lexicon* and return it."""
    with open(path) as handle:
        for raw_line in handle:
            columns = raw_line.split('\t')
            # Only the first two tab-separated fields are used.
            lexicon[columns[0]] = columns[1].replace('\n', '').split(' ')
    return lexicon
def get_words(lexicon):
    """Return the set of head words (keys) of *lexicon*."""
    return {word for word in lexicon}
def get_characters(lexicon):
    """Return the set of characters used by the head words of *lexicon*."""
    return {c for word in lexicon for c in word}
def get_phonemes(lexicon, disambiguation_symbol=None):
    """Split the phone inventory of *lexicon* into phonemes and disambiguation symbols.

    lexicon: mapping word -> list of phones.
    disambiguation_symbol: substring that marks a phone as a disambiguation
        symbol. Defaults to the module-level DISAMBIGUATION_SYMBOL so existing
        callers keep their behavior.

    Returns (phonemes, disambiguation) as two sets.
    """
    if disambiguation_symbol is None:
        disambiguation_symbol = DISAMBIGUATION_SYMBOL
    phonemes = set()
    disambiguation = set()
    for word in lexicon:
        for phone in lexicon[word]:
            if disambiguation_symbol not in phone:
                phonemes.add(phone)
            else:
                disambiguation.add(phone)
    return (phonemes, disambiguation)
# Load both lexicons and derive the word / character / phoneme inventories.
lexicon1 = get_lexicon({}, LEXICON_1)
lexicon2 = get_lexicon({}, LEXICON_2)
words_l1 = get_words(lexicon1)
words_l2 = get_words(lexicon2)
characters_l1 = get_characters(lexicon1)
characters_l2 = get_characters(lexicon2)
(phonemes_l1, disambiguation_l1) = get_phonemes(lexicon1)
(phonemes_l2, disambiguation_l2) = get_phonemes(lexicon2)

def _detail(value):
    """Print *value* only when SHOW_DETAILS is enabled."""
    if SHOW_DETAILS:
        print(value)

print('\nLEXICON_1', LEXICON_1, 'LEXICON_2', LEXICON_2)
print('- Number of words:', len(words_l1), len(words_l2), len(words_l1) - len(words_l2))
print('\n- Number of common words:', len(words_l1 & words_l2))
_detail(words_l1 & words_l2)
print('- Number of words included in 1 (not in 2):', len(words_l1 - words_l2))
_detail(words_l1 - words_l2)
print('- Number of words included in 2 (not in 1):', len(words_l2 - words_l1))
_detail(words_l2 - words_l1)
print('\n- Number of characters:', len(characters_l1), len(characters_l2), len(characters_l1) - len(characters_l2))
print('- Number of common characters:', len(characters_l1 & characters_l2))
_detail(characters_l1 & characters_l2)
print('- Number of characters included in 1 (not in 2):', len(characters_l1 - characters_l2))
_detail(characters_l1 - characters_l2)
print('- Number of characters included in 2 (not in 1):', len(characters_l2 - characters_l1))
_detail(characters_l2 - characters_l1)
print('\n- Number of phonemes:', len(phonemes_l1), len(phonemes_l2), len(phonemes_l1) - len(phonemes_l2))
print('- Number of common phonemes:', len(phonemes_l1 & phonemes_l2))
_detail(phonemes_l1 & phonemes_l2)
print('- Number of phonemes included in 1 (not in 2):', len(phonemes_l1 - phonemes_l2))
_detail(phonemes_l1 - phonemes_l2)
print('- Number of phonemes included in 2 (not in 1):', len(phonemes_l2 - phonemes_l1))
_detail(phonemes_l2 - phonemes_l1)
print('\n- Number of disambiguation symbols:', len(disambiguation_l1), len(disambiguation_l2), len(disambiguation_l1) - len(disambiguation_l2))
print('- Number of common disambiguation symbols:', len(disambiguation_l1 & disambiguation_l2))
_detail(disambiguation_l1 & disambiguation_l2)
print('- Number of disambiguation symbols included in 1 (not in 2):', len(disambiguation_l1 - disambiguation_l2))
_detail(disambiguation_l1 - disambiguation_l2)
print('- Number of disambiguation symbols included in 2 (not in 1):', len(disambiguation_l2 - disambiguation_l1))
_detail(disambiguation_l2 - disambiguation_l1)
|
def _quote(value):
    """Return *value* wrapped in single quotes; the literal '""' becomes ''."""
    if value == '\"\"':
        return "\'\'"
    return "\'{0}\'".format(value)

def three_word(a, b, c):
    """Quote the title, tag and description fields; returns the three quoted strings."""
    return _quote(a), _quote(b), _quote(c)

def format_file(old_file, new_file):
    """Reformat one tab-separated face file and append the result to *new_file*.

    Each input line is expected to have 26 tab-separated fields; fields 4-6
    (title, tag, description) are single-quoted via three_word, fields 1, 2
    and 10 are quoted directly by the output template. Any failure is logged
    to log.txt (as "<new_file>,<error>") instead of raising.

    Improvements over the previous version: the output handle no longer
    shadows the function name, both files are managed by `with` so they are
    closed even on error, and each line is split once instead of 26 times.
    """
    try:
        with open(new_file, 'a') as out_file, open(old_file, 'r') as face_file:
            for line in face_file:
                fields = line.split('\n')[0].split('\t')
                title, tag, description = three_word(fields[4], fields[5], fields[6])
                face_data = ("{0}\t\'{1}\'\t\'{2}\'\t{3}\t{4}\t{5}\t{6}\t"
                             "{7}\t{8}\t{9}\t\'{10}\'\t{11}\t{12}\t{13}\t{14}\t{15}\t{16}\t"
                             "{17}\t{18}\t{19}\t{20}\t{21}\t{22}\t{23}\t{24}\t{25}\n"
                             .format(fields[0], fields[1], fields[2], fields[3],
                                     title, tag, description,
                                     fields[7], fields[8], fields[9], fields[10],
                                     fields[11], fields[12], fields[13], fields[14],
                                     fields[15], fields[16], fields[17], fields[18],
                                     fields[19], fields[20], fields[21], fields[22],
                                     fields[23], fields[24], fields[25]))
                out_file.write(face_data)
    except Exception as e:
        with open('log.txt', 'a') as log:
            log.writelines("{0},{1}".format(new_file, e))

if __name__ == '__main__':
    start = 1
    end = 135
    for i in range(start, end):
        print(i)
        format_file(r'D:\Users\KYH\Desktop\EmotionMap\FlickrEmotionData\4face_all\face{0}.txt'.format(i),
                    r'D:\Users\KYH\Desktop\EmotionMap\FlickrEmotionData\5face_format\face{0}.txt'.format(i))
|
def _quote(value):
    """Return *value* wrapped in single quotes; the literal '""' becomes ''."""
    if value == '""':
        return "''"
    return "'{0}'".format(value)

def three_word(a, b, c):
    """Quote the title, tag and description fields; returns the three quoted strings."""
    return (_quote(a), _quote(b), _quote(c))

def format_file(old_file, new_file):
    """Reformat one tab-separated face file and append the result to *new_file*.

    Each input line is expected to have 26 tab-separated fields; fields 4-6
    (title, tag, description) are single-quoted via three_word. Any failure
    is logged to log.txt (as "<new_file>,<error>") instead of raising.

    Improvements over the previous version: the output handle no longer
    shadows the function name, both files are managed by `with` so they are
    closed even on error, and each line is split once instead of 26 times.
    """
    try:
        with open(new_file, 'a') as out_file, open(old_file, 'r') as face_file:
            for line in face_file:
                fields = line.split('\n')[0].split('\t')
                (title, tag, description) = three_word(fields[4], fields[5], fields[6])
                face_data = "{0}\t'{1}'\t'{2}'\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t'{10}'\t{11}\t{12}\t{13}\t{14}\t{15}\t{16}\t{17}\t{18}\t{19}\t{20}\t{21}\t{22}\t{23}\t{24}\t{25}\n".format(fields[0], fields[1], fields[2], fields[3], title, tag, description, fields[7], fields[8], fields[9], fields[10], fields[11], fields[12], fields[13], fields[14], fields[15], fields[16], fields[17], fields[18], fields[19], fields[20], fields[21], fields[22], fields[23], fields[24], fields[25])
                out_file.write(face_data)
    except Exception as e:
        with open('log.txt', 'a') as log:
            log.writelines('{0},{1}'.format(new_file, e))

if __name__ == '__main__':
    start = 1
    end = 135
    for i in range(start, end):
        print(i)
        format_file('D:\\Users\\KYH\\Desktop\\EmotionMap\\FlickrEmotionData\\4face_all\\face{0}.txt'.format(i), 'D:\\Users\\KYH\\Desktop\\EmotionMap\\FlickrEmotionData\\5face_format\\face{0}.txt'.format(i))
|
# [Skill] Cygnus Constellation (20899)
# NOTE(review): `sm` and `parentID` are supplied by the surrounding script
# engine — confirm against the host environment.
echo = 10001005
cygnusConstellation = 1142597
cygnus = 1101000
if sm.canHold(cygnusConstellation):
    sm.setSpeakerID(cygnus)
    # Adjacent string literals concatenate into a single dialogue string.
    reward_dialogue = ("You have exceeded all our expectations. Please take this as a symbol of your heroism.\r\n"
                       "#s" + str(echo) + "# #q" + str(echo) + "#\r\n"
                       "#i" + str(cygnusConstellation) + "# #z" + str(cygnusConstellation) + "#")
    sm.sendNext(reward_dialogue)
    sm.completeQuest(parentID)
    sm.giveSkill(echo)
    sm.giveItem(cygnusConstellation)
else:
    sm.setSpeakerID(cygnus)
    sm.sendSayOkay("Please make room in your Equip inventory.")
|
echo = 10001005
cygnus_constellation = 1142597
cygnus = 1101000
# BUG FIX: the conversion renamed the item variable to `cygnus_constellation`
# but kept referencing the old `cygnusConstellation` name below.
if sm.canHold(cygnus_constellation):
    sm.setSpeakerID(cygnus)
    sm.sendNext('You have exceeded all our expectations. Please take this as a symbol of your heroism.\r\n#s' + str(echo) + '# #q' + str(echo) + '#\r\n#i' + str(cygnus_constellation) + '# #z' + str(cygnus_constellation) + '#')
    sm.completeQuest(parentID)
    sm.giveSkill(echo)
    sm.giveItem(cygnus_constellation)
else:
    sm.setSpeakerID(cygnus)
    sm.sendSayOkay('Please make room in your Equip inventory.')
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two numbers stored as linked lists (head = least significant digit)
        and return the sum in the same representation."""
        total = str(int(self.combine(l1)) + int(self.combine(l2)))
        return self.separate(total, len(total) - 1)

    def combine(self, lst):
        """Return the digits of *lst* as a string, most significant digit first."""
        digits = str(lst.val)
        return self.combine(lst.next) + digits if lst.next else digits

    def separate(self, string, i):
        """Rebuild a digit list from *string*, consuming it from index *i* down to 0."""
        if i < 1:
            return ListNode(int(string[0]), None)
        return ListNode(int(string[i]), self.separate(string, i - 1))
|
class Solution:
    def add_two_numbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two numbers stored as linked lists (head = least significant digit)
        and return the sum in the same representation."""
        ans = str(int(self.combine(l1)) + int(self.combine(l2)))
        return self.separate(ans, len(ans) - 1)

    def combine(self, lst):
        """Return the digits of *lst* as a string, most significant digit first."""
        if lst.next:
            return self.combine(lst.next) + str(lst.val)
        else:
            return str(lst.val)

    def separate(self, string, i):
        """Rebuild a digit list from *string*, consuming it from index *i* down to 0.

        BUG FIX: the converted code called `list_node(...)`, which is not
        defined anywhere; the node constructor is the ListNode class.
        """
        if i < 1:
            return ListNode(int(string[0]), None)
        else:
            return ListNode(int(string[i]), self.separate(string, i - 1))
|
def http_exception(response):
    """Raise an HTTPException wrapping *response*."""
    raise HTTPException(response)
class HTTPException(Exception):
    """Exception wrapping an HTTP response object.

    str(exc) yields the wrapped response's `message`; the redirect /
    client-error / server-error flags mirror the corresponding attributes
    of the wrapped response.
    """

    def __init__(self, response):
        self._response = response

    def __str__(self):
        return self._response.message

    @property
    def is_redirect(self):
        """Mirror of response.is_redirect."""
        return self._response.is_redirect

    @property
    def is_client_error(self):
        """Mirror of response.is_client_error."""
        return self._response.is_client_error

    @property
    def is_server_error(self):
        """Mirror of response.is_server_error."""
        return self._response.is_server_error
|
def http_exception(response):
    """Raise an Httpexception wrapping *response*.

    BUG FIX: the converted code executed `raise http_exception(response)` —
    the function calling itself, recursing until RecursionError — instead of
    raising the Httpexception class.
    """
    raise Httpexception(response)
class Httpexception(Exception):
    """Exception wrapping an HTTP response object.

    str(exc) yields the wrapped response's `message`; the redirect /
    client-error / server-error flags mirror the corresponding attributes
    of the wrapped response.
    """

    def __init__(self, response):
        self._response = response

    def __str__(self):
        return self._response.message

    @property
    def is_redirect(self):
        """Mirror of response.is_redirect."""
        return self._response.is_redirect

    @property
    def is_client_error(self):
        """Mirror of response.is_client_error."""
        return self._response.is_client_error

    @property
    def is_server_error(self):
        """Mirror of response.is_server_error."""
        return self._response.is_server_error
|
# Greedy change-making: count coins, largest denomination first.
coins = [100, 50, 25, 5, 1]
total = 0
change = 130
for coin in coins:
    numCoins = change // coin
    change -= numCoins * coin
    total += numCoins
print(total)
'''
numCoins = 75 // 100 = 0
change = change - 0 * 100
change = change
'''
|
# Greedy change-making: count coins, largest denomination first.
coins = [100, 50, 25, 5, 1]
total = 0
change = 130
for i in range(len(coins)):
    # BUG FIX: the count variable was renamed to `num_coins` during
    # conversion, but the two lines below still read the old `numCoins` name.
    num_coins = change // coins[i]
    change -= num_coins * coins[i]
    total += num_coins
print(total)
'\nnumCoins = 75 // 100 = 0\nchange = change - 0 * 100\nchange = change \n'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
DELIMITER = "\r\n"


def encode(*args):
    """Pack a series of arguments into a RESP (Redis protocol) command string."""
    parts = ["*", str(len(args)), DELIMITER]
    for arg in args:
        parts.extend(["$", str(len(arg)), DELIMITER, arg, DELIMITER])
    return "".join(parts)
def decode(data):
    """Dispatch a RESP reply to the parser matching its leading type marker.

    Returns None for an unrecognized marker.
    """
    dispatch = {
        "*": parse_multi_chunked,
        "$": parse_chunked,
        "+": parse_status,
        "-": parse_error,
        ":": parse_integer,
    }
    parser = dispatch.get(data[0])
    if parser is not None:
        return parser(data)
def parse_stream(data):
    """Parse a concatenated stream of RESP multi-bulk commands.

    Returns one space-joined command string per '*<n>' block in *data*.
    """
    cursor = 0
    data_len = len(data)
    result = []
    while cursor < data_len:
        # Re-slice the remaining stream; the offsets below are relative to pdata.
        pdata = data[cursor:]
        index = pdata.find(DELIMITER)
        count = int(pdata[1:index])
        cmd = ''
        start = index + len(DELIMITER)
        for i in range(count):
            # parse_chunked returns [payload, end_offset] when start != 0.
            chunk, length = parse_chunked(pdata, start)
            start = length + len(DELIMITER)
            cmd += " " + chunk
        cursor += start
        result.append(cmd.strip())
    return result
def parse_multi_chunked(data):
    """Parse a RESP multi-bulk reply ('*<n>') into a list of bulk strings."""
    index = data.find(DELIMITER)
    count = int(data[1:index])
    result = []
    start = index + len(DELIMITER)
    for i in range(count):
        # parse_chunked returns [payload, end_offset] when start != 0.
        chunk, length = parse_chunked(data, start)
        start = length + len(DELIMITER)
        result.append(chunk)
    return result
def parse_chunked(data, start=0):
    """Parse one RESP bulk string ('$<len>') beginning at *start*.

    Returns the payload string when *start* is 0, otherwise a
    [payload, end_offset] pair. A nil bulk string ($-1) yields None at the
    end of the data, or (None, index) mid-stream.
    """
    index = data.find(DELIMITER, start)
    if index == -1:
        index = start
    length = int(data[start + 1:index])
    if length == -1:
        # Nil bulk string: the result shape depends on where it sits in the stream.
        if index + len(DELIMITER) == len(data):
            return None
        else:
            return None, index
    else:
        result = data[index + len(DELIMITER):index + len(DELIMITER) + length]
        return result if start == 0 else [result, index + len(DELIMITER) + length]
def parse_status(data):
    """Parse a RESP status reply: '+OK' -> [True, 'OK']."""
    body = data[1:]
    return [True, body]
def parse_error(data):
    """Parse a RESP error reply: '-ERR x' -> [False, 'ERR x']."""
    body = data[1:]
    return [False, body]
def parse_integer(data):
    """Parse a RESP integer reply: ':42' -> [42]."""
    return [int(data[1:])]
if __name__ == '__main__':
    # Smoke-test: round-trip a command and parse a sample pipelined stream.
    print(decode(encode("ping")))
    print((encode("set some value")))
    print(encode("foobar"))
    data = '*3\r\n$3\r\nSET\r\n$15\r\nmemtier-8232902\r\n$2\r\nxx\r\n*3\r\n$3\r\nSET\r\n$15\r\nmemtier-8232902\r\n$2\r\nxx\r\n*3\r\n$3\r\nSET\r\n$15\r\nmemtier-7630684\r\n$3\r\nAAA\r\n'
    print(parse_stream(data))
|
# BUG FIX: the conversion renamed this constant to `delimiter`, but every use
# in this section still reads DELIMITER; keep the conventional UPPER_CASE
# constant name so those references resolve to the value defined here.
DELIMITER = '\r\n'
def encode(*args):
    """Pack a series of arguments into a valid Redis (RESP) command string."""
    result = []
    result.append('*')
    result.append(str(len(args)))
    result.append(DELIMITER)
    for arg in args:
        result.append('$')
        result.append(str(len(arg)))
        result.append(DELIMITER)
        result.append(arg)
        result.append(DELIMITER)
    return ''.join(result)
def decode(data):
    """Dispatch a RESP reply to the parser matching its leading type marker."""
    # NOTE(review): `index` is computed but never used below; only the first
    # character (the RESP type marker) drives the dispatch.
    (processed, index) = (0, data.find(DELIMITER))
    if index == -1:
        index = len(data)
    term = data[processed]
    if term == '*':
        return parse_multi_chunked(data)
    elif term == '$':
        return parse_chunked(data)
    elif term == '+':
        return parse_status(data)
    elif term == '-':
        return parse_error(data)
    elif term == ':':
        return parse_integer(data)
def parse_stream(data):
    """Parse a concatenated stream of RESP multi-bulk commands.

    Returns one space-joined command string per '*<n>' block in *data*.
    """
    cursor = 0
    data_len = len(data)
    result = []
    while cursor < data_len:
        # Re-slice the remaining stream; the offsets below are relative to pdata.
        pdata = data[cursor:]
        index = pdata.find(DELIMITER)
        count = int(pdata[1:index])
        cmd = ''
        start = index + len(DELIMITER)
        for i in range(count):
            # parse_chunked returns [payload, end_offset] when start != 0.
            (chunk, length) = parse_chunked(pdata, start)
            start = length + len(DELIMITER)
            cmd += ' ' + chunk
        cursor += start
        result.append(cmd.strip())
    return result
def parse_multi_chunked(data):
    """Parse a RESP multi-bulk reply ('*<n>') into a list of bulk strings."""
    index = data.find(DELIMITER)
    count = int(data[1:index])
    result = []
    start = index + len(DELIMITER)
    for i in range(count):
        # parse_chunked returns [payload, end_offset] when start != 0.
        (chunk, length) = parse_chunked(data, start)
        start = length + len(DELIMITER)
        result.append(chunk)
    return result
def parse_chunked(data, start=0):
    """Parse one RESP bulk string ('$<len>') beginning at *start*.

    Returns the payload string when *start* is 0, otherwise a
    [payload, end_offset] pair. A nil bulk string ($-1) yields None at the
    end of the data, or (None, index) mid-stream.
    """
    index = data.find(DELIMITER, start)
    if index == -1:
        index = start
    length = int(data[start + 1:index])
    if length == -1:
        # Nil bulk string: the result shape depends on where it sits in the stream.
        if index + len(DELIMITER) == len(data):
            return None
        else:
            return (None, index)
    else:
        result = data[index + len(DELIMITER):index + len(DELIMITER) + length]
        return result if start == 0 else [result, index + len(DELIMITER) + length]
def parse_status(data):
    """Parse a RESP status reply: '+OK' -> [True, 'OK']."""
    body = data[1:]
    return [True, body]
def parse_error(data):
    """Parse a RESP error reply: '-ERR x' -> [False, 'ERR x']."""
    body = data[1:]
    return [False, body]
def parse_integer(data):
    """Parse a RESP integer reply: ':42' -> [42]."""
    return [int(data[1:])]
if __name__ == '__main__':
    # Smoke-test: round-trip a command and parse a sample pipelined stream.
    print(decode(encode('ping')))
    print(encode('set some value'))
    print(encode('foobar'))
    data = '*3\r\n$3\r\nSET\r\n$15\r\nmemtier-8232902\r\n$2\r\nxx\r\n*3\r\n$3\r\nSET\r\n$15\r\nmemtier-8232902\r\n$2\r\nxx\r\n*3\r\n$3\r\nSET\r\n$15\r\nmemtier-7630684\r\n$3\r\nAAA\r\n'
    print(parse_stream(data))
|
# Print a greeting for each name in a fixed list.
def hello(name):
    print('hello ' + name)
for person in ('bob', 'alice'):
    hello(person)
|
def hello(name):
    """Print a greeting for *name*."""
    greeting = 'hello ' + name
    print(greeting)
for person in ['bob', 'alice']:
    hello(person)
|
# Read a list of general ids, a sorted list of banned ids, and q queries;
# binary-search each query against the banned list.
n = int(input())
gerais = [token for token in input().split()]
m = int(input())
proib = [token for token in input().split()]
q = int(input())
arr = [token for token in input().split()]
for idx in range(q):
    alvo = arr[idx]
    lo, hi = 0, m - 1
    encontrado = False
    while lo <= hi:
        mid = (lo + hi) // 2
        if proib[mid] == alvo:
            encontrado = True
            break
        if alvo < proib[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    print('Proibido' if encontrado else 'Geral')
|
# Classify each queried id as banned ('Proibido') or general ('Geral') by
# binary-searching the sorted banned list.
n = int(input())
gerais = [e for e in input().split()]
m = int(input())
proib = [e for e in input().split()]
q = int(input())
arr = [e for e in input().split()]
for consulta in range(q):
    chave = arr[consulta]
    inicio, fim = 0, m - 1
    achou = False
    while inicio <= fim:
        centro = (inicio + fim) // 2
        if proib[centro] == chave:
            achou = True
            break
        elif chave < proib[centro]:
            fim = centro - 1
        else:
            inicio = centro + 1
    print('Proibido' if achou else 'Geral')
|
# Start from 1 and apply n multiply/divide operations read from stdin.
n = int(input())
calculadora = 1
for _ in range(n):
    token, operacao = input().split()
    quantia = int(token)
    if operacao == '/':
        calculadora = calculadora / quantia
    else:
        calculadora = calculadora * quantia
print("{0:.0f}".format(calculadora))
|
# Apply n '*' or '/' operations (read from stdin) to a running total of 1.
n = int(input())
calculadora = 1
for _ in range(n):
    raw_value, operacao = input().split()
    if operacao == '/':
        calculadora = calculadora / int(raw_value)
    else:
        calculadora = calculadora * int(raw_value)
print('{0:.0f}'.format(calculadora))
|
# Application settings (module-level constants).
# NOTE(review): semantics inferred from names only — confirm against the
# application code that consumes these settings.
BEHIND_PROXY = True
SWAGGER_BASEPATH = ""
DEFAULT_DATABASE = "dev"
DATABASES = ["test"]
ENV = "development"
DEBUG = True
|
# Application settings. NOTE(review): these are lower-case while module-level
# constants are conventionally UPPER_SNAKE_CASE (PEP 8); renaming would change
# the module's public names, so only flagging it here.
behind_proxy = True
swagger_basepath = ''
default_database = 'dev'
databases = ['test']
env = 'development'
debug = True
|
class Solution:
    def findMaxValueOfEquation(self, points: List[List[int]], k: int) -> int:
        """Return max(yi + yj + |xi - xj|) over pairs with |xi - xj| <= k.

        ``points`` is sorted by x; a min-heap keyed on (x - y) tracks the
        best still-in-window left point.
        """
        best = -math.inf
        window = []  # min-heap of (x - y, -x) for candidate left points
        for x, y in points:
            # Evict left points whose x-distance to the current x exceeds k.
            while window and x + window[0][1] > k:
                heapq.heappop(window)
            if window:
                best = max(best, x + y - window[0][0])
            heapq.heappush(window, (x - y, -x))
        return best
|
class Solution:
    def find_max_value_of_equation(self, points: List[List[int]], k: int) -> int:
        """Return max(yi + yj + |xi - xj|) over pairs with |xi - xj| <= k.

        Fix: the body referenced ``maxHeap`` while the local was named
        ``max_heap``, raising NameError on the first iteration; the name is
        now consistent throughout.
        """
        ans = -math.inf
        max_heap = []  # min-heap of (x - y, -x) for candidate left points
        for (x, y) in points:
            # Evict points whose x-distance to the current point exceeds k.
            while max_heap and x + max_heap[0][1] > k:
                heapq.heappop(max_heap)
            if max_heap:
                ans = max(ans, x + y - max_heap[0][0])
            heapq.heappush(max_heap, (x - y, -x))
        return ans
|
class Feature:
    """Container for a feature expression and its associated metadata."""
    def __init__(self, value, string, infix_string, size=0, fitness=1, original_variable=False):
        # Evaluated value associated with the feature.
        self.value = value
        # Fitness score of the feature (defaults to 1).
        self.fitness = fitness
        # Printable representation of the feature expression.
        self.string = string
        # Infix-notation representation of the expression.
        self.infix_string = infix_string
        # Size of the feature expression (defaults to 0).
        self.size = size
        # Presumably marks features that are raw input variables rather than
        # constructed expressions — confirm at call sites.
        self.original_variable = original_variable
    def __str__(self):
        # A feature prints as its string representation.
        return self.string
|
class Feature:
    """Container for a feature expression and its associated metadata."""
    def __init__(self, value, string, infix_string, size=0, fitness=1, original_variable=False):
        # Evaluated value associated with the feature.
        self.value = value
        # Fitness score of the feature (defaults to 1).
        self.fitness = fitness
        # Printable representation of the feature expression.
        self.string = string
        # Infix-notation representation of the expression.
        self.infix_string = infix_string
        # Size of the feature expression (defaults to 0).
        self.size = size
        # Presumably marks features that are raw input variables rather than
        # constructed expressions — confirm at call sites.
        self.original_variable = original_variable
    def __str__(self):
        # A feature prints as its string representation.
        return self.string
|
def my_range(n):
    """Yield the integers 0 through n inclusive, then a sentinel message."""
    current = 0
    while current <= n:
        yield current
        current += 1
    yield 'there are no values left'
# my_range(4) yields six values (0..4 plus the exhaustion message), but the
# loop asked for seven, so the last next() raised an uncaught StopIteration
# and crashed the script.  Stop cleanly once the generator is exhausted.
gen = my_range(4)
for i in range(7):
    try:
        print(next(gen))
    except StopIteration:
        break
|
def my_range(n):
    """Yield 0, 1, ..., n, then a final exhaustion-message string."""
    value = 0
    while value <= n:
        yield value
        value = value + 1
    yield 'there are no values left'
# The generator produces six values (0..4 plus the message), but seven
# next() calls were made, so the final one raised an uncaught StopIteration.
# Guard the call and stop once the generator is exhausted.
gen = my_range(4)
for i in range(7):
    try:
        print(next(gen))
    except StopIteration:
        break
|
"""
pattern_stringHEAD_stringTAIL_list( "brianxxxcvbpythonvvvvvvvvvgghhbrianpppfgpython","brian","python") returns ["xxxcvb","pppfg"]
pattern_stringHEAD_stringTAIL_list( "susanvenezuelastronggghhsusancanadastrong","susan","strong") returns ["venezuela","canada"]
pattern_stringHEAD_stringTAIL_list( "boatxvmotorvvvmotorvgghmotor","boat","motor") returns ["xv"]
"""
def pattern_stringHEAD_stringTAIL_list(test_string, head_string, tail_string):
    """Return the substrings of *test_string* found between successive
    occurrences of *head_string* and *tail_string* (see the module docstring
    for examples).

    Fixes over the previous revision:
      * ``number_of_frames_string`` was an undefined name (NameError);
      * ``len(head_string)`` was applied inside the subscript
        (``ptr_start_list[x + len(head_string)]``) instead of being added to
        the pointer value;
      * a string was concatenated onto a list (TypeError); each frame is now
        appended as a list element.
    """
    ptr_start_list = []  # indices where a head match begins
    ptr_end_list = []  # indices where a tail match begins
    frame_string_list = []  # substrings found between head and tail
    for j in range(len(test_string)):
        if test_string[j:j + len(head_string)] == head_string:
            ptr_start_list.append(j)
        if test_string[j:j + len(tail_string)] == tail_string:
            ptr_end_list.append(j)
    # A frame needs both a head and a tail; pair them up positionally.
    number_of_frames = min(len(ptr_start_list), len(ptr_end_list))
    for x in range(number_of_frames):
        frame_string_list.append(
            test_string[ptr_start_list[x] + len(head_string):ptr_end_list[x]])
    return frame_string_list
|
"""
pattern_stringHEAD_stringTAIL_list( "brianxxxcvbpythonvvvvvvvvvgghhbrianpppfgpython","brian","python") returns ["xxxcvb","pppfg"]
pattern_stringHEAD_stringTAIL_list( "susanvenezuelastronggghhsusancanadastrong","susan","strong") returns ["venezuela","canada"]
pattern_stringHEAD_stringTAIL_list( "boatxvmotorvvvmotorvgghmotor","boat","motor") returns ["xv"]
"""
def pattern_string_head_string_tail_list(test_string, head_string, tail_string):
    """Return the substrings of *test_string* found between successive
    occurrences of *head_string* and *tail_string* (see the module docstring
    for examples).

    Fixes over the previous revision:
      * ``number_of_frames_string`` was an undefined name (NameError);
      * the start offset misapplied ``len(head_string)`` inside the list
        subscript instead of adding it to the pointer value;
      * a string was concatenated onto a list (TypeError); frames are now
        appended individually.
    """
    ptr_start_list = []  # indices where a head match begins
    ptr_end_list = []  # indices where a tail match begins
    frame_string_list = []  # substrings found between head and tail
    for j in range(len(test_string)):
        if test_string[j:j + len(head_string)] == head_string:
            ptr_start_list.append(j)
        if test_string[j:j + len(tail_string)] == tail_string:
            ptr_end_list.append(j)
    # Only complete head/tail pairs produce frames.
    number_of_frames = min(len(ptr_start_list), len(ptr_end_list))
    for x in range(number_of_frames):
        frame_string_list.append(
            test_string[ptr_start_list[x] + len(head_string):ptr_end_list[x]])
    return frame_string_list
|
def merge_values(original: dict, new_values: dict):
    """Recursively merge *new_values* into *original* (in place) and return it.

    Nested dictionaries are merged key-by-key so existing sub-keys in
    *original* survive; any non-dict value simply overwrites.
    """
    for key, value in new_values.items():
        if not isinstance(value, dict):
            original[key] = value
        else:
            existing = original.get(key, dict())
            original[key] = merge_values(existing, value)
    return original
|
def merge_values(original: dict, new_values: dict):
    """Merge *new_values* into *original* in place and return *original*.

    When both sides hold a dict under the same key, recurse so the old
    nested entries are preserved; otherwise the new value wins.
    """
    for key in new_values:
        incoming = new_values[key]
        if isinstance(incoming, dict):
            original[key] = merge_values(original.get(key, {}), incoming)
        else:
            original[key] = incoming
    return original
|
# Advent of code Year 2021 Day 02 solution
# Author = Anmol Gupta
# Date = December 2021
# Read the puzzle input; `input` intentionally shadows the builtin
# (kept as-is to preserve the module's public name).
input = list()
with open("input.txt", "r") as input_file:
    input = list(input_file)
def get_command(line):
    """Split an input line into its (action, magnitude) pair."""
    action, magnitude = line.strip().split()
    return (action, int(magnitude))
input_commands = [get_command(line) for line in input]
# Part 1: "up"/"down" change depth directly.
horizontal_position = 0
depth = 0
for action, magnitude in input_commands:
    if action == "down":
        depth += magnitude
    elif action == "up":
        depth -= magnitude
    elif action == "forward":
        horizontal_position += magnitude
print("Part One : " + str(horizontal_position * depth))
# Part 2: "up"/"down" change aim; "forward" moves and dives by aim.
horizontal_position = 0
depth = 0
aim = 0
for action, magnitude in input_commands:
    if action == "down":
        aim += magnitude
    elif action == "up":
        aim -= magnitude
    elif action == "forward":
        horizontal_position += magnitude
        depth += aim * magnitude
print("Part Two : " + str(horizontal_position * depth))
|
# Read the puzzle input; `input` deliberately shadows the builtin here
# (name kept so downstream code is unaffected).
input = list()
with open('input.txt', 'r') as input_file:
    input = list(input_file)
def get_command(line):
    """Split an input line into its (action, magnitude) pair.

    Fix: the local was renamed to ``split_input`` but the return statement
    still read ``splitInput``, raising NameError on every call.
    """
    split_input = line.strip().split()
    return (split_input[0], int(split_input[1]))
input_commands = [get_command(line) for line in input]
# Part 1: 'up'/'down' adjust depth directly.
horizontal_position = 0
depth = 0
for (action, magnitude) in input_commands:
    if action == 'down':
        depth += magnitude
    elif action == 'up':
        depth -= magnitude
    elif action == 'forward':
        horizontal_position += magnitude
print('Part One : ' + str(horizontal_position * depth))
# Part 2: 'up'/'down' adjust aim; 'forward' moves and dives by aim.
horizontal_position = 0
depth = 0
aim = 0
for (action, magnitude) in input_commands:
    if action == 'down':
        aim += magnitude
    elif action == 'up':
        aim -= magnitude
    elif action == 'forward':
        horizontal_position += magnitude
        depth += aim * magnitude
print('Part Two : ' + str(horizontal_position * depth))
|
class nullcontext:
    """A no-op context manager, standing in for `contextlib.nullcontext`
    on Python versions before 3.7.
    """
    def __enter__(self):
        return None
    def __exit__(self, exc_type, exc_val, exc_tb):
        return None
|
class Nullcontext:
    """Do-nothing context manager; a stand-in for `contextlib.nullcontext`
    on Python versions before 3.7.
    """
    def __enter__(self):
        return None
    def __exit__(self, exc_type, exc_val, exc_tb):
        return None
|
# node-gyp / GYP build description (a bare Python dict literal): builds the
# 'node_libtiepie' native addon from src/libtiepie.cc with NAN headers;
# links -ltiepie on Linux, and on Windows adds a dynamic-loader source,
# the LIBTIEPIE_DYNAMIC define, and bundled libtiepie headers.
{
    'targets':
    [
        {
            'target_name': 'node_libtiepie',
            'sources':
            [
                'src/libtiepie.cc'
            ],
            'include_dirs':
            [
                '<!(node -e "require(\'nan\')")'
            ],
            'conditions':
            [
                [
                    'OS=="linux"',
                    {
                        'libraries': ['-ltiepie']
                    }
                ],
                [
                    'OS=="win"',
                    {
                        'sources':
                        [
                            'src/libtiepieloader.cc'
                        ],
                        'defines':
                        [
                            'LIBTIEPIE_DYNAMIC'
                        ],
                        'include_dirs':
                        [
                            '<(module_root_dir)/deps/libtiepie/include'
                        ]
                    }
                ]
            ]
        }
    ]
}
|
# node-gyp / GYP build description collapsed onto one line (appears to be
# binding.gyp content consumed by the gyp tool, not by Python code — confirm).
{'targets': [{'target_name': 'node_libtiepie', 'sources': ['src/libtiepie.cc'], 'include_dirs': ['<!(node -e "require(\'nan\')")'], 'conditions': [['OS=="linux"', {'libraries': ['-ltiepie']}], ['OS=="win"', {'sources': ['src/libtiepieloader.cc'], 'defines': ['LIBTIEPIE_DYNAMIC'], 'include_dirs': ['<(module_root_dir)/deps/libtiepie/include']}]]}]}
|
def typist(s):
    """Count keystrokes needed to type *s*: one per character plus one
    whenever the Shift state (uppercase vs lowercase) differs from the
    previous character's.  The typist starts in lowercase mode.
    """
    shift_state = 0
    keystrokes = 0
    for ch in s:
        needed = 1 if ch.isupper() else 0
        keystrokes += 1
        if needed != shift_state:
            keystrokes += 1
        shift_state = needed
    return keystrokes
|
def typist(s):
    """Return the keystroke count to type *s*, paying one extra stroke each
    time the case (Shift) toggles; starts in lowercase."""
    previous = 0
    total = 0
    for character in s:
        current = int(character.isupper())
        total += 2 if current != previous else 1
        previous = current
    return total
|
class ScopeCache(object):
    """Abstract cache of component instances owned by a scope.

    Subclasses decide which component types they manage and how instances
    are stored and retrieved.
    """
    def __init__(self, name: str):
        """
        :param name: human-readable name
        """
        self.name = name
    def handles_component(self, component: type) -> bool:
        """Return True when this scope manages *component*."""
        raise NotImplementedError
    def is_stored(self, component: type) -> bool:
        """Return True when an instance of *component* is already cached."""
        raise NotImplementedError
    def get(self, component: type) -> object:
        """Return the cached instance of *component*."""
        raise NotImplementedError
    def store(self, component: type, instance: object):
        """Remember *instance* as the cached value for *component*."""
        raise NotImplementedError
class ComponentNotHandledByScopeException(Exception):
    """Raised when a scope is asked about a component it does not handle
    (inferred from the name — confirm at raise sites)."""
    pass
class UninstantiatedObjectException(Exception):
    """Raised when an object is requested before being instantiated
    (inferred from the name — confirm at raise sites)."""
    pass
|
class Scopecache(object):
    """Abstract cache of component instances owned by a single scope.

    Subclasses decide which component types they manage and how their
    instances are stored and looked up.
    """
    def __init__(self, name: str):
        """
        :param name: human-readable name
        """
        self.name = name
    def handles_component(self, component: type) -> bool:
        """Return True when this scope manages *component*."""
        raise NotImplementedError
    def is_stored(self, component: type) -> bool:
        """Return True when an instance of *component* is cached."""
        raise NotImplementedError
    def get(self, component: type) -> object:
        """Return the cached instance of *component*."""
        raise NotImplementedError
    def store(self, component: type, instance: object):
        """Cache *instance* as the value for *component*."""
        raise NotImplementedError
class Componentnothandledbyscopeexception(Exception):
    """Raised when a scope is asked about a component it does not handle
    (inferred from the name — confirm at raise sites)."""
    pass
class Uninstantiatedobjectexception(Exception):
    """Raised when an object is requested before being instantiated
    (inferred from the name — confirm at raise sites)."""
    pass
|
# For each test case, read three angles and report whether they sum to
# exactly 180 degrees.
n = int(input())
for _ in range(n):
    a, b, c = [int(x) for x in input().split()]
    if a + b + c == 180:
        print("YES")
    else:
        print("NO")
|
# Read n triples of angles; print YES when they total 180 degrees, else NO.
n = int(input())
for _case in range(n):
    (a, b, c) = [int(x) for x in input().split()]
    total = a + b + c
    print('YES' if total == 180 else 'NO')
|
# X-ray vs UV luminosity relation: log L_X = beta + gamma * log L_UV.
beta = 9.
gamma = 0.6
# NOTE(review): `logLUV` is not defined in this fragment — it must be
# supplied by surrounding code for the next line to run.
logLX = beta + gamma * logLUV
scatter = 0.4  # 0.35
# LX: monochromatic at 2 keV
# LUV: monochromatic at 2500 AA
|
# X-ray vs UV luminosity relation: log L_X = beta + gamma * log L_UV.
beta = 9.0
gamma = 0.6
# NOTE(review): `logLUV` is undefined in this fragment; it must come from
# elsewhere for this assignment to execute.
log_lx = beta + gamma * logLUV
scatter = 0.4
|
class Solution:
    def isValid(self, s):
        """Return True when every bracket in *s* is properly matched.

        A closing bracket cancels a matching opener on top of the stack;
        anything else is pushed, so any mismatch leaves the stack non-empty.
        """
        if s == '':
            return True
        stack = []
        pairs = {')': '(', ']': '[', '}': '{'}
        for ch in s:
            if stack and pairs.get(ch) == stack[-1]:
                stack.pop()
            else:
                stack.append(ch)
        return len(stack) == 0
if __name__ == '__main__':
    # Quick manual check with an unbalanced sample.
    sample = "{{)}"
    verdict = Solution().isValid(sample)
    print(verdict)
|
class Solution:
    def is_valid(self, s):
        """Return True when every bracket in *s* is properly matched.

        Fix: the loop iterated over ``sList``, which was never defined (the
        local is ``s_list``), raising NameError on any non-empty input.
        """
        if s == '':
            return True
        s_list = list(s)
        stack = []
        for chr in s_list:
            if len(stack) == 0:
                stack.append(chr)
            else:
                # Pop when the closer matches the stack top, otherwise push.
                stack.pop() if chr == ')' and stack[-1] == '(' or (chr == ']' and stack[-1] == '[') or (chr == '}' and stack[-1] == '{') else stack.append(chr)
        return True if len(stack) == 0 else False
if __name__ == '__main__':
    # Fix: the previous revision called `solution().isValid(...)` — the class
    # is named `Solution` and its method `is_valid` — raising NameError.
    s = '{{)}'
    hh = Solution().is_valid(s)
    print(hh)
|
#! /usr/bin/env python
"""
A singleton pattern implemented in python. Adapted from ActiveState Code
Recipe 52558: The Singleton Pattern implemented with Python
http://code.activestate.com/recipes/52558/
"""
class Singleton(object):
    """
    A python singleton
    """
    class SingletonImplementation:
        """
        Implementation of the singleton interface
        """
        def singletonId(self):
            """
            Test method, return singleton id
            """
            return id(self)
    # Storage for the instance reference
    __instance = None
    def __init__(self):
        """
        Create singleton instance
        """
        # Check whether we already have an instance
        if Singleton.__instance is None:
            # Create and remember instance
            Singleton.__instance = Singleton.SingletonImplementation()
        # Store instance reference as the only member in the handle.
        # '_Singleton__instance' is the name-mangled form of the private
        # class attribute; writing through __dict__ bypasses the delegating
        # __setattr__ defined below.
        self.__dict__['_Singleton__instance'] = Singleton.__instance
    def __getattr__(self, attr):
        """
        Delegate access to implementation
        """
        # Only invoked for attributes not found on the handle itself.
        return getattr(self.__instance, attr)
    def __setattr__(self, attr, value):
        """
        Delegate access to implementation
        """
        # Every attribute write lands on the shared implementation object.
        return setattr(self.__instance, attr, value)
|
"""
A singleton pattern implemented in python. Adapted from ActiveState Code
Recipe 52558: The Singleton Pattern implemented with Python
http://code.activestate.com/recipes/52558/
"""
class Singleton(object):
    """
    A python singleton
    """
    class Singletonimplementation:
        """
        Implementation of the singleton interface
        """
        def singleton_id(self):
            """
            Test method, return singleton id
            """
            return id(self)
    # Storage for the shared implementation instance.
    __instance = None
    def __init__(self):
        """
        Create singleton instance
        """
        if Singleton.__instance is None:
            # Fix: the inner class is named `Singletonimplementation`, but the
            # previous revision still instantiated `SingletonImplementation`,
            # raising AttributeError on first construction.
            Singleton.__instance = Singleton.Singletonimplementation()
        # Write through __dict__ so the delegating __setattr__ below is
        # bypassed; '_Singleton__instance' is the mangled private name.
        self.__dict__['_Singleton__instance'] = Singleton.__instance
    def __getattr__(self, attr):
        """
        Delegate access to implementation
        """
        return getattr(self.__instance, attr)
    def __setattr__(self, attr, value):
        """
        Delegate access to implementation
        """
        return setattr(self.__instance, attr, value)
|
class SecretKey:
    """A wrapper class for representing a secret key.

    Typical secret key data looks like [p1, p2, p3, ...] where each p_i is a
    polynomial for one coefficient modulus; polynomial coefficients are taken
    from {-1, 0, 1}, represented in their respective modulus.

    Attributes:
        data: A 2-dim list representing secret key values.
    """
    def __init__(self, data):
        # Keep the raw 2-dim coefficient list as provided.
        self.data = data
|
class Secretkey:
    """Wrapper holding secret-key data.

    The data is typically [p1, p2, p3, ...], one polynomial per coefficient
    modulus, with polynomial elements drawn from {-1, 0, 1} represented in
    their respective modulus.

    Attributes:
        data: A 2-dim list representing secret key values.
    """
    def __init__(self, data):
        # Store the provided 2-dim list unchanged.
        self.data = data
|
# For loops iterate over all elements of an iterable using the
# 'for variable in iterable' syntax.
for i in range(0, 3):
    # i is bound by the for statement and is usable inside the loop body
    print(i)  # prints 0, 1, 2 each on a new line
for i in [10, "Hello", "World"]:
    # Iterating through a list visits each element in order
    print(i)  # prints 10 then Hello and then World each on a new line
|
# Iterate over a numeric range: prints 0, 1, 2 on separate lines.
for i in range(0, 3):
    print(i)
# Iterate over a mixed-type list: prints 10, Hello, World on separate lines.
for i in [10, 'Hello', 'World']:
    print(i)
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
class Fitness:
    def __init__(self, criterion, *args, **kwargs):
        """
        Simplest single criterion fitness object.
        :param criterion: Generic container to store the fitness criterion.
        :type criterion: object
        """
        self.criterion = criterion
    def __gt__(self, other):
        return self.criterion > other.criterion
    def __le__(self, other):
        return not self.__gt__(other)
    def __lt__(self, other):
        # "Less than" means neither greater nor equal.  The previous
        # revision computed `not (gt and eq)` — a De Morgan slip that made
        # __lt__ return True for almost every pair, including when self was
        # strictly greater than other.
        return not (self.__gt__(other) or self == other)
    def __ge__(self, other):
        return self.__gt__(other) or self == other
    def __str__(self):
        return "{}".format(self.criterion)
class MinimizeFitness(Fitness):
    def __init__(self, criterion):
        """
        Single criterion fitness object where a smaller criterion is better.
        Instead of maximizing the criterion, we minimize it.
        :param criterion: Generic container to store the fitness criterion.
        :type criterion: object
        """
        super(MinimizeFitness, self).__init__(criterion)
    def __gt__(self, other):
        # Inverted comparison: a lower criterion ranks as "greater" fitness.
        return self.criterion < other.criterion
class GapsFitness(Fitness):
    def __init__(self, criterion, gaps):
        """
        Dual criterion fitness object.
        The maximization *criterion* is compared first; on a tie the second
        value, *gaps*, acts as an inverse criterion (fewer gaps win).
        :param criterion: Generic container to store the fitness criterion.
        :type criterion: object
        :param gaps: Secondary criterion, minimized on ties.
        """
        super(GapsFitness, self).__init__(criterion)
        self.gaps = gaps
    def __gt__(self, other):
        # Primary criterion decides; gaps break ties (lower is better).
        if self.criterion != other.criterion:
            return self.criterion > other.criterion
        return self.gaps < other.gaps
    def __str__(self):
        return "{}\t{}".format(self.criterion, self.gaps)
|
class Fitness:
    def __init__(self, criterion, *args, **kwargs):
        """
        Simplest single criterion fitness object.
        :param criterion: Generic container to store the fitness criterion.
        :type criterion: object
        """
        self.criterion = criterion
    def __gt__(self, other):
        return self.criterion > other.criterion
    def __le__(self, other):
        return not self.__gt__(other)
    def __lt__(self, other):
        # Less-than is "neither greater nor equal"; the previous form
        # `not (gt and eq)` was a De Morgan error that returned True even
        # when self was strictly greater than other.
        return not (self.__gt__(other) or self == other)
    def __ge__(self, other):
        return self.__gt__(other) or self == other
    def __str__(self):
        return '{}'.format(self.criterion)
class Minimizefitness(Fitness):
    def __init__(self, criterion):
        """
        Single criterion fitness object where a smaller criterion is better
        (the criterion is minimized instead of maximized).

        Fix: super() was still called with the old class name
        ``MinimizeFitness``, which is undefined here, raising NameError on
        every instantiation; it now names this class.
        :param criterion: Generic container to store the fitness criterion.
        :type criterion: object
        """
        super(Minimizefitness, self).__init__(criterion)
    def __gt__(self, other):
        # Inverted comparison: a lower criterion ranks as "greater" fitness.
        return self.criterion < other.criterion
class Gapsfitness(Fitness):
    def __init__(self, criterion, gaps):
        """
        Dual criterion fitness object: *criterion* is maximized and compared
        first; on a tie *gaps* acts as an inverse criterion (fewer gaps win).

        Fix: super() referenced the old class name ``GapsFitness``, which is
        undefined here, raising NameError on every instantiation; it now
        names this class.
        :param criterion: Generic container to store the fitness criterion.
        :type criterion: object
        :param gaps: Secondary criterion, minimized on ties.
        """
        super(Gapsfitness, self).__init__(criterion)
        self.gaps = gaps
    def __gt__(self, other):
        # Primary criterion decides; gaps break ties (lower is better).
        if self.criterion != other.criterion:
            return self.criterion > other.criterion
        return self.gaps < other.gaps
    def __str__(self):
        return '{}\t{}'.format(self.criterion, self.gaps)
|
# Compute body-mass index from interactively entered weight and height.
weight = float(input("Please enter weight in kilograms: "))
height = float(input("Please enter height in meters: "))
bmi = weight / (height * height)
print("BMI is: ", bmi)
|
# Read weight and height from stdin, then report the body-mass index.
weight = float(input('Please enter weight in kilograms: '))
height = float(input('Please enter height in meters: '))
indice = weight / (height * height)
print('BMI is: ', indice)
|
"""
A package in which functionality specific to MAGIC H2020 project can be found
The rest of the source code should not depend on MAGIC, being based on "plain"
MuSIASEM and, of course, its evolution inside the MAGIC project.
"""
|
"""
A package in which functionality specific to MAGIC H2020 project can be found
The rest of the source code should not depend on MAGIC, being based on "plain"
MuSIASEM and, of course, its evolution inside the MAGIC project.
"""
|
_base_ = ['./pipelines/rand_aug.py']
# dataset settings
dataset_type = 'ImageNet'
# Per-channel normalization statistics on the 0-255 scale.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# Training pipeline: crop/flip augmentation, RandAugment, RandomErasing,
# then normalization and tensor packing.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='RandomResizedCrop',
        size=224,
        backend='pillow',
        interpolation='bicubic'),
    dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'),
    dict(
        type='RandAugment',
        # NOTE(review): the double-brace form appears to be the config
        # system's cross-file substitution for the inherited
        # `rand_increasing_policies` (not a Python set literal) — confirm
        # against the framework's config documentation.
        policies={{_base_.rand_increasing_policies}},
        num_policies=2,
        total_level=10,
        magnitude_level=9,
        magnitude_std=0.5,
        hparams=dict(
            pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]],
            interpolation='bicubic')),
    dict(
        type='RandomErasing',
        erase_prob=0.25,
        mode='rand',
        min_area_ratio=0.02,
        max_area_ratio=1 / 3,
        # Channel-reversed ([::-1]) copies of the normalization statistics.
        fill_color=img_norm_cfg['mean'][::-1],
        fill_std=img_norm_cfg['std'][::-1]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='ToTensor', keys=['gt_label']),
    dict(type='Collect', keys=['img', 'gt_label'])
]
# Test pipeline: resize (shorter side 248), center-crop to 224, normalize.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='Resize',
        size=(248, -1),
        backend='pillow',
        interpolation='bicubic'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='ImageToTensor', keys=['img']),
    dict(type='Collect', keys=['img'])
]
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        data_prefix='data/imagenet/train',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        data_prefix='data/imagenet/val',
        # ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline),
    test=dict(
        # replace `data/val` with `data/test` for standard test
        type=dataset_type,
        data_prefix='data/imagenet/val',
        # ann_file='data/imagenet/meta/val.txt',
        pipeline=test_pipeline))
|
# ImageNet train/test pipeline configuration collapsed to one statement per
# line.  NOTE(review): `{{_base_.rand_increasing_policies}}` appears to be the
# config loader's cross-file substitution syntax, not a Python set literal —
# confirm against the framework's config documentation.
_base_ = ['./pipelines/rand_aug.py']
dataset_type = 'ImageNet'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [dict(type='LoadImageFromFile'), dict(type='RandomResizedCrop', size=224, backend='pillow', interpolation='bicubic'), dict(type='RandomFlip', flip_prob=0.5, direction='horizontal'), dict(type='RandAugment', policies={{_base_.rand_increasing_policies}}, num_policies=2, total_level=10, magnitude_level=9, magnitude_std=0.5, hparams=dict(pad_val=[round(x) for x in img_norm_cfg['mean'][::-1]], interpolation='bicubic')), dict(type='RandomErasing', erase_prob=0.25, mode='rand', min_area_ratio=0.02, max_area_ratio=1 / 3, fill_color=img_norm_cfg['mean'][::-1], fill_std=img_norm_cfg['std'][::-1]), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='ToTensor', keys=['gt_label']), dict(type='Collect', keys=['img', 'gt_label'])]
test_pipeline = [dict(type='LoadImageFromFile'), dict(type='Resize', size=(248, -1), backend='pillow', interpolation='bicubic'), dict(type='CenterCrop', crop_size=224), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img'])]
data = dict(samples_per_gpu=64, workers_per_gpu=4, train=dict(type=dataset_type, data_prefix='data/imagenet/train', pipeline=train_pipeline), val=dict(type=dataset_type, data_prefix='data/imagenet/val', pipeline=test_pipeline), test=dict(type=dataset_type, data_prefix='data/imagenet/val', pipeline=test_pipeline))
|
# File extensions treated as relevant; the exact consumer is not visible in
# this fragment — confirm at use sites.
RELEVANT_EXTENSIONS = [
    "java",
    "c",
    "cpp",
    "h",
    "py",
    "js",
    "xml",
    "go",
    "rb",
    "php",
    "sh",
    "scale",
    "lua",
    "m",
    "pl",
    "ts",
    "swift",
    "sql",
    "groovy",
    "erl",
    "swf",
    "vue",
    "bat",
    "s",
    "ejs",
    "yaml",
    "yml",
    "jar",
]
# Hosts that are allowed to be contacted/linked; inline notes mark entries
# kept for testing.
ALLOWED_SITES = [
    "for.testing.purposes",
    "lists.apache.org",
    "just.an.example.site",
    "one.more.example.site",
    "non-existing-url.com",  # for testing.
    "jvndb.jvn.jp",  # for trying out: usually not available, but not always; a good example either way
]
|
# Lower-case variants of the extension/site lists.  NOTE(review): module-level
# constants are conventionally UPPER_SNAKE_CASE (PEP 8); renaming would change
# the module's public names, so only noting it here.
relevant_extensions = ['java', 'c', 'cpp', 'h', 'py', 'js', 'xml', 'go', 'rb', 'php', 'sh', 'scale', 'lua', 'm', 'pl', 'ts', 'swift', 'sql', 'groovy', 'erl', 'swf', 'vue', 'bat', 's', 'ejs', 'yaml', 'yml', 'jar']
allowed_sites = ['for.testing.purposes', 'lists.apache.org', 'just.an.example.site', 'one.more.example.site', 'non-existing-url.com', 'jvndb.jvn.jp']
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.