content | type
---|---
"""
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao.
Click the mouse to start the game.
The game ends when no lives remain or all bricks are cleared.
"""
from campy.gui.events.timer import pause
from breakoutgraphics import BreakoutGraphics
from campy.gui.events.mouse import onmouseclicked
FRAME_RATE = 1000 / 120 # 120 frames per second
NUM_LIVES = 3 # Number of attempts
# global variables
start_move = False
bounce_back_from_paddle = False
def main():
global start_move
global bounce_back_from_paddle
graphics = BreakoutGraphics()
lives = NUM_LIVES
bricks_number = graphics.brick_cols * graphics.brick_rows
onmouseclicked(start)
graphics_vx = graphics.get_ball_x_velocity()
graphics_vy = graphics.get_ball_y_velocity()
while True:
if start_move is True:
graphics.ball.move(graphics_vx, graphics_vy)
if graphics.ball.x <= 0 or (graphics.ball.x + graphics.ball.width) >= graphics.window.width:
graphics_vx = -graphics_vx
bounce_back_from_paddle = False
if graphics.ball.y <= 0:
graphics_vy = -graphics_vy
bounce_back_from_paddle = False
if graphics.collisions_paddle():
if bounce_back_from_paddle is False:
bounce_back_from_paddle = True
graphics_vy = -graphics_vy
if graphics.collisions_bricks():
removal = graphics.collisions_bricks()
bricks_number -= 1
graphics.window.remove(removal)
graphics_vy = -graphics_vy
bounce_back_from_paddle = False
if graphics.ball.y > graphics.window.height:
lives -= 1
graphics.reset_ball()
start_move = False
if lives == 0:
break
if bricks_number == 0:
graphics.reset_ball()
break
pause(FRAME_RATE)
def start(event):
global start_move
start_move = True
if __name__ == '__main__':
main()
|
python
|
#!/usr/bin/env python
'''
jRAT Rat Config Decoder
'''
__description__ = 'jRAT Rat Config Extractor'
__author__ = 'Kevin Breen http://techanarchy.net http://malwareconfig.com'
__version__ = '0.3'
__date__ = '2015/04/03'
#Standard Imports Go Here
import os
import sys
from base64 import b64decode
import string
from zipfile import ZipFile
from optparse import OptionParser
from io import BytesIO
#Non Standard Imports
try:
from Crypto.Cipher import AES, DES3
except ImportError:
print("[+] Couldn't Import Cipher, try 'sudo pip install pycrypto'")
# Main Decode Function Goes Here
'''
data is the raw contents of the file read from disk.
Must return a python dict of values.
'''
def run(data):
print("[+] Extracting Data from Jar")
enckey, conf = get_parts(data)
    if enckey is None:
return
print("[+] Decoding Config with Key: {0}".format(enckey.encode('hex')))
if len(enckey) == 16:
# Newer versions use a base64 encoded config.dat
if '==' in conf: # this is not a great test but should work 99% of the time
b64_check = True
else:
b64_check = False
if b64_check:
raw_config = new_aes(conf, enckey)
else:
raw_config = old_aes(conf, enckey)
if len(enckey) in [24, 32]:
raw_config = old_des(conf, enckey)
config_dict = parse_config(raw_config, enckey)
return config_dict
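# A minimal usage sketch (hypothetical path), mirroring the CLI entry point below:
#   with open("sample.jar", "rb") as fh:
#       config = run(fh.read())
#   # config is a dict of decoded jRAT settings, or None if extraction failed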
#Helper Functions Go Here
# This extracts the Encryption Key and Config File from the Jar and/or Dropper
def get_parts(data):
    new_zip = BytesIO(data)
enckey = None
dropper = None
conf = None
try:
with ZipFile(new_zip, 'r') as zip:
for name in zip.namelist(): # get all the file names
if name == "key.dat": # this file contains the encrytpion key
enckey = zip.read(name)
if name == "enc.dat": # if this file exists, jrat has an installer / dropper
dropper = zip.read(name)
if name == "config.dat": # this is the encrypted config file
conf = zip.read(name)
except:
print("[+] Dropped File is not Jar File starts with Hex Chars: {0}".format(data[:5].encode('hex')))
return None, None
if enckey and conf:
return enckey, conf
elif enckey and dropper:
newkey, conf = get_dropper(enckey, dropper)
return newkey, conf
else:
return None, None
# This extracts the Encryption Key and New conf from a 'Dropper' jar
def get_dropper(enckey, dropper):
try:
split = enckey.split('\x2c')
key = split[0][:16]
print("[+] Dropper Detected")
for x in split: # grab each line of the config and decode it.
try:
drop = b64decode(x).decode('hex')
print(" [-] {0}".format(drop).replace('\x0d\x0a',''))
except:
drop = b64decode(x[16:]).decode('hex')
print(" [-] {0}".format(drop))
new_zipdata = decrypt_aes(key, dropper)
new_key, conf = get_parts(new_zipdata)
return new_key, conf
except:
return None, None
# Returns only printable chars
def string_print(line):
return ''.join((char for char in line if 32 < ord(char) < 127))
# Messy Messy Messy
def messy_split(long_line):
# this is a messy way to split the data but it works for now.
'''
Split on = gives me the right sections but deletes the b64 padding
use modulo math to restore padding.
return new list.
'''
new_list = []
old_list = long_line.split('=')
for line in old_list:
if len(line) != 0:
line += "=" * ((4 - len(line) % 4) % 4)
new_list.append(line)
return new_list
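# Illustration of the padding arithmetic above (hypothetical chunk lengths):
# a 10-char base64 chunk gets (4 - 10 % 4) % 4 == 2 '=' characters appended,
# while a 12-char chunk gets (4 - 12 % 4) % 4 == 0, i.e. no padding at all.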
# AES Decrypt
def decrypt_aes(enckey, data):
cipher = AES.new(enckey) # set the cipher
    return cipher.decrypt(data) # decrypt the data
# DES Decrypt
def decrypt_des(enckey, data):
    cipher = DES3.new(enckey) # set the cipher
    return cipher.decrypt(data) # decrypt the data
# Process versions 3.2.2 up to 4.2
def old_aes(conf, enckey):
decoded_config = decrypt_aes(enckey, conf)
clean_config = string_print(decoded_config)
raw_config = clean_config.split('SPLIT')
return raw_config
# Process versions 4.2 and later
def new_aes(conf, enckey):
sections = messy_split(conf)
decoded_config = ''
for x in sections:
decoded_config += decrypt_aes(enckey, b64decode(x))
raw_config = string_print(decoded_config).split('SPLIT')
return raw_config
# process versions < 3.2.2
def old_des(conf, enckey):
decoded_config = decrypt_des(enckey, conf)
clean_config = string_print(decoded_config)
raw_config = clean_config.split('SPLIT')
return raw_config
def parse_config(raw_config, enckey):
config_dict = {}
for kv in raw_config:
if kv == '':
continue
kv = string_print(kv)
key, value = kv.split('=')
if key == 'ip':
config_dict['Domain'] = value
if key == 'addresses':
dom_list = value.split(',')
dom_count = 0
for dom in dom_list:
if dom == '':
continue
                config_dict['Domain {0}'.format(dom_count)] = dom.split(':')[0]
                config_dict['Port {0}'.format(dom_count)] = dom.split(':')[1]
dom_count += 1
if key == 'port':
config_dict['Port'] = value
if key == 'os':
config_dict['OS'] = value
if key == 'mport':
config_dict['MPort'] = value
if key == 'perms':
config_dict['Perms'] = value
if key == 'error':
config_dict['Error'] = value
if key == 'reconsec':
config_dict['RetryInterval'] = value
if key == 'ti':
config_dict['TI'] = value
if key == 'pass':
config_dict['Password'] = value
if key == 'id':
config_dict['CampaignID'] = value
if key == 'mutex':
config_dict['Mutex'] = value
if key == 'toms':
config_dict['TimeOut'] = value
if key == 'per':
config_dict['Persistance'] = value
if key == 'name':
config_dict['InstallName'] = value
if key == 'tiemout':
config_dict['TimeOutFlag'] = value
if key == 'debugmsg':
config_dict['DebugMsg'] = value
config_dict["EncryptionKey"] = enckey.encode('hex')
return config_dict
#Recursive Function Goes Here
def runRecursive(folder, output):
counter1 = 0
counter2 = 0
print("[+] Writing Configs to File {0}".format(output))
with open(output, 'a+') as out:
#This line will need changing per Decoder
out.write("Filename,CampaignID,Domain,Port,OS,MPort,Perms,Error,RetryInterval,TI,Password,Mutex,TimeOut,Persistance,InstallName,TimeOutFlag,DebugMsg,EncryptionKey\n")
for server in os.listdir(folder):
if os.path.isfile(os.path.join(folder, server)):
print("[+] Processing File {0}".format(server))
fileData = open(os.path.join(folder,server), 'rb').read()
configOut = run(fileData)
if configOut != None:
configOut["TimeOutFlag"] = ''
#This line will need changing per Decoder
out.write('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12},{13},{14},{15},{16},{17}\n'.format(server,configOut["CampaignID"],configOut["Domain"],configOut["Port"],configOut["OS"],configOut["MPort"],configOut["Perms"],configOut["Error"],configOut["RetryInterval"],configOut["TI"],configOut["Password"],configOut["Mutex"],configOut["TimeOut"],configOut["Persistance"],configOut["InstallName"],configOut["TimeOutFlag"],configOut["DebugMsg"],configOut["EncryptionKey"]))
counter1 += 1
counter2 += 1
print("[+] Decoded {0} out of {1} Files".format(counter1, counter2))
return "Complete"
# Main
if __name__ == "__main__":
parser = OptionParser(usage='usage: %prog inFile outConfig\n' + __description__, version='%prog ' + __version__)
parser.add_option("-r", "--recursive", action='store_true', default=False, help="Recursive Mode")
(options, args) = parser.parse_args()
    # If we don't have args, quit with the help page
if len(args) > 0:
pass
else:
parser.print_help()
sys.exit()
# if we want a recursive extract run this function
if options.recursive == True:
if len(args) == 2:
runRecursive(args[0], args[1])
sys.exit()
else:
print("[+] You need to specify Both Dir to read AND Output File")
parser.print_help()
sys.exit()
    # If not recursive, try to open the file
try:
print("[+] Reading file")
fileData = open(args[0], 'rb').read()
except:
print("[+] Couldn't Open File {0}".format(args[0]))
sys.exit()
#Run the config extraction
print("[+] Searching for Config")
config = run(fileData)
#If we have a config figure out where to dump it out.
if config == None:
print("[+] Config not found")
sys.exit()
    # If you gave me two args, I'm going to assume the 2nd arg is where you want to save the file
if len(args) == 2:
print("[+] Writing Config to file {0}".format(args[1]))
with open(args[1], 'a') as outFile:
for key, value in sorted(config.items()):
clean_value = [x for x in value if x in string.printable]
outFile.write("Key: {0}\t Value: {1}\n".format(key,clean_value))
    # If no second arg, assume you want it printed to the screen
else:
print("[+] Printing Config to screen")
for key, value in sorted(config.items()):
clean_value = [x for x in value if x in string.printable]
print(" [-] Key: {0}\t Value: {1}".format(key,clean_value))
print("[+] End of Config")
|
python
|
import json
import uuid
from datetime import datetime
from sqlalchemy.dialects.postgresql import UUID
from app import db
# person_team = db.Table(
# "person_team",
# db.Column(
# "person_id",
# UUID,
# db.ForeignKey("person.id", ondelete="CASCADE"),
# primary_key=True,
# ),
# db.Column(
# "team_id", UUID, db.ForeignKey("team.id", ondelete="CASCADE"), primary_key=True
# ),
# db.Index("ix_person_team_person_id_team_id", "team_id", "person_id", unique=True),
# )
# person_project = db.Table(
# "person_project",
# db.Column(
# "person_id",
# UUID,
# db.ForeignKey("person.id", ondelete="CASCADE"),
# primary_key=True,
# ),
# db.Column(
# "project_id",
# UUID,
# db.ForeignKey("project.id", ondelete="CASCADE"),
# primary_key=True,
# ),
# db.Index(
# "ix_person_project_person_id_project_id", "project_id", "person_id", unique=True
# ),
# )
class Organisation(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True) # Should this be unique too, or just domain?
domain = db.Column(db.String(), nullable=False, index=True, unique=True)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
grades = db.relationship("Grade", backref="organisation")
locations = db.relationship("Location", backref="organisation")
people = db.relationship("Person", backref="organisation")
practices = db.relationship("Practice", backref="organisation")
programmes = db.relationship("Programme", backref="organisation")
projects = db.relationship("Project", backref="organisation")
roles = db.relationship("Role", backref="organisation")
# Methods
def __init__(self, name, domain):
self.id = str(uuid.uuid4())
self.name = name.strip()
self.domain = domain.strip().lower()
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"domain": self.domain,
"grades": len(self.grades),
"locations": len(self.locations),
"people": len(self.people),
"practices": len(self.practices),
"programmes": len(self.programmes),
"projects": len(self.projects),
"roles": len(self.roles),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"name": self.name,
"domain": self.domain,
}
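# A small construction sketch (hypothetical values; assumes an active
# Flask-SQLAlchemy app context and a migrated schema):
#   org = Organisation(name="Example Ltd", domain="Example.COM")
#   db.session.add(org)
#   db.session.commit()
#   # __init__ normalises the domain, so org.domain == "example.com"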
class Location(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True)
address = db.Column(db.String(), nullable=False)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id", ondelete="CASCADE"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
people = db.relationship("Person", backref="location", lazy=True)
# Methods
def __init__(self, name, address, organisation_id):
self.id = str(uuid.uuid4())
self.name = name.strip().title()
self.address = address.strip()
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"address": self.address,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"people": len(self.people),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {"id": self.id, "name": self.name}
class Grade(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id", ondelete="CASCADE"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
roles = db.relationship("Role", backref="grade", lazy=True)
# Methods
def __init__(self, name, organisation_id):
self.id = str(uuid.uuid4())
self.name = name.strip()
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"roles": len(self.roles),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {"id": self.id, "name": self.name}
class Practice(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True)
head_id = db.Column(UUID, db.ForeignKey("person.id", ondelete="SET NULL"), nullable=True, index=True)
cost_centre = db.Column(db.String(), nullable=True)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id", ondelete="CASCADE"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
head = db.relationship("Person", uselist=False)
roles = db.relationship("Role", backref="practice", lazy=True)
# Methods
def __init__(self, name, head_id, cost_centre, organisation_id):
self.id = str(uuid.uuid4())
self.name = name.strip().title()
self.head_id = str(uuid.UUID(head_id, version=4)) if head_id else None
self.cost_centre = cost_centre.strip() if cost_centre else None
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"head": {
"id": self.head.id,
"name": self.head.name,
}
if self.head
else None,
"cost_centre": self.cost_centre,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"roles": len(self.roles),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"name": self.name,
"head": {
"id": self.head.id,
"name": self.head.name,
}
if self.head
else None,
}
class Role(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
title = db.Column(db.String(), nullable=False, index=True)
grade_id = db.Column(UUID, db.ForeignKey("grade.id", ondelete="CASCADE"), nullable=False)
practice_id = db.Column(UUID, db.ForeignKey("practice.id", ondelete="CASCADE"), nullable=True)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id", ondelete="CASCADE"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
people = db.relationship("Person", backref="role", lazy=True)
# Methods
def __init__(self, title, grade_id, practice_id, organisation_id):
self.id = str(uuid.uuid4())
self.title = title.strip()
self.grade_id = str(uuid.UUID(grade_id, version=4))
self.practice_id = str(uuid.UUID(practice_id, version=4)) if practice_id else None
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"title": self.title,
"grade": {"id": self.grade.id, "name": self.grade.name},
"practice": self.practice.list_item() if self.practice else None,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"people": len(self.people),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"title": self.title,
"grade": self.grade.list_item(),
"practice": {"id": self.practice.id, "name": self.practice.name} if self.practice else None,
}
class Person(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String, nullable=False)
role_id = db.Column(UUID, db.ForeignKey("role.id", ondelete="CASCADE"), nullable=False, index=True)
organisation_id = db.Column(
UUID,
db.ForeignKey("organisation.id", ondelete="CASCADE"),
nullable=False,
index=True,
)
email_address = db.Column(db.String(254), nullable=False, unique=True)
full_time_equivalent = db.Column(db.Float, nullable=True)
location_id = db.Column(
UUID,
db.ForeignKey("location.id", ondelete="SET NULL"),
nullable=True,
index=True,
)
employment = db.Column(db.String, nullable=True)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
# teams = db.relationship(
# "Team",
# secondary=person_team,
# lazy=True,
# backref=db.backref("people", lazy=True),
# )
# projects = db.relationship(
# "Project",
# secondary=person_project,
# lazy=True,
# backref=db.backref("people", lazy=True),
# )
# Methods
def __init__(
self,
name,
role_id,
organisation_id,
email_address,
full_time_equivalent,
location_id,
employment,
):
self.id = str(uuid.uuid4())
self.name = name.strip().title()
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.role_id = str(uuid.UUID(role_id, version=4))
self.email_address = email_address.strip().lower()
self.full_time_equivalent = full_time_equivalent
self.location_id = str(uuid.UUID(location_id, version=4))
self.employment = employment.strip()
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"role": self.role.list_item(),
"email_address": self.email_address,
"full_time_equivalent": self.full_time_equivalent,
"location": self.location.list_item(),
"employment": self.employment,
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"name": self.name,
"role": self.role.list_item(),
"location": self.location.list_item(),
}
class Programme(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True)
manager_id = db.Column(UUID, db.ForeignKey("person.id", ondelete="SET NULL"), nullable=True, index=True)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id", ondelete="CASCADE"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
manager = db.relationship("Person", uselist=False)
projects = db.relationship("Project", backref="programme", lazy=True)
# Methods
def __init__(self, name, manager_id, organisation_id):
self.id = str(uuid.uuid4())
self.name = name.strip()
self.manager_id = str(uuid.UUID(manager_id, version=4)) if manager_id else None
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"manager": {
"id": self.manager.id,
"name": self.manager.name,
}
if self.manager
else None,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"projects": len(self.projects),
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"name": self.name,
"manager": {
"id": self.manager.id,
"name": self.manager.name,
}
if self.manager
else None,
}
class Project(db.Model):
# Fields
id = db.Column(UUID, primary_key=True)
name = db.Column(db.String(), nullable=False, index=True)
manager_id = db.Column(UUID, db.ForeignKey("person.id", ondelete="SET NULL"), nullable=True, index=True)
programme_id = db.Column(UUID, db.ForeignKey("programme.id"), nullable=True)
status = db.Column(db.String(), nullable=False, index=True)
organisation_id = db.Column(UUID, db.ForeignKey("organisation.id"), nullable=False)
created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# Relationships
manager = db.relationship("Person", uselist=False)
# teams = db.relationship("Team", backref="project", lazy=True)
# many to many with person
# Methods
def __init__(self, name, manager_id, programme_id, status, organisation_id):
self.id = str(uuid.uuid4())
self.name = name.strip()
self.manager_id = str(uuid.UUID(manager_id, version=4)) if manager_id else None
self.programme_id = str(uuid.UUID(programme_id, version=4)) if programme_id else None
self.status = status.strip()
self.organisation_id = str(uuid.UUID(organisation_id, version=4))
self.created_at = datetime.utcnow()
def __repr__(self):
return json.dumps(self.as_dict(), separators=(",", ":"))
def as_dict(self):
return {
"id": self.id,
"name": self.name,
"manager": {
"id": self.manager.id,
"name": self.manager.name,
}
if self.manager
else None,
"programme": {
"id": self.programme.id,
"name": self.programme.name,
}
if self.programme
else None,
"status": self.status,
"organisation": {
"id": self.organisation.id,
"name": self.organisation.name,
},
"created_at": self.created_at.isoformat(),
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
def list_item(self):
return {
"id": self.id,
"name": self.name,
"manager": {
"id": self.manager.id,
"name": self.manager.name,
}
if self.manager
else None,
"programme": {
"id": self.programme.id,
"name": self.programme.name,
}
if self.programme
else None,
"status": self.status,
}
# class Team(db.Model):
# # Fields
# id = db.Column(UUID, primary_key=True)
# name = db.Column(db.String(), nullable=False, index=True)
# created_at = db.Column(db.DateTime(timezone=True), nullable=False, index=True)
# updated_at = db.Column(db.DateTime(timezone=True), nullable=True)
# # Relationships
# # many to many with person
|
python
|
#!/usr/bin/env python
#
# Code to build the catalogue cache
#
# Usage: python build_cache.py
#
from __future__ import print_function
from sys import stdout
__author__ = "Yu Feng and Martin White"
__version__ = "1.0"
__email__ = "[email protected] or [email protected]"
from imaginglss import DECALS
import numpy
from imaginglss.cli import CLI
from imaginglss.analysis import cache
ap = CLI("Build cache")
ns = ap.parse_args()
decals = DECALS(ns.conf)
print('building brick index')
dr = decals.datarelease
print('building tractor cache')
builder = cache.CacheBuilder(decals.sweep_dir, decals.cache_dir, dr.schema.CATALOGUE_COLUMNS)
builder.build()
print('done')
|
python
|
# Sum of the numbers from 1 to 5
summe = 0
for i in [1, 2, 3, 4, 5]:
    summe = summe + i  # start of a block
    print("Sum from 1 to", i, ":", summe)  # end of a block
print("End of the calculation")
|
python
|
# -*- coding: utf-8 -*-
"""
Miscellaneous utilities and tools
"""
import errno
import functools
import keyword
import logging
import os
import re
import shutil
import sys
import traceback
from contextlib import contextmanager
from pathlib import Path
from pkg_resources import parse_version
from . import __version__
from .exceptions import InvalidIdentifier, OldSetuptools
from .log import logger
@contextmanager
def _chdir_logging_context(path, should_log):
"""Private auxiliar function for logging inside chdir"""
if should_log:
logger.report('chdir', path)
with logger.indent():
yield
else:
yield
@contextmanager
def chdir(path, **kwargs):
"""Contextmanager to change into a directory
Args:
path (str): path to change current working directory to
Keyword Args:
log (bool): log activity when true. Default: ``False``.
pretend (bool): skip execution (but log) when pretending.
Default ``False``.
"""
should_pretend = kwargs.get('pretend')
should_log = kwargs.get('log', should_pretend)
# ^ When pretending, automatically output logs
# (after all, this is the primary purpose of pretending)
curr_dir = os.getcwd()
try:
with _chdir_logging_context(path, should_log):
if not should_pretend:
# ToDo: Remove str when we require PY 3.6
os.chdir(str(path)) # str to handle pathlib args
yield
finally:
os.chdir(curr_dir)
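# A usage sketch for the context manager above (hypothetical path):
#   with chdir("/tmp", log=True):
#       ...  # the working directory is /tmp inside the block
#   # the previous working directory is restored on exit, even after an error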
def move(*src, **kwargs):
"""Move files or directories to (into) a new location
Args:
*src (str[]): one or more files/directories to be moved
Keyword Args:
target (str): if target is a directory, ``src`` will be moved inside
it. Otherwise, it will be the new path (note that it may be
overwritten)
log (bool): log activity when true. Default: ``False``.
pretend (bool): skip execution (but log) when pretending.
Default ``False``.
"""
target = kwargs['target'] # Required arg
should_pretend = kwargs.get('pretend')
should_log = kwargs.get('log', should_pretend)
# ^ When pretending, automatically output logs
# (after all, this is the primary purpose of pretending)
for path in src:
if not should_pretend:
shutil.move(path, target)
if should_log:
logger.report('move', path, target=target)
def is_valid_identifier(string):
"""Check if string is a valid package name
Args:
string (str): package name
Returns:
bool: True if string is valid package name else False
"""
if not re.match("[_A-Za-z][_a-zA-Z0-9]*$", string):
return False
if keyword.iskeyword(string):
return False
return True
def make_valid_identifier(string):
"""Try to make a valid package name identifier from a string
Args:
string (str): invalid package name
Returns:
        str: valid package name as string
Raises:
:obj:`InvalidIdentifier`: raised if identifier can not be converted
"""
string = string.strip()
string = string.replace("-", "_")
string = string.replace(" ", "_")
string = re.sub('[^_a-zA-Z0-9]', '', string)
string = string.lower()
if is_valid_identifier(string):
return string
else:
raise InvalidIdentifier(
"String cannot be converted to a valid identifier.")
def exceptions2exit(exception_list):
"""Decorator to convert given exceptions to exit messages
This avoids displaying nasty stack traces to end-users
Args:
exception_list [Exception]: list of exceptions to convert
"""
def exceptions2exit_decorator(func):
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except tuple(exception_list) as e:
if logger.level <= logging.DEBUG:
# user surely wants to see the stacktrace
traceback.print_exc()
print("ERROR: {}".format(e))
sys.exit(1)
return func_wrapper
return exceptions2exit_decorator
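# Typical usage of the decorator factory above (hypothetical entry point):
#   @exceptions2exit([InvalidIdentifier, OldSetuptools])
#   def main(args):
#       ...
# Listed exceptions are printed as "ERROR: ..." and converted to exit code 1.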
# from http://en.wikibooks.org/, Creative Commons Attribution-ShareAlike 3.0
def levenshtein(s1, s2):
"""Calculate the Levenshtein distance between two strings
Args:
s1 (str): first string
s2 (str): second string
Returns:
int: distance between s1 and s2
"""
if len(s1) < len(s2):
return levenshtein(s2, s1)
# len(s1) >= len(s2)
if len(s2) == 0:
return len(s1)
previous_row = range(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
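# Quick sanity examples for the distance above:
#   levenshtein("kitten", "sitting") == 3
#   levenshtein("", "abc") == 3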
def prepare_namespace(namespace_str):
"""Check the validity of namespace_str and split it up into a list
Args:
namespace_str (str): namespace, e.g. "com.blue_yonder"
Returns:
[str]: list of namespaces, e.g. ["com", "com.blue_yonder"]
Raises:
:obj:`InvalidIdentifier` : raised if namespace is not valid
"""
namespaces = namespace_str.split('.') if namespace_str else list()
for namespace in namespaces:
if not is_valid_identifier(namespace):
raise InvalidIdentifier(
"{} is not a valid namespace package.".format(namespace))
return ['.'.join(namespaces[:i+1]) for i in range(len(namespaces))]
def check_setuptools_version():
"""Check minimum required version of setuptools
Check that setuptools has all necessary capabilities for setuptools_scm
as well as support for configuration with the help of ``setup.cfg``.
Raises:
:obj:`OldSetuptools` : raised if necessary capabilities are not met
"""
try:
from setuptools import __version__ as setuptools_ver
from pkg_resources import parse_version
except ImportError:
raise OldSetuptools
    setuptools_too_old = parse_version(setuptools_ver) < parse_version('38.3')
    if setuptools_too_old:
        raise OldSetuptools
def create_file(path, content, pretend=False):
"""Create a file in the given path.
This function reports the operation in the logs.
Args:
path (str): path in the file system where contents will be written.
content (str): what will be written.
pretend (bool): false by default. File is not written when pretending,
but operation is logged.
"""
if not pretend:
with open(path, 'w', encoding='utf-8') as fh:
fh.write(content)
logger.report('create', path)
def create_directory(path, update=False, pretend=False):
"""Create a directory in the given path.
This function reports the operation in the logs.
Args:
path (str): path in the file system where contents will be written.
update (bool): false by default. A :obj:`OSError` is raised when update
is false and the directory already exists.
pretend (bool): false by default. Directory is not created when
pretending, but operation is logged.
"""
if not pretend:
try:
os.mkdir(path)
except OSError:
if not update:
raise
return # Do not log if not created
logger.report('create', path)
def dasherize(word):
"""Replace underscores with dashes in the string.
Example::
>>> dasherize("foo_bar")
"foo-bar"
Args:
word (str): input word
Returns:
input word with underscores replaced by dashes
"""
return word.replace('_', '-')
def get_id(function):
"""Given a function, calculate its identifier.
A identifier is a string in the format ``<module name>:<function name>``,
similarly to the convention used for setuptools entry points.
Note:
This function does not return a Python 3 ``__qualname__`` equivalent.
If the function is nested inside another function or class, the parent
name is ignored.
Args:
function (callable): function object
Returns:
str: identifier
"""
return '{}:{}'.format(function.__module__, function.__name__)
def localize_path(path_string):
"""Localize path for Windows, Unix, i.e. / or \
Args:
path_string (str): path using /
Returns:
str: path depending on OS
"""
return str(Path(path_string))
#: Windows-specific error code indicating an invalid pathname.
ERROR_INVALID_NAME = 123
def is_pathname_valid(pathname):
"""Check if a pathname is valid
Code by Cecil Curry from StackOverflow
Args:
pathname (str): string to validate
Returns:
`True` if the passed pathname is a valid pathname for the current OS;
`False` otherwise.
"""
# If this pathname is either not a string or is but is empty, this pathname
# is invalid.
try:
if not isinstance(pathname, str) or not pathname:
return False
# Strip this pathname's Windows-specific drive specifier (e.g., `C:\`)
# if any. Since Windows prohibits path components from containing `:`
# characters, failing to strip this `:`-suffixed prefix would
# erroneously invalidate all valid absolute Windows pathnames.
_, pathname = os.path.splitdrive(pathname)
# Directory guaranteed to exist. If the current OS is Windows, this is
# the drive to which Windows was installed (e.g., the "%HOMEDRIVE%"
# environment variable); else, the typical root directory.
root_dirname = os.environ.get('HOMEDRIVE', 'C:') \
if sys.platform == 'win32' else os.path.sep
assert os.path.isdir(root_dirname) # ...Murphy and her ironclad Law
# Append a path separator to this directory if needed.
root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
# Test whether each path component split from this pathname is valid or
# not, ignoring non-existent and non-readable path components.
for pathname_part in pathname.split(os.path.sep):
try:
os.lstat(root_dirname + pathname_part)
# If an OS-specific exception is raised, its error code
# indicates whether this pathname is valid or not. Unless this
# is the case, this exception implies an ignorable kernel or
# filesystem complaint (e.g., path not found or inaccessible).
#
# Only the following exceptions indicate invalid pathnames:
#
# * Instances of the Windows-specific "WindowsError" class
# defining the "winerror" attribute whose value is
# "ERROR_INVALID_NAME". Under Windows, "winerror" is more
# fine-grained and hence useful than the generic "errno"
# attribute. When a too-long pathname is passed, for example,
# "errno" is "ENOENT" (i.e., no such file or directory) rather
# than "ENAMETOOLONG" (i.e., file name too long).
# * Instances of the cross-platform "OSError" class defining the
# generic "errno" attribute whose value is either:
# * Under most POSIX-compatible OSes, "ENAMETOOLONG".
# * Under some edge-case OSes (e.g., SunOS, *BSD), "ERANGE".
except OSError as exc:
if hasattr(exc, 'winerror'):
if exc.winerror == ERROR_INVALID_NAME:
return False
elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
return False
# If a "TypeError" exception was raised, it almost certainly has the
# error message "embedded NUL character" indicating an invalid pathname.
except TypeError:
return False
# If no exception was raised, all path components and hence this
# pathname itself are valid. (Praise be to the curmudgeonly python.)
else:
return True
# If any other exception was raised, this is an unrelated fatal issue
# (e.g., a bug). Permit this exception to unwind the call stack.
#
# Did we mention this should be shipped with Python already?
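# Rough behaviour of the check above (platform-dependent, so only a sketch):
#   is_pathname_valid("projects/demo.txt")  ->  True on typical setups
#   is_pathname_valid("")                   ->  False (empty string)
#   is_pathname_valid("a" * 300)            ->  False on most filesystems (name too long)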
def on_ro_error(func, path, exc_info):
"""Error handler for ``shutil.rmtree``.
If the error is due to an access error (read only file)
it attempts to add write permission and then retries.
If the error is for another reason it re-raises the error.
    Usage: ``shutil.rmtree(path, onerror=on_ro_error)``
Args:
func (callable): function which raised the exception
path (str): path passed to `func`
exc_info (tuple of str): exception info returned by sys.exc_info()
"""
import stat
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
func(path)
else:
raise
def rm_rf(path):
"""Remove a path by all means like `rm -rf` in Linux.
    Args:
        path (str): path to remove
    """
shutil.rmtree(path, onerror=on_ro_error)
|
python
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from odahuflow.sdk.models.base_model_ import Model
from odahuflow.sdk.models import util
class ExternalUrl(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, image_url: str=None, name: str=None, url: str=None): # noqa: E501
"""ExternalUrl - a model defined in Swagger
:param image_url: The image_url of this ExternalUrl. # noqa: E501
:type image_url: str
:param name: The name of this ExternalUrl. # noqa: E501
:type name: str
:param url: The url of this ExternalUrl. # noqa: E501
:type url: str
"""
self.swagger_types = {
'image_url': str,
'name': str,
'url': str
}
self.attribute_map = {
'image_url': 'imageUrl',
'name': 'name',
'url': 'url'
}
self._image_url = image_url
self._name = name
self._url = url
@classmethod
def from_dict(cls, dikt) -> 'ExternalUrl':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ExternalUrl of this ExternalUrl. # noqa: E501
:rtype: ExternalUrl
"""
return util.deserialize_model(dikt, cls)
@property
def image_url(self) -> str:
"""Gets the image_url of this ExternalUrl.
Optional link to an image which represents a type of the resource, for example the logo of Grafana # noqa: E501
:return: The image_url of this ExternalUrl.
:rtype: str
"""
return self._image_url
@image_url.setter
def image_url(self, image_url: str):
"""Sets the image_url of this ExternalUrl.
Optional link to an image which represents a type of the resource, for example the logo of Grafana # noqa: E501
:param image_url: The image_url of this ExternalUrl.
:type image_url: str
"""
self._image_url = image_url
@property
def name(self) -> str:
"""Gets the name of this ExternalUrl.
Human-readable name # noqa: E501
:return: The name of this ExternalUrl.
:rtype: str
"""
return self._name
@name.setter
def name(self, name: str):
"""Sets the name of this ExternalUrl.
Human-readable name # noqa: E501
:param name: The name of this ExternalUrl.
:type name: str
"""
self._name = name
@property
def url(self) -> str:
"""Gets the url of this ExternalUrl.
Link to a resource # noqa: E501
:return: The url of this ExternalUrl.
:rtype: str
"""
return self._url
@url.setter
def url(self, url: str):
"""Sets the url of this ExternalUrl.
Link to a resource # noqa: E501
:param url: The url of this ExternalUrl.
:type url: str
"""
self._url = url
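# Deserialisation sketch (hypothetical payload; keys follow attribute_map above):
#   ExternalUrl.from_dict({"imageUrl": None, "name": "Grafana", "url": "https://grafana.example"})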
|
python
|
from bs4 import BeautifulSoup, SoupStrainer
import re
import requests
import json
strained = SoupStrainer('a', href=re.compile('saskatchewan.kijiji.ca/f.*QQ'))
soup = BeautifulSoup(requests.get('http://saskatchewan.kijiji.ca').text)
category_dict = {}
for a in soup.findAll(strained):
category_id = None
category = []
for key in str(a.string).split(", "):
category.append(key)
    category_id_matches = re.search(r'CatIdZ(\d+)', a['href'])
if(category_id_matches):
category_id = category_id_matches.group(1)
if(category_id and category):
for key in category:
category_dict[key] = int(category_id)
if(category_dict):
with open('../pykijiji/categories.json', 'w') as f:
json.dump(
category_dict,
f,
sort_keys=True,
indent=2
)
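# The category id is pulled out of hrefs containing a CatIdZ<digits> token,
# e.g. (hypothetical href) re.search(r'CatIdZ(\d+)', 'f-cars-W0QQCatIdZ27QQ').group(1) == '27'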
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import connection, DatabaseError, transaction
import django_rq
from services.monitoring import test_service
from services.models import Service
def _create_history_partitions():
now = datetime.datetime.now()
required_partitions = [
(now + datetime.timedelta(days=1)).strftime("p%Y%m%d"),
(now + datetime.timedelta(days=2)).strftime("p%Y%m%d"),
(now + datetime.timedelta(days=3)).strftime("p%Y%m%d")
]
partitions_conditions = {
(now + datetime.timedelta(days=1)).strftime(
"p%Y%m%d",
): (now + datetime.timedelta(days=1)).strftime("%Y-%m-%d"),
(now + datetime.timedelta(days=2)).strftime(
"p%Y%m%d",
): (now + datetime.timedelta(days=2)).strftime("%Y-%m-%d"),
(now + datetime.timedelta(days=3)).strftime(
"p%Y%m%d",
): (now + datetime.timedelta(days=3)).strftime("%Y-%m-%d")
}
sql = """
SELECT
partition_name
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE
table_schema=%s AND
table_name='services_servicehistory' AND
partition_name<>'p_other'
ORDER BY partition_name ASC
"""
cursor = connection.cursor()
cursor.execute(sql, [settings.DATABASES['default']['NAME']])
current_partitions = []
for row in cursor.fetchall():
current_partitions.append(row[0])
sql_parts = []
for partition_name in required_partitions:
if partition_name not in current_partitions:
sql_parts.append(
"PARTITION %s VALUES LESS THAN (TO_DAYS('%s'))" % (
partition_name, partitions_conditions[partition_name],
),
)
if not sql_parts:
return
sql = "ALTER TABLE services_servicehistory ADD PARTITION (%s)" % (
",".join(sql_parts),
)
cursor.execute(sql)
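# The generated statement has this shape (hypothetical partition names and dates):
#   ALTER TABLE services_servicehistory ADD PARTITION (
#       PARTITION p20240102 VALUES LESS THAN (TO_DAYS('2024-01-02')),
#       PARTITION p20240103 VALUES LESS THAN (TO_DAYS('2024-01-03')))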
def create_history_partitions():
queue = django_rq.get_queue(
name='archiving' if 'archiving' in settings.RQ_QUEUES else 'default',
)
queue.enqueue_call(
func=_create_history_partitions,
timeout=300,
result_ttl=0,
)
def _create_archive_partitions():
now = datetime.datetime.now()
if now.month == 12:
next_year = now.year + 1
next_month = 1
else:
next_year = now.year
next_month = now.month + 1
next_month1 = datetime.date(next_year, next_month, 1)
if next_month1.month == 12:
next_year = next_month1.year + 1
next_month = 1
else:
next_year = next_month1.year
next_month = next_month1.month + 1
next_month2 = datetime.date(next_year, next_month, 1)
required_partitions = [
next_month1.strftime("p%Y%m"),
next_month2.strftime("p%Y%m")
]
partitions_conditions = {
next_month1.strftime("p%Y%m"): next_month1.strftime("%Y-%m-01"),
next_month2.strftime("p%Y%m"): next_month2.strftime("%Y-%m-01"),
}
sql = """
SELECT
partition_name
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE
table_schema=%s AND
table_name='services_servicehistoryarchive' AND
partition_name<>'p_other'
ORDER BY partition_name ASC
"""
cursor = connection.cursor()
cursor.execute(sql, [settings.DATABASES['default']['NAME']])
current_partitions = []
for row in cursor.fetchall():
current_partitions.append(row[0])
sql_parts = []
for partition_name in required_partitions:
if partition_name not in current_partitions:
sql_parts.append(
"PARTITION %s VALUES LESS THAN (TO_DAYS('%s'))" % (
partition_name, partitions_conditions[partition_name])
)
if not sql_parts:
return
sql = "ALTER TABLE services_servicehistoryarchive ADD PARTITION (%s)" % (
",".join(sql_parts),
)
cursor.execute(sql)
def create_archive_partitions():
queue = django_rq.get_queue(
name='archiving' if 'archiving' in settings.RQ_QUEUES else 'default',
)
queue.enqueue_call(
func=_create_archive_partitions,
timeout=300,
result_ttl=0,
)
def _make_history_archive():
transaction.enter_transaction_management()
transaction.managed()
transaction.commit()
date_start = datetime.datetime.now() - datetime.timedelta(days=8)
sql = """
SELECT MIN(id) AS min_id, MAX(id) AS max_id
FROM services_servicehistory
WHERE created >= %s AND created <= %s
ORDER BY id DESC LIMIT 1
"""
cursor = connection.cursor()
cursor.execute(sql, [
date_start.strftime("%Y-%m-%d 00:00:01"),
date_start.strftime("%Y-%m-%d 23:59:59"),
])
row = cursor.fetchone()
if row is None:
return
min_deleted_id = row[0]
max_deleted_id = row[1]
if not min_deleted_id or not max_deleted_id:
return
sql = """
INSERT INTO services_servicehistoryarchive (
response_time,
namelookup_time,
connect_time,
pretransfer_time,
starttransfer_time,
redirect_time,
size_download,
speed_download,
redirect_count,
num_connects,
created,
service_id,
agent_id
)
SELECT
ROUND(AVG(response_time), 2) AS response_time,
ROUND(AVG(namelookup_time), 2) AS namelookup_time,
ROUND(AVG(connect_time), 2) AS connect_time,
ROUND(AVG(pretransfer_time), 2) AS pretransfer_time,
ROUND(AVG(starttransfer_time), 2) AS starttransfer_time,
ROUND(AVG(redirect_time), 2) AS redirect_time,
ROUND(AVG(size_download), 0) AS size_download,
ROUND(AVG(speed_download), 0) AS speed_download,
ROUND(AVG(redirect_count), 0) AS redirect_count,
ROUND(AVG(num_connects), 0) AS num_connects,
CASE
WHEN MINUTE(created) >= 45 THEN date_format(created, '%%Y-%%m-%%d %%H:45')
WHEN MINUTE(created) < 45 AND MINUTE(created) >= 30 THEN date_format(created, '%%Y-%%m-%%d %%H:30')
WHEN MINUTE(created) < 30 AND MINUTE(created) >= 15 THEN date_format(created, '%%Y-%%m-%%d %%H:15')
ELSE date_format(created, '%%Y-%%m-%%d %%H:00')
END AS created_at,
service_id,
agent_id
FROM
services_servicehistory
WHERE
created >= %s AND created <= %s
GROUP BY
created_at, service_id, agent_id;
"""
try:
cursor.execute(sql, [
date_start.strftime("%Y-%m-%d 00:00:01"),
date_start.strftime("%Y-%m-%d 23:59:59"),
])
except DatabaseError:
transaction.rollback()
return
sql = """
DELETE FROM services_servicehistoryextra
WHERE service_history_id >= %s AND service_history_id <= %s
"""
try:
cursor.execute(sql, [min_deleted_id, max_deleted_id])
except DatabaseError:
transaction.rollback()
return
sql = """
SELECT
partition_name
FROM INFORMATION_SCHEMA.PARTITIONS
WHERE
table_schema=%s AND
table_name='services_servicehistory' AND
partition_name<>'p_other'
ORDER BY partition_name ASC
"""
try:
cursor.execute(sql, [settings.DATABASES['default']['NAME']])
except DatabaseError:
transaction.rollback()
return
current_partitions = []
for row in cursor.fetchall():
current_partitions.append(row[0])
partition_to_delete = (
date_start + datetime.timedelta(days=1)
).strftime("p%Y%m%d")
if partition_to_delete not in current_partitions:
return
sql = "ALTER TABLE services_servicehistory DROP PARTITION %s" % (
partition_to_delete,
)
try:
cursor.execute(sql)
except DatabaseError:
transaction.rollback()
return
transaction.commit()
def make_history_archive():
queue = django_rq.get_queue(
name='archiving' if 'archiving' in settings.RQ_QUEUES else 'default',
)
queue.enqueue_call(
func=_make_history_archive,
timeout=3600,
result_ttl=0,
)
def _monitor_service(service):
test_service(service)
def monitor_all():
queue = django_rq.get_queue(
name='dispacher' if 'dispacher' in settings.RQ_QUEUES else 'default',
)
services = Service.objects.filter(is_technical_break=False, is_active=True)
for service in services:
queue.enqueue_call(
func=_monitor_service,
kwargs={'service': service},
timeout=60,
result_ttl=0,
)
|
python
|
# src/chara/character.py
import enum
class C_type(enum.Enum):
PLAYER = 0
NPC = 1
OPPONENT = 2
BOSS = 3
class Character():
def __init__(self,name,c_type):
types = Character.__ty()
self.name = name
self.c_type = types[c_type]
# temporary function
def identity(self):
print(str(self.name) + " : " + str(self.c_type))
# private functions
def __ty():
types = {}
types[C_type.PLAYER] = "player"
types[C_type.NPC] = "npc"
types[C_type.OPPONENT] = "opponent"
types[C_type.BOSS] = "boss"
return types
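# Quick usage sketch:
#   hero = Character("Hero", C_type.PLAYER)
#   hero.identity()   # prints "Hero : player"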
|
python
|
from peewee import Model, CompositeKey, ForeignKeyField
from data.db import database
from data.user import User
class Buddies(Model):
buddy1 = ForeignKeyField(User, to_field="id")
buddy2 = ForeignKeyField(User, to_field="id")
class Meta:
database = database
primary_key = CompositeKey('buddy1', 'buddy2')
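# Usage sketch (hypothetical, already-saved User rows):
#   Buddies.create(buddy1=alice, buddy2=bob)
# The composite primary key stores each (buddy1, buddy2) pair at most once.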
|
python
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
# set a seed for the random number distribution before shuffling the data (images)
random.seed(101)
random.shuffle(dataset)
# set the same seed before shuffling the corresponding labels to get the same random number distribution
random.seed(101)
random.shuffle(labels)
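# A tiny illustration of why reseeding keeps the two shuffles aligned
# (hypothetical placeholder data):
#   dataset = ["img0", "img1", "img2"]
#   labels = [0, 1, 2]
#   random.seed(101); random.shuffle(dataset)
#   random.seed(101); random.shuffle(labels)
#   # dataset[i] still corresponds to labels[i] for every i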
|
python
|
""" Financial Modeling Prep Model """
__docformat__ = "numpy"
import pandas as pd
import FundamentalAnalysis as fa
from gamestonk_terminal import config_terminal as cfg
def get_rating(ticker: str) -> pd.DataFrame:
"""Get ratings for a given ticker. [Source: Financial Modeling Prep]
Parameters
----------
ticker : str
Stock ticker
Returns
-------
pd.DataFrame
Rating data
"""
return fa.rating(ticker, cfg.API_KEY_FINANCIALMODELINGPREP)
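# Usage sketch (hypothetical ticker; requires API_KEY_FINANCIALMODELINGPREP to be configured):
#   df = get_rating("AAPL")
#   print(df.head())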
|
python
|
#!/usr/bin/env python
"""tests for :mod:`online_pomdp_planning.mcts`"""
from functools import partial
from math import log, sqrt
from typing import Dict
import pytest
from online_pomdp_planning.mcts import (
ActionNode,
DeterministicNode,
MuzeroInferenceOutput,
ObservationNode,
backprop_running_q,
create_muzero_root,
create_root_node_with_child_for_all_actions,
deterministic_qval_backpropagation,
expand_node_with_all_actions,
has_simulated_n_times,
max_q_action_selector,
max_visits_action_selector,
muzero_expand_node,
random_policy,
rollout,
select_action,
select_deterministc_leaf_by_max_scores,
select_leaf_by_max_scores,
ucb,
ucb_scores,
visit_prob_action_selector,
)
from online_pomdp_planning.types import Action
from online_pomdp_planning.utils import MovingStatistic
def test_action_constructor():
"""Tests initiation of action nodes"""
stats = (True, False, 10.0)
p = ObservationNode()
n = ActionNode(stats, p)
assert stats == n.stats
assert p == n.parent
some_other_parent = ObservationNode()
some_other_statistics = (1, 2, 3, 4)
assert some_other_parent != n.parent
assert some_other_statistics != n.stats
@pytest.mark.parametrize("observation", [((0)), (False), ((0, 1))])
def test_action_node_child(observation):
"""checks getting and setting child nodes"""
root = ObservationNode()
n = ActionNode(initial_statistics=None, parent=root)
# if child not in node, do not allow fetching it
with pytest.raises(KeyError):
n.observation_node(observation)
child = ObservationNode(parent=n)
n.add_observation_node(observation, child)
# cannot modify existing child
with pytest.raises(AssertionError):
n.add_observation_node(observation, child)
# now child is in node, make sure the correct thing is returned
assert child == n.observation_node(observation)
@pytest.mark.parametrize(
"parent", [(None), (ActionNode("garbage statistic", ObservationNode()))]
)
def test_observation_node__constructor(parent):
"""Tests initiation of observation nodes"""
n = ObservationNode(parent)
assert parent == n.parent
other_node = ActionNode("garbage statistic", ObservationNode())
assert other_node != n.parent
@pytest.mark.parametrize("action", [((0)), (False), ((0, 1))])
def test_observation_node_child(action):
"""checks getting and setting child nodes"""
n = ObservationNode()
# if child not in node, do not allow fetching it
with pytest.raises(KeyError):
n.action_node(action)
child = ActionNode("some statistic", parent=n)
n.add_action_node(action, child)
# cannot modify existing child
with pytest.raises(AssertionError):
n.add_action_node(action, child)
# now child is in node, make sure the correct thing is returned
assert child == n.action_node(action)
def test_observation_child_stats():
"""Tests getting children statistics"""
node = ObservationNode()
action_1 = -0.5
child_1 = ActionNode((1, 2, 3), node)
node.add_action_node(action_1, child_1)
action_2 = True
child_2 = ActionNode((True, False, ("garbage")), node)
node.add_action_node(action_2, child_2)
assert node.child_stats == {
action_1: child_1.stats,
action_2: child_2.stats,
}
def test_deterministic_node():
"""Tests :class:`DeterministicNode`"""
root = DeterministicNode({"stat1": 1, "stat2": "bla"}, None)
assert not root.expanded
assert root.stats["stat1"] == 1
assert root.child_stats == {}
assert root.parent is None
child = DeterministicNode({"childstat1": 2}, root)
root.add_child("some_action", child)
assert root.expanded
assert not child.expanded
assert root.child("some_action") == child
assert root.parent is None
assert child.parent == root
with pytest.raises(KeyError):
root.child("other action")
assert root.stats["stat1"] == 1
assert root.child_stats == {"some_action": child.stats}
@pytest.mark.parametrize(
"n,it,expectation", [(5, 4, False), (5, 5, True), (5, 6, True), (0, 0, True)]
)
def test_has_simulated_n_times(n, it, expectation):
"""Tests :func:`online_pomdp_planning.mcts.has_simulated_n_times`"""
assert has_simulated_n_times(n, {"iteration": it}) == expectation
def test_has_simulated_n_times_asserts():
"""Tests :func:`online_pomdp_planning.mcts.has_simulated_n_times` assertions"""
with pytest.raises(AssertionError):
has_simulated_n_times(-1, {"iteration": 0})
with pytest.raises(AssertionError):
has_simulated_n_times(1, {"iteration": -1})
with pytest.raises(KeyError):
has_simulated_n_times(10, {"iteration_typo": 100})
@pytest.mark.parametrize(
"actions,init_stats",
[
([False, 1, (10, 2)], "some garbage"),
([], {"qval": 10, "n": 0}),
],
)
def test_create_root_node_with_child_for_all_actions(actions, init_stats):
"""Tests :func:`~online_pomdp_planning.mcts.create_root_node_with_child_for_all_actions`"""
node = create_root_node_with_child_for_all_actions(actions, init_stats)
for a in actions:
assert node.action_node(a).stats == init_stats
assert node.action_node(a).parent == node
assert node.action_node(a).observation_nodes == {}
def test_create_muzero_root():
"""tests :func:`create_muzero_root`"""
latent_state = "latent_state"
reward = 1.2
prior: Dict[Action, float] = {"a1": 0.2, "a3": 0.5, "a5": 0.3}
noise_dirichlet_alpha = 10
noise_exploration_fraction = 0.2
root = create_muzero_root(
latent_state, reward, prior, noise_dirichlet_alpha, noise_exploration_fraction
)
assert root.stats["latent_state"] == latent_state
assert root.stats["reward"] == reward
assert root.stats["qval"] == 0
assert root.stats["n"] == 0
stats = root.child_stats
assert len(stats) == 3
    assert sum(x["prior"] for x in stats.values()) == pytest.approx(1)
for a, stat in stats.items():
assert pytest.approx(stat["prior"]) != prior[a]
for a, stat in stats.items():
assert stat["qval"] == 0
assert stat["n"] == 0
assert stat["action"] == a
# tests on prior and setting noise
# little noise:
root = create_muzero_root(
latent_state, reward, prior, noise_dirichlet_alpha, 0.000001
)
for a, stat in root.child_stats.items():
assert pytest.approx(stat["prior"], rel=0.001) == prior[a]
# much noise:
root = create_muzero_root(latent_state, reward, prior, 100000, 1)
for a, stat in root.child_stats.items():
assert pytest.approx(stat["prior"], rel=0.01) == 1 / 3
@pytest.mark.parametrize(
"stats,max_a",
[
({0: {"useless_stuff": None, "qval": 0.1}}, 0),
({0: {"qval": -0.1}}, 0),
({0: {"qval": 0.1, "some usless things": 100}, 10: {"qval": -0.1}}, 0),
({0: {"qval": 0.1}, 10: {"qval": 1}}, 10),
({True: {"qval": 100}, 0: {"qval": 0.1}, 10: {"qval": 1}}, True),
],
)
def test_max_q_action_selector(stats, max_a):
"""tests :func:~online_pomdp_planning.mcts.max_q_action_selector"""
info = {}
assert max_q_action_selector(stats, info) == max_a
sorted_q_vals = info["max_q_action_selector-values"]
assert sorted_q_vals[0][0] == max_a
assert len(sorted_q_vals) == len(stats)
for x in sorted_q_vals:
assert len(x) == 2
print(x)
assert stats[x[0]]["qval"] == x[1]
@pytest.mark.parametrize(
"stats,max_a",
[
({"max_a": {"n": -1}}, "max_a"),
({"max_a": {"n": 11}, False: {"n": 10}}, "max_a"),
(
{False: {"n": 10}, True: {"uselessstuff": 10, "n": 15}, "a1": {"n": 1}},
True,
),
],
)
def test_max_visits_action_selector(stats, max_a):
"""tests :func:`max_visits_action_selector`"""
info = {}
assert max_visits_action_selector(stats, info) == max_a
act_to_visits = info["visit_action_selector-counts"]
assert len(act_to_visits) == len(stats)
assert act_to_visits[0][0] == max_a
for a, n in act_to_visits:
assert stats[a]["n"] == n
@pytest.mark.parametrize(
"stats,tot,max_a",
[
({"max_a": {"n": 1}}, 1, "max_a"),
({"max_a": {"n": 100}, False: {"n": 1}}, 101, "max_a"),
(
{False: {"n": 10}, True: {"uselessstuff": 10, "n": 10000}, "a1": {"n": 0}},
10010,
True,
),
],
)
def test_visit_prob_action_selector(stats, tot, max_a):
"""tests :func:`visit_prob_action_selector`"""
info = {}
assert visit_prob_action_selector(stats, info) == max_a
act_to_visits = info["visit_action_selector-counts"]
assert len(act_to_visits) == len(stats)
assert act_to_visits[0][0] == max_a
for a, n in act_to_visits:
assert stats[a]["n"] == n
acts_to_probs = info["visit_action_selector-probabilities"]
assert acts_to_probs[0][0] == max_a
for a, n in acts_to_probs:
assert stats[a]["n"] / tot == n
@pytest.mark.parametrize(
"o,actions,init_stats",
[
(10, [0, True, (10.0)], {"q-value": 0, "n": 0}),
(10, [0, (10.0)], {"q-value": 10, "n": 0}),
],
)
def test_expand_node_with_all_actions(o, actions, init_stats):
    """tests :func:`~online_pomdp_planning.mcts.expand_node_with_all_actions`"""
parent = ObservationNode()
stats = 0
node = ActionNode(stats, parent)
info = {}
expand_node_with_all_actions(actions, init_stats, o, node, info)
expansion = node.observation_node(o)
assert info["mcts_num_action_nodes"] == 1
assert expansion.parent is node
assert node.observation_node(o) is expansion
assert len(expansion.action_nodes) == len(actions)
for n in expansion.action_nodes.values():
assert len(n.observation_nodes) == 0
assert n.parent == expansion
assert n.stats == init_stats
assert n.stats is not init_stats # please be copy
def fake_muzero_recurrance_inference(
state, action, value, reward, policy, latent_state
):
"""Just fakes doing inference in muzero"""
return MuzeroInferenceOutput(value, reward, policy, latent_state)
def test_muzero_expand_node():
    """tests :py:func:`muzero_expand_node`"""
info = {}
root = DeterministicNode(
{"latent_state": "root", "reward": 0.5, "n": 0, "qval": 0.0}, None
)
first_leaf = DeterministicNode(
{"prior": 0.1, "action": "a1", "n": 3, "qval": 0.0}, root
)
root.add_child("a1", first_leaf)
assert not first_leaf.expanded
latent_state = "first_leaf_state"
reward = -0.23
value = 2.2
policy = {"a1": 0.4, "a2": 0.6}
returned_value = muzero_expand_node(
first_leaf,
info,
partial(
fake_muzero_recurrance_inference,
value=value,
reward=reward,
policy=policy,
latent_state=latent_state,
),
)
assert returned_value == value
assert first_leaf.stats["latent_state"] == latent_state
assert first_leaf.stats["reward"] == reward
assert len(first_leaf.children) == 2
for stats in first_leaf.child_stats.values():
assert stats["n"] == 0
assert stats["qval"] == 0
for a in ["a1", "a2"]:
assert first_leaf.child(a).stats["prior"] == policy[a]
@pytest.mark.parametrize(
"q,n,n_total,ucb_constant,expected_raise",
[
(123, 0, 234, 452, False),
(0, 0, -234, False, True),
(0, -1, 10, False, True),
(0, 1, 1, 0, False),
(-5.2, 1, 1, 1, False),
],
)
def test_ucb_raises(q, n, n_total, ucb_constant, expected_raise):
"""Tests that :func:`~online_pomdp_planning.mcts.ucb` raises on invalid input"""
if expected_raise:
with pytest.raises(AssertionError):
ucb(q, n, n_total, ucb_constant)
else:
ucb(q, n, n_total, ucb_constant)
@pytest.mark.parametrize(
"q,n,n_total,ucb_constant,expectation",
[
(123, 0, 234, 452, float("inf")),
(0, 1, 1, 1, sqrt(log(1) / 1)),
(-5.2, 1, 1, 1, -5.2 + sqrt(log(1) / 1)),
(134, 3, 4, 1, 134 + sqrt(log(4) / 3)),
(1, 1, 1, 50.3, 1 + 50.3 * sqrt(log(1) / 1)),
(1, 1, 10, 50.3, 1 + 50.3 * sqrt(log(10) / 1)),
],
)
def test_ucb(q, n, n_total, ucb_constant, expectation):
"""Tests :func:`~online_pomdp_planning.mcts.ucb`"""
assert ucb(q, n, n_total, ucb_constant) == expectation
def test_ucb_scores():
    """tests :func:`ucb_scores`"""
u = 50.3
action_stats = {
"a1": {"qval": 10, "n": 9},
True: {"qval": 1, "n": 1},
10: {"qval": 3, "n": 0},
}
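    # Hand-check, assuming ucb_scores sums the visit counts for the total N:
    # N = 9 + 1 + 0 = 10, so the score for True is ucb(1, 1, 10, 50.3)
    # = 1 + 50.3 * sqrt(log(10) / 1), and the unvisited action 10 scores inf.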
action_scores = ucb_scores(action_stats, {}, u)
assert {"a1", True, 10} == set(action_scores.keys())
assert action_scores[10] == float("inf")
assert action_scores[True] == 1 + 50.3 * sqrt(log(10) / 1)
@pytest.mark.parametrize(
"expected_action,u,stats",
[
(True, 0, {True: {"qval": 10, "n": 10000}, 2: {"qval": 9, "n": 1}}),
(2, 1, {True: {"qval": 10, "n": 10000}, 2: {"qval": 9, "n": 1}}),
(
(1, 2),
1,
{
True: {"qval": 10, "n": 10000},
2: {"qval": 9, "n": 1},
(1, 2): {"qval": 10, "n": 1},
},
),
],
)
def test_select_with_ucb(expected_action, u, stats):
"""Tests :func:`~online_pomdp_planning.mcts.select_with_ucb`"""
scoring_method = partial(ucb_scores, ucb_constant=u)
assert select_action(stats, {}, scoring_method) == expected_action
def test_select_with_ucb_is_random():
"""Tests :func:`~online_pomdp_planning.mcts.select_with_ucb` is random"""
# 2 == bla
stats = {
True: {"qval": 10, "n": 10000},
2: {"qval": 9, "n": 1},
"bla": {"qval": 9, "n": 1},
}
scoring_method = partial(ucb_scores, ucb_constant=10)
chosen_actions = {select_action(stats, {}, scoring_method) for _ in range(20)}
assert len(chosen_actions) == 2
def construct_ucb_tree(observation_from_simulator) -> ObservationNode:
"""Constructs a particular tree for UCB
    Tree: (action -> stats or child observations)
    - ``False`` -> `(q=3.4, n=3)`:
        - ``True``
        - `(100)`
        - 2:
            - `(10, 2)` -> `(qval: 0, n: 0)`
    - 2 -> `(q=-2.0, n=4)`
According to UCB, the best first action is ``False``, the only second action is `(10, 2)`
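    (Rough numbers behind that claim, assuming ucb(q, n, N, c) = q + c * sqrt(log(N) / n)
    as exercised in ``test_ucb`` with c=1 and N = 3 + 4 = 7 root visits:
    ``False`` scores 3.4 + sqrt(log(7) / 3) ~ 4.2, while 2 scores -2.0 + sqrt(log(7) / 4) ~ -1.3.)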
"""
root = ObservationNode()
# two initial action nodes, action `False` is better
better_first_action = False
better_first_action_node = ActionNode({"qval": 3.4, "n": 3}, root)
worse_first_action = 2
worse_first_action_node = ActionNode({"qval": -2.0, "n": 4}, root)
root.add_action_node(better_first_action, better_first_action_node)
root.add_action_node(worse_first_action, worse_first_action_node)
# three observation nodes; observation `2` is returned by simulator
first_picked_observation_node = ObservationNode(better_first_action_node)
better_first_action_node.add_observation_node(
observation_from_simulator, first_picked_observation_node
)
better_first_action_node.add_observation_node(
True, ObservationNode(better_first_action_node)
)
better_first_action_node.add_observation_node(
(100), ObservationNode(better_first_action_node)
)
# one leaf action node
leaf_action_node = ActionNode({"qval": 0, "n": 0}, first_picked_observation_node)
better_first_action_node.observation_node(
observation_from_simulator
).add_action_node((10, 2), leaf_action_node)
return root
def run_ucb_select_leaf(observation_from_simulator, root, max_depth=1000):
"""Runs UCB with a typical simulator from root"""
    def sim(s, a):
        """Fake simulator: returns state 0, the given observation, reward .5, not terminal"""
return 0, observation_from_simulator, 0.5, False
info = {}
scoring_method = partial(ucb_scores, ucb_constant=1)
chosen_leaf, s, obs, term, rewards = select_leaf_by_max_scores(
sim=sim,
scoring_method=scoring_method,
max_depth=max_depth,
node=root,
info=info,
state=1,
)
return chosen_leaf, s, obs, term, rewards, info
def run_ucb_select_leaf_terminal_sim(observation_from_simulator, root):
"""Runs UCB with a terminal simulator from root"""
def term_sim(s, a):
"""Returns the same as :func:`sim` but sets terminal flag to ``True``"""
return 0, observation_from_simulator, 0.5, True
info = {}
scoring_method = partial(ucb_scores, ucb_constant=1)
chosen_leaf, s, obs, term, rewards = select_leaf_by_max_scores(
sim=term_sim,
scoring_method=scoring_method,
max_depth=1000,
node=root,
info=info,
state=1,
)
return chosen_leaf, s, obs, term, rewards, info
def test_select_leaf_by_max_scores():
"""A specific test on UCB to see what leaf it returns"""
observation_from_simulator = 2
root = construct_ucb_tree(observation_from_simulator)
chosen_leaf, s, obs, term, rewards, info = run_ucb_select_leaf(
observation_from_simulator, root
)
leaf_action_node = root.action_node(False).observation_node(2).action_node((10, 2))
assert chosen_leaf is leaf_action_node, "constructed tree should lead to leaf"
assert s == 0, "simulator always outputs 0 as state"
assert obs == observation_from_simulator, "better output the correct observation"
assert not term, "simulator should indicate it is not terminal"
assert rewards == [0.5, 0.5], "we did two steps of .5 reward"
assert info["ucb_tree_depth"].max == 2
assert info["ucb_num_terminal_sims"] == 0
assert info["leaf_depth"] == 2
# test max depth
for d in [1, 2]:
chosen_leaf, s, obs, term, rewards, info = run_ucb_select_leaf(
observation_from_simulator, root, max_depth=d
)
assert info["ucb_tree_depth"].max == d
assert info["leaf_depth"] == d
assert info["ucb_num_terminal_sims"] == 0
chosen_leaf, s, obs, term, rewards, info = run_ucb_select_leaf_terminal_sim(
observation_from_simulator, root
)
assert chosen_leaf is root.action_node(
False
), "constructed tree should lead to leaf"
assert s == 0, "simulator always outputs 0 as state"
assert obs == observation_from_simulator, "better output the correct observation"
    assert term, "terminal simulator should indicate it is terminal"
    assert rewards == [0.5], "we did one step of .5 reward"
assert info["leaf_depth"] == 1
def test_select_deterministc_leaf_by_max_scores():
"""Some tests on :func:`select_deterministc_leaf_by_max_scores`"""
node_scoring_method = partial(ucb_scores, ucb_constant=10)
info = {}
# if only one leaf, should find it
root = DeterministicNode(
{"latent_state": "root", "reward": 0.5, "n": 0, "qval": 0.0}, None
)
first_leaf = DeterministicNode(
{"prior": 0.1, "action": "a1", "n": 3, "qval": 0.0}, root
)
root.add_child("a1", first_leaf)
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
first_leaf,
None,
)
assert info["ucb_tree_depth"].max == 1
# a second, better, leaf should be picked instead
second_leaf = DeterministicNode(
{"prior": 0.1, "action": "a2", "n": 3, "qval": 5.0}, root
)
root.add_child("a2", second_leaf)
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
second_leaf,
None,
)
assert info["ucb_tree_depth"].max == 1
assert info["ucb_tree_depth"].num == 2
    # adding a deeper node under the currently-best second leaf: selection should reach it
third_leaf = DeterministicNode(
{"prior": 0.1, "action": "a", "n": 3, "qval": -5.0}, second_leaf
)
second_leaf.add_child("s", third_leaf)
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
third_leaf,
None,
)
assert info["ucb_tree_depth"].max == 2
# increasing q value of first (bad) leaf should make it favourable
first_leaf.stats["qval"] = 10000
assert select_deterministc_leaf_by_max_scores(node_scoring_method, root, info) == (
first_leaf,
None,
)
assert info["ucb_tree_depth"].max == 2
assert info["ucb_tree_depth"].num == 4
def test_backprop_running_q_assertion():
"""Tests that :func:`~online_pomdp_planning.mcts.backprop_running_q` raises bad discount"""
some_obs_node = ObservationNode()
with pytest.raises(AssertionError):
        backprop_running_q(-1, ActionNode("garbage", some_obs_node), [], 0, {})
with pytest.raises(AssertionError):
        backprop_running_q(1.1, ActionNode("garbage", some_obs_node), [], 0, {})
@pytest.mark.parametrize(
"discount_factor, new_q_first, new_q_leaf",
[
(0, 10.3 / 4, 7.0),
(1, 12.3 / 4, 2),
# hard math, let's not do that again (3.4*3 + .1 + .9* 7 + .9*.9*-5)
(0.9, 12.55 / 4, 7 - 4.5),
],
)
def test_backprop_running_q(discount_factor, new_q_first, new_q_leaf):
"""Tests :func:`~online_pomdp_planning.mcts.backprop_running_q`"""
observation_from_simulator = 2
root = construct_ucb_tree(observation_from_simulator)
# fake leaf node
leaf_node = root.action_node(False).observation_node(2).action_node((10, 2))
leaf_selection_output = [0.1, 7.0]
leaf_evaluation = -5
backprop_running_q(
discount_factor, leaf_node, leaf_selection_output, leaf_evaluation, {}
)
# lots of math by hand, hope this never needs to be re-computed
# basically we _know_ the path taken, the rewards, and the original tree
# so we can compute what the updated q-values and 'n' are
# q-values are running average, 'n' is just incremented
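    # e.g. for discount_factor = 0.9 (matching the parametrization above):
    #   leaf return  = 7.0 + 0.9 * (-5)                  = 2.5  -> new leaf q = (0 * 0 + 2.5) / 1
    #   first return = 0.1 + 0.9 * 7.0 + 0.9 ** 2 * (-5) = 2.35 -> new q = (3.4 * 3 + 2.35) / 4 = 12.55 / 4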
assert leaf_node.stats["n"] == 1
assert leaf_node.stats["qval"] == pytest.approx(new_q_leaf)
first_chosen_action_node = root.action_node(False)
assert first_chosen_action_node.stats["qval"] == pytest.approx(new_q_first)
assert first_chosen_action_node.stats["n"] == 4
def test_deterministic_qval_backpropagation():
    """Tests :func:`deterministic_qval_backpropagation`"""
q_statistic = MovingStatistic()
q_statistic.add(5)
q_statistic.add(-1)
info = {"q_statistic": q_statistic}
# create tree
root = DeterministicNode(
{"latent_state": "root", "reward": 0.5, "n": 0, "qval": 0.0}, None
)
first_leaf = DeterministicNode(
{"prior": 0.1, "action": "a1", "n": 3, "qval": 0.0, "reward": 0}, root
)
root.add_child(first_leaf.stats["action"], first_leaf)
second_leaf = DeterministicNode(
{"prior": 0.9, "action": "a2", "n": 4, "qval": 5.0, "reward": 0.25}, first_leaf
)
first_leaf.add_child(second_leaf.stats["action"], second_leaf)
deterministic_qval_backpropagation(0.9, second_leaf, None, 9.75, info)
assert info["q_statistic"].max > 5
assert info["q_statistic"].min == -1
assert (
root.stats["n"] == 1
and first_leaf.stats["n"] == 4
and second_leaf.stats["n"] == 5
)
# (5 * 4 + 9.75 + .25) / 5
assert second_leaf.stats["qval"] == 6.0
# return = (9.75 + 0.25) * .9 = 9, (3 * 0 + 9) / 4 = 2.25
assert first_leaf.stats["qval"] == 2.25
# return = 9 * .9 + 0.5 = ..., ... / 1
assert root.stats["qval"] == 9 * 0.9 + 0.5
def test_rollout():
"""Tests :func:`~online_pomdp_planning.mcts.rollout`"""
pol = partial(random_policy, ([False, 1, (10, 2)]))
discount_factor = 0.9
depth = 3
terminal = False
state = 1
obs = 0
def sim(s, a):
"""Fake simulator, returns state 0, obs 2, reward .5 and not terminal"""
return 0, 2, 0.5, False
def term_sim(s, a):
"""Returns the same as :func:`sim` but sets terminal flag to ``True``"""
return 0, 2, 0.5, True
assert (
rollout(pol, term_sim, depth, discount_factor, state, obs, t=True, info={}) == 0
)
assert rollout(pol, term_sim, 0, discount_factor, state, obs, terminal, {}) == 0
assert (
rollout(pol, term_sim, depth, discount_factor, state, obs, terminal, {}) == 0.5
), "terminal sim should allow 1 action"
assert (
rollout(pol, sim, 2, discount_factor, state, obs, terminal, {})
== 0.5 + discount_factor * 0.5
    ), "depth 2 should allow 2 actions"
if __name__ == "__main__":
pytest.main([__file__])
|
python
|
from itertools import product
import torch
import dgl
from dgl.data import citation_graph
from dgl.contrib.data import load_data
from dgl import DGLGraph
from runtime.dgl.gcn import GCN, GCNSPMV
from runtime.dgl.gat import GAT, GATSPMV
from runtime.dgl.rgcn import RGCN, RGCNSPMV
from runtime.dgl.train import train_runtime
from runtime.dgl.hidden import HiddenPrint
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with HiddenPrint():
Cora = citation_graph.load_cora()
CiteSeer = citation_graph.load_citeseer()
PubMed = citation_graph.load_pubmed()
MUTAG = load_data('mutag') # fair comparison
# One training run before we start tracking duration to warm up GPU.
g = DGLGraph(Cora.graph)
g.set_n_initializer(dgl.init.zero_initializer)
g.add_edges(g.nodes(), g.nodes())
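# Per-node symmetric-normalization term deg^{-1/2} (self-loops were added above);
# isolated nodes would give inf, so those entries are zeroed. How 'norm' is applied
# is up to the model implementations in runtime.dgl (typically norm_src * norm_dst,
# i.e. D^{-1/2} A D^{-1/2} in GCN-style propagation).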
norm = torch.pow(g.in_degrees().float(), -0.5)
norm[torch.isinf(norm)] = 0
g.ndata['norm'] = norm.unsqueeze(1).to(device)
model = GCNSPMV(g, Cora.features.shape[1], Cora.num_labels).to(device)
train_runtime(model, Cora, epochs=200, device=device)
for d, Net in product([Cora, CiteSeer, PubMed], [GCN, GCNSPMV, GAT, GATSPMV]):
g = DGLGraph(d.graph)
g.set_n_initializer(dgl.init.zero_initializer)
g.add_edges(g.nodes(), g.nodes())
norm = torch.pow(g.in_degrees().float(), -0.5)
norm[torch.isinf(norm)] = 0
g.ndata['norm'] = norm.unsqueeze(1).to(device)
model = Net(g, d.features.shape[1], d.num_labels).to(device)
t = train_runtime(model, d, epochs=200, device=device)
print(f'{d.name} - {Net.__name__}: {t:.2f}s')
for d, Net in product([MUTAG], [RGCN, RGCNSPMV]):
g = DGLGraph()
g.add_nodes(d.num_nodes)
g.add_edges(d.edge_src, d.edge_dst)
edge_type = torch.from_numpy(d.edge_type).to(device)
edge_norm = torch.from_numpy(d.edge_norm).to(device)
g.edata.update({'type': edge_type, 'norm': edge_norm})
g.ndata['id'] = torch.arange(d.num_nodes, dtype=torch.long, device=device)
model = Net(g, d.num_nodes, d.num_classes, d.num_rels)
t = train_runtime(model, d, epochs=200, device=device)
print(f'{d.name} - {Net.__name__}: {t:.2f}s')
|
python
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import threading
from datetime import date, datetime, timedelta
from psycopg2 import sql
from odoo import api, fields, models, tools, SUPERUSER_ID
from odoo.osv import expression
from odoo.tools.translate import _
from odoo.tools import email_re, email_split
from odoo.exceptions import UserError, AccessError
from odoo.addons.phone_validation.tools import phone_validation
from collections import OrderedDict, defaultdict
from . import crm_stage
_logger = logging.getLogger(__name__)
CRM_LEAD_FIELDS_TO_MERGE = [
'name',
'partner_id',
'campaign_id',
'company_id',
'country_id',
'team_id',
'state_id',
'stage_id',
'medium_id',
'source_id',
'user_id',
'title',
'city',
'contact_name',
'description',
'mobile',
'partner_name',
'phone',
'probability',
'expected_revenue',
'street',
'street2',
'zip',
'create_date',
'date_action_last',
'email_from',
'email_cc',
'website']
# Subset of partner fields: sync any of those
PARTNER_FIELDS_TO_SYNC = [
'mobile',
'title',
'function',
'website',
]
# Subset of partner fields: sync all or none to avoid mixed addresses
PARTNER_ADDRESS_FIELDS_TO_SYNC = [
'street',
'street2',
'city',
'zip',
'state_id',
'country_id',
]
# These values have been determined by benchmarking to minimise
# computation time, number of transactions and transaction time.
PLS_COMPUTE_BATCH_STEP = 50000 # odoo.models.PREFETCH_MAX = 1000 but larger cluster can speed up global computation
PLS_UPDATE_BATCH_STEP = 5000
class Lead(models.Model):
_name = "crm.lead"
_description = "Lead/Opportunity"
_order = "priority desc, id desc"
_inherit = ['mail.thread.cc',
'mail.thread.blacklist',
'mail.thread.phone',
'mail.activity.mixin',
'utm.mixin',
'format.address.mixin',
'phone.validation.mixin']
_primary_email = 'email_from'
# Description
name = fields.Char(
'Opportunity', index=True, required=True,
compute='_compute_name', readonly=False, store=True)
user_id = fields.Many2one('res.users', string='Salesperson', index=True, tracking=True, default=lambda self: self.env.user)
user_email = fields.Char('User Email', related='user_id.email', readonly=True)
user_login = fields.Char('User Login', related='user_id.login', readonly=True)
company_id = fields.Many2one('res.company', string='Company', index=True, default=lambda self: self.env.company.id)
referred = fields.Char('Referred By')
description = fields.Text('Notes')
active = fields.Boolean('Active', default=True, tracking=True)
type = fields.Selection([
('lead', 'Lead'), ('opportunity', 'Opportunity')],
index=True, required=True, tracking=15,
default=lambda self: 'lead' if self.env['res.users'].has_group('crm.group_use_lead') else 'opportunity')
priority = fields.Selection(
crm_stage.AVAILABLE_PRIORITIES, string='Priority', index=True,
default=crm_stage.AVAILABLE_PRIORITIES[0][0])
team_id = fields.Many2one(
'crm.team', string='Sales Team', index=True, tracking=True,
compute='_compute_team_id', readonly=False, store=True)
stage_id = fields.Many2one(
'crm.stage', string='Stage', index=True, tracking=True,
compute='_compute_stage_id', readonly=False, store=True,
copy=False, group_expand='_read_group_stage_ids', ondelete='restrict',
domain="['|', ('team_id', '=', False), ('team_id', '=', team_id)]")
kanban_state = fields.Selection([
('grey', 'No next activity planned'),
('red', 'Next activity late'),
('green', 'Next activity is planned')], string='Kanban State',
compute='_compute_kanban_state')
activity_date_deadline_my = fields.Date(
'My Activities Deadline', compute='_compute_activity_date_deadline_my',
search='_search_activity_date_deadline_my', compute_sudo=False,
readonly=True, store=False, groups="base.group_user")
tag_ids = fields.Many2many(
'crm.tag', 'crm_tag_rel', 'lead_id', 'tag_id', string='Tags',
help="Classify and analyze your lead/opportunity categories like: Training, Service")
color = fields.Integer('Color Index', default=0)
# Opportunity specific
expected_revenue = fields.Monetary('Expected Revenue', currency_field='company_currency', tracking=True)
prorated_revenue = fields.Monetary('Prorated Revenue', currency_field='company_currency', store=True, compute="_compute_prorated_revenue")
recurring_revenue = fields.Monetary('Recurring Revenues', currency_field='company_currency', groups="crm.group_use_recurring_revenues")
recurring_plan = fields.Many2one('crm.recurring.plan', string="Recurring Plan", groups="crm.group_use_recurring_revenues")
recurring_revenue_monthly = fields.Monetary('Expected MRR', currency_field='company_currency', store=True,
compute="_compute_recurring_revenue_monthly",
groups="crm.group_use_recurring_revenues")
recurring_revenue_monthly_prorated = fields.Monetary('Prorated MRR', currency_field='company_currency', store=True,
compute="_compute_recurring_revenue_monthly_prorated",
groups="crm.group_use_recurring_revenues")
company_currency = fields.Many2one("res.currency", string='Currency', related='company_id.currency_id', readonly=True)
# Dates
date_closed = fields.Datetime('Closed Date', readonly=True, copy=False)
date_action_last = fields.Datetime('Last Action', readonly=True)
date_open = fields.Datetime(
'Assignment Date', compute='_compute_date_open', readonly=True, store=True)
day_open = fields.Float('Days to Assign', compute='_compute_day_open', store=True)
day_close = fields.Float('Days to Close', compute='_compute_day_close', store=True)
date_last_stage_update = fields.Datetime(
'Last Stage Update', compute='_compute_date_last_stage_update', index=True, readonly=True, store=True)
date_conversion = fields.Datetime('Conversion Date', readonly=True)
date_deadline = fields.Date('Expected Closing', help="Estimate of the date on which the opportunity will be won.")
# Customer / contact
partner_id = fields.Many2one(
'res.partner', string='Customer', index=True, tracking=10,
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]",
help="Linked partner (optional). Usually created when converting the lead. You can find a partner by its Name, TIN, Email or Internal Reference.")
partner_is_blacklisted = fields.Boolean('Partner is blacklisted', related='partner_id.is_blacklisted', readonly=True)
contact_name = fields.Char(
'Contact Name', tracking=30,
compute='_compute_contact_name', readonly=False, store=True)
partner_name = fields.Char(
'Company Name', tracking=20, index=True,
compute='_compute_partner_name', readonly=False, store=True,
help='The name of the future partner company that will be created while converting the lead into opportunity')
function = fields.Char('Job Position', compute='_compute_function', readonly=False, store=True)
title = fields.Many2one('res.partner.title', string='Title', compute='_compute_title', readonly=False, store=True)
email_from = fields.Char(
'Email', tracking=40, index=True,
compute='_compute_email_from', inverse='_inverse_email_from', readonly=False, store=True)
phone = fields.Char(
'Phone', tracking=50,
compute='_compute_phone', inverse='_inverse_phone', readonly=False, store=True)
mobile = fields.Char('Mobile', compute='_compute_mobile', readonly=False, store=True)
phone_mobile_search = fields.Char('Phone/Mobile', store=False, search='_search_phone_mobile_search')
phone_state = fields.Selection([
('correct', 'Correct'),
('incorrect', 'Incorrect')], string='Phone Quality', compute="_compute_phone_state", store=True)
email_state = fields.Selection([
('correct', 'Correct'),
('incorrect', 'Incorrect')], string='Email Quality', compute="_compute_email_state", store=True)
website = fields.Char('Website', index=True, help="Website of the contact", compute="_compute_website", readonly=False, store=True)
lang_id = fields.Many2one('res.lang', string='Language')
# Address fields
street = fields.Char('Street', compute='_compute_partner_address_values', readonly=False, store=True)
street2 = fields.Char('Street2', compute='_compute_partner_address_values', readonly=False, store=True)
zip = fields.Char('Zip', change_default=True, compute='_compute_partner_address_values', readonly=False, store=True)
city = fields.Char('City', compute='_compute_partner_address_values', readonly=False, store=True)
state_id = fields.Many2one(
"res.country.state", string='State',
compute='_compute_partner_address_values', readonly=False, store=True,
domain="[('country_id', '=?', country_id)]")
country_id = fields.Many2one(
'res.country', string='Country',
compute='_compute_partner_address_values', readonly=False, store=True)
# Probability (Opportunity only)
probability = fields.Float(
'Probability', group_operator="avg", copy=False,
compute='_compute_probabilities', readonly=False, store=True)
automated_probability = fields.Float('Automated Probability', compute='_compute_probabilities', readonly=True, store=True)
is_automated_probability = fields.Boolean('Is automated probability?', compute="_compute_is_automated_probability")
# External records
meeting_count = fields.Integer('# Meetings', compute='_compute_meeting_count')
lost_reason = fields.Many2one(
'crm.lost.reason', string='Lost Reason',
index=True, ondelete='restrict', tracking=True)
ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message')
_sql_constraints = [
('check_probability', 'check(probability >= 0 and probability <= 100)', 'The probability of closing the deal should be between 0% and 100%!')
]
@api.depends('activity_date_deadline')
def _compute_kanban_state(self):
today = date.today()
for lead in self:
kanban_state = 'grey'
if lead.activity_date_deadline:
lead_date = fields.Date.from_string(lead.activity_date_deadline)
if lead_date >= today:
kanban_state = 'green'
else:
kanban_state = 'red'
lead.kanban_state = kanban_state
@api.depends('activity_ids.date_deadline')
@api.depends_context('uid')
def _compute_activity_date_deadline_my(self):
todo_activities = []
if self.ids:
todo_activities = self.env['mail.activity'].search([
('user_id', '=', self._uid),
('res_model', '=', self._name),
('res_id', 'in', self.ids)
], order='date_deadline ASC')
for record in self:
record.activity_date_deadline_my = next(
(activity.date_deadline for activity in todo_activities if activity.res_id == record.id),
False
)
def _search_activity_date_deadline_my(self, operator, operand):
return ['&', ('activity_ids.user_id', '=', self._uid), ('activity_ids.date_deadline', operator, operand)]
@api.depends('user_id', 'type')
def _compute_team_id(self):
""" When changing the user, also set a team_id or restrict team id
to the ones user_id is member of. """
for lead in self:
# setting user as void should not trigger a new team computation
if not lead.user_id:
continue
user = lead.user_id
if lead.team_id and user in lead.team_id.member_ids | lead.team_id.user_id:
continue
team_domain = [('use_leads', '=', True)] if lead.type == 'lead' else [('use_opportunities', '=', True)]
team = self.env['crm.team']._get_default_team_id(user_id=user.id, domain=team_domain)
lead.team_id = team.id
@api.depends('team_id', 'type')
def _compute_stage_id(self):
for lead in self:
if not lead.stage_id:
lead.stage_id = lead._stage_find(domain=[('fold', '=', False)]).id
@api.depends('user_id')
def _compute_date_open(self):
for lead in self:
lead.date_open = fields.Datetime.now() if lead.user_id else False
@api.depends('stage_id')
def _compute_date_last_stage_update(self):
for lead in self:
lead.date_last_stage_update = fields.Datetime.now()
@api.depends('create_date', 'date_open')
def _compute_day_open(self):
""" Compute difference between create date and open date """
leads = self.filtered(lambda l: l.date_open and l.create_date)
others = self - leads
others.day_open = None
for lead in leads:
date_create = fields.Datetime.from_string(lead.create_date).replace(microsecond=0)
date_open = fields.Datetime.from_string(lead.date_open)
lead.day_open = abs((date_open - date_create).days)
@api.depends('create_date', 'date_closed')
    def _compute_day_close(self):
        """ Compute difference between create date and closed date """
leads = self.filtered(lambda l: l.date_closed and l.create_date)
others = self - leads
others.day_close = None
for lead in leads:
date_create = fields.Datetime.from_string(lead.create_date)
date_close = fields.Datetime.from_string(lead.date_closed)
lead.day_close = abs((date_close - date_create).days)
@api.depends('partner_id')
def _compute_name(self):
for lead in self:
if not lead.name and lead.partner_id and lead.partner_id.name:
lead.name = _("%s's opportunity") % lead.partner_id.name
@api.depends('partner_id')
def _compute_contact_name(self):
""" compute the new values when partner_id has changed """
for lead in self:
lead.update(lead._prepare_contact_name_from_partner(lead.partner_id))
@api.depends('partner_id')
def _compute_partner_name(self):
""" compute the new values when partner_id has changed """
for lead in self:
lead.update(lead._prepare_partner_name_from_partner(lead.partner_id))
@api.depends('partner_id')
def _compute_function(self):
""" compute the new values when partner_id has changed """
for lead in self:
if not lead.function or lead.partner_id.function:
lead.function = lead.partner_id.function
@api.depends('partner_id')
def _compute_title(self):
""" compute the new values when partner_id has changed """
for lead in self:
if not lead.title or lead.partner_id.title:
lead.title = lead.partner_id.title
@api.depends('partner_id')
def _compute_mobile(self):
""" compute the new values when partner_id has changed """
for lead in self:
if not lead.mobile or lead.partner_id.mobile:
lead.mobile = lead.partner_id.mobile
@api.depends('partner_id')
def _compute_website(self):
""" compute the new values when partner_id has changed """
for lead in self:
if not lead.website or lead.partner_id.website:
lead.website = lead.partner_id.website
@api.depends('partner_id')
def _compute_partner_address_values(self):
""" Sync all or none of address fields """
for lead in self:
lead.update(lead._prepare_address_values_from_partner(lead.partner_id))
@api.depends('partner_id.email')
def _compute_email_from(self):
for lead in self:
if lead.partner_id.email and lead.partner_id.email != lead.email_from:
lead.email_from = lead.partner_id.email
def _inverse_email_from(self):
for lead in self:
if lead.partner_id and lead.email_from != lead.partner_id.email:
# force reset
if not lead.email_from or not lead.partner_id.email:
lead.partner_id.email = lead.email_from
# compare formatted values as we may have formatting differences between equivalent email
else:
lead_email_normalized = tools.email_normalize(lead.email_from)
partner_email_normalized = tools.email_normalize(lead.partner_id.email)
if lead_email_normalized != partner_email_normalized:
lead.partner_id.email = lead.email_from
@api.depends('partner_id.phone')
def _compute_phone(self):
for lead in self:
if lead.partner_id.phone and lead.phone != lead.partner_id.phone:
lead.phone = lead.partner_id.phone
def _inverse_phone(self):
for lead in self:
if lead.partner_id and lead.phone != lead.partner_id.phone:
# force reset
if not lead.phone or not lead.partner_id.phone:
lead.partner_id.phone = lead.phone
# compare formatted values as we may have encoding differences between equivalent numbers
else:
lead_phone_formatted = lead.phone_format(lead.phone)
partner_phone_formatted = lead.phone_format(lead.partner_id.phone)
if lead_phone_formatted != partner_phone_formatted:
lead.partner_id.phone = lead.phone
@api.depends('phone', 'country_id.code')
def _compute_phone_state(self):
for lead in self:
phone_status = False
if lead.phone:
country_code = lead.country_id.code if lead.country_id and lead.country_id.code else None
try:
if phone_validation.phone_parse(lead.phone, country_code): # otherwise library not installed
phone_status = 'correct'
except UserError:
phone_status = 'incorrect'
lead.phone_state = phone_status
@api.depends('email_from')
def _compute_email_state(self):
for lead in self:
email_state = False
if lead.email_from:
email_state = 'incorrect'
for email in email_split(lead.email_from):
if tools.email_normalize(email):
email_state = 'correct'
break
lead.email_state = email_state
@api.depends('probability', 'automated_probability')
    def _compute_is_automated_probability(self):
        """ If probability and automated_probability are equal, the probability computation
        is considered automatic, i.e. probability is in sync with automated_probability """
for lead in self:
lead.is_automated_probability = tools.float_compare(lead.probability, lead.automated_probability, 2) == 0
@api.depends(lambda self: ['tag_ids', 'stage_id', 'team_id'] + self._pls_get_safe_fields())
def _compute_probabilities(self):
lead_probabilities = self._pls_get_naive_bayes_probabilities()
for lead in self:
if lead.id in lead_probabilities:
was_automated = lead.active and lead.is_automated_probability
lead.automated_probability = lead_probabilities[lead.id]
if was_automated:
lead.probability = lead.automated_probability
@api.depends('expected_revenue', 'probability')
def _compute_prorated_revenue(self):
for lead in self:
lead.prorated_revenue = round((lead.expected_revenue or 0.0) * (lead.probability or 0) / 100.0, 2)
@api.depends('recurring_revenue', 'recurring_plan.number_of_months')
def _compute_recurring_revenue_monthly(self):
for lead in self:
lead.recurring_revenue_monthly = (lead.recurring_revenue or 0.0) / (lead.recurring_plan.number_of_months or 1)
@api.depends('recurring_revenue_monthly', 'probability')
def _compute_recurring_revenue_monthly_prorated(self):
for lead in self:
lead.recurring_revenue_monthly_prorated = (lead.recurring_revenue_monthly or 0.0) * (lead.probability or 0) / 100.0
def _compute_meeting_count(self):
if self.ids:
meeting_data = self.env['calendar.event'].sudo().read_group([
('opportunity_id', 'in', self.ids)
], ['opportunity_id'], ['opportunity_id'])
mapped_data = {m['opportunity_id'][0]: m['opportunity_id_count'] for m in meeting_data}
else:
mapped_data = dict()
for lead in self:
lead.meeting_count = mapped_data.get(lead.id, 0)
@api.depends('email_from', 'phone', 'partner_id')
def _compute_ribbon_message(self):
for lead in self:
# beware: void user input gives '' which is different from False
lead_email_normalized = tools.email_normalize(lead.email_from) or (lead.email_from if lead.email_from else False)
partner_email_normalized = tools.email_normalize(lead.partner_id.email) or lead.partner_id.email
will_write_email = lead_email_normalized != partner_email_normalized if lead.partner_id else False
will_write_phone = False
if lead.partner_id and lead.phone != lead.partner_id.phone:
# if reset -> obviously new value will be propagated
if not lead.phone or not lead.partner_id.phone:
will_write_phone = True
# otherwise compare formatted values as we may have encoding differences
else:
lead_phone_formatted = lead.phone_format(lead.phone)
partner_phone_formatted = lead.phone_format(lead.partner_id.phone)
if lead_phone_formatted != partner_phone_formatted:
will_write_phone = True
if will_write_email and will_write_phone:
lead.ribbon_message = _('By saving this change, the customer email and phone number will also be updated.')
elif will_write_email:
lead.ribbon_message = _('By saving this change, the customer email will also be updated.')
elif will_write_phone:
lead.ribbon_message = _('By saving this change, the customer phone number will also be updated.')
else:
lead.ribbon_message = False
def _search_phone_mobile_search(self, operator, value):
if len(value) <= 2:
raise UserError(_('Please enter at least 3 digits when searching on phone / mobile.'))
query = f"""
SELECT model.id
FROM {self._table} model
WHERE REGEXP_REPLACE(model.phone, '[^\d+]+', '', 'g') SIMILAR TO CONCAT(%s, REGEXP_REPLACE(%s, '\D+', '', 'g'), '%%')
OR REGEXP_REPLACE(model.mobile, '[^\d+]+', '', 'g') SIMILAR TO CONCAT(%s, REGEXP_REPLACE(%s, '\D+', '', 'g'), '%%')
"""
        # searching on +32485112233 should also find 00485112233 (00 and + prefixes are both valid)
# we therefore remove it from input value and search for both of them in db
if value.startswith('+') or value.startswith('00'):
if value.startswith('00'):
value = value[2:]
starts_with = '00|\+'
else:
starts_with = '%'
self._cr.execute(query, (starts_with, value, starts_with, value))
res = self._cr.fetchall()
if not res:
return [(0, '=', 1)]
return [('id', 'in', [r[0] for r in res])]
@api.onchange('phone', 'country_id', 'company_id')
def _onchange_phone_validation(self):
if self.phone:
self.phone = self.phone_format(self.phone)
@api.onchange('mobile', 'country_id', 'company_id')
def _onchange_mobile_validation(self):
if self.mobile:
self.mobile = self.phone_format(self.mobile)
def _prepare_values_from_partner(self, partner):
""" Get a dictionary with values coming from partner information to
copy on a lead. Non-address fields get the current lead
values to avoid being reset if partner has no value for them. """
# Sync all address fields from partner, or none, to avoid mixing them.
values = self._prepare_address_values_from_partner(partner)
# For other fields, get the info from the partner, but only if set
values.update({f: partner[f] or self[f] for f in PARTNER_FIELDS_TO_SYNC})
# Fields with specific logic
values.update(self._prepare_contact_name_from_partner(partner))
values.update(self._prepare_partner_name_from_partner(partner))
return self._convert_to_write(values)
def _prepare_address_values_from_partner(self, partner):
# Sync all address fields from partner, or none, to avoid mixing them.
if any(partner[f] for f in PARTNER_ADDRESS_FIELDS_TO_SYNC):
values = {f: partner[f] for f in PARTNER_ADDRESS_FIELDS_TO_SYNC}
else:
values = {f: self[f] for f in PARTNER_ADDRESS_FIELDS_TO_SYNC}
return values
def _prepare_contact_name_from_partner(self, partner):
contact_name = False if partner.is_company else partner.name
return {'contact_name': contact_name or self.contact_name}
def _prepare_partner_name_from_partner(self, partner):
partner_name = partner.parent_id.name
if not partner_name and partner.is_company:
partner_name = partner.name
return {'partner_name': partner_name or self.partner_name}
# ------------------------------------------------------------
# ORM
# ------------------------------------------------------------
def _auto_init(self):
res = super(Lead, self)._auto_init()
tools.create_index(self._cr, 'crm_lead_user_id_team_id_type_index',
self._table, ['user_id', 'team_id', 'type'])
tools.create_index(self._cr, 'crm_lead_create_date_team_id_idx',
self._table, ['create_date', 'team_id'])
return res
@api.model_create_multi
def create(self, vals_list):
for vals in vals_list:
if vals.get('website'):
vals['website'] = self.env['res.partner']._clean_website(vals['website'])
leads = super(Lead, self).create(vals_list)
for lead, values in zip(leads, vals_list):
if any(field in ['active', 'stage_id'] for field in values):
lead._handle_won_lost(values)
return leads
def write(self, vals):
if vals.get('website'):
vals['website'] = self.env['res.partner']._clean_website(vals['website'])
# stage change: update date_last_stage_update
if 'stage_id' in vals:
stage_id = self.env['crm.stage'].browse(vals['stage_id'])
if stage_id.is_won:
vals.update({'probability': 100, 'automated_probability': 100})
# stage change with new stage: update probability and date_closed
if vals.get('probability', 0) >= 100 or not vals.get('active', True):
vals['date_closed'] = fields.Datetime.now()
elif 'probability' in vals:
vals['date_closed'] = False
if any(field in ['active', 'stage_id'] for field in vals):
self._handle_won_lost(vals)
write_result = super(Lead, self).write(vals)
return write_result
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
""" Override to support ordering on activity_date_deadline_my.
Ordering through web client calls search_read with an order parameter set.
        search_read then calls search. This override therefore intercepts non-count
        searches whose order includes activity_date_deadline_my.
In that case we do the search in two steps.
First step: fill with deadline-based results
* Perform a read_group on my activities to get a mapping lead_id / deadline
Remember date_deadline is required, we always have a value for it. Only
the earliest deadline per lead is kept.
* Search leads linked to those activities that also match the asked domain
and order from the original search request.
* Results of that search will be at the top of returned results. Use limit
None because we have to search all leads linked to activities as ordering
on deadline is done in post processing.
* Reorder them according to deadline asc or desc depending on original
search ordering. Finally take only a subset of those leads to fill with
results matching asked offset / limit.
        Second step: fill with other results. If the first step does not give enough
        results to satisfy the offset and limit parameters, we fill with a search on
        other leads. We keep the requested domain and ordering while filtering out
        already scanned leads to keep the results coherent.
All other search and search_read are left untouched by this override to avoid
side effects. Search_count is not affected by this override.
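        Example (hypothetical order value): order='activity_date_deadline_my asc, id desc'
        first returns leads having one of my activities, sorted by their earliest deadline,
        then the remaining leads matching ``args``, sorted by 'id desc'.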
"""
if count or not order or 'activity_date_deadline_my' not in order:
return super(Lead, self).search(args, offset=offset, limit=limit, order=order, count=count)
order_items = [order_item.strip().lower() for order_item in (order or self._order).split(',')]
# Perform a read_group on my activities to get a mapping lead_id / deadline
# Remember date_deadline is required, we always have a value for it. Only
# the earliest deadline per lead is kept.
activity_asc = any('activity_date_deadline_my asc' in item for item in order_items)
my_lead_activities = self.env['mail.activity'].read_group(
[('res_model', '=', self._name), ('user_id', '=', self.env.uid)],
['res_id', 'date_deadline:min'],
['res_id'],
orderby='date_deadline ASC'
)
my_lead_mapping = dict((item['res_id'], item['date_deadline']) for item in my_lead_activities)
my_lead_ids = list(my_lead_mapping.keys())
my_lead_domain = expression.AND([[('id', 'in', my_lead_ids)], args])
my_lead_order = ', '.join(item for item in order_items if 'activity_date_deadline_my' not in item)
# Search leads linked to those activities and order them. See docstring
# of this method for more details.
search_res = super(Lead, self).search(my_lead_domain, offset=0, limit=None, order=my_lead_order, count=count)
my_lead_ids_ordered = sorted(search_res.ids, key=lambda lead_id: my_lead_mapping[lead_id], reverse=not activity_asc)
# keep only requested window (offset + limit, or offset+)
my_lead_ids_keep = my_lead_ids_ordered[offset:(offset + limit)] if limit else my_lead_ids_ordered[offset:]
# keep list of already skipped lead ids to exclude them from future search
my_lead_ids_skip = my_lead_ids_ordered[:(offset + limit)] if limit else my_lead_ids_ordered
# do not go further if limit is achieved
if limit and len(my_lead_ids_keep) >= limit:
return self.browse(my_lead_ids_keep)
        # Fill with the remaining leads. If a limit is given, reduce it by the number of
        # leads already fetched; otherwise keep None. If an offset is set, reduce it by
        # the results already fetched above. Order is updated to exclude
        # activity_date_deadline_my when calling super().
lead_limit = (limit - len(my_lead_ids_keep)) if limit else None
if offset:
lead_offset = max((offset - len(search_res), 0))
else:
lead_offset = 0
lead_order = ', '.join(item for item in order_items if 'activity_date_deadline_my' not in item)
other_lead_res = super(Lead, self).search(
expression.AND([[('id', 'not in', my_lead_ids_skip)], args]),
offset=lead_offset, limit=lead_limit, order=lead_order, count=count
)
return self.browse(my_lead_ids_keep) + other_lead_res
    def _handle_won_lost(self, vals):
        """ This method handles the state changes:
- To lost : We need to increment corresponding lost count in scoring frequency table
- To won : We need to increment corresponding won count in scoring frequency table
- From lost to Won : We need to decrement corresponding lost count + increment corresponding won count
in scoring frequency table.
- From won to lost : We need to decrement corresponding won count + increment corresponding lost count
in scoring frequency table."""
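        # e.g. archiving a lead that currently sits in a won stage (vals = {'active': False})
        # makes it both leave 'won' and reach 'lost', which the loops below translate into
        # one decrement and one increment of the scoring frequencies.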
Lead = self.env['crm.lead']
leads_reach_won = Lead
leads_leave_won = Lead
leads_reach_lost = Lead
leads_leave_lost = Lead
won_stage_ids = self.env['crm.stage'].search([('is_won', '=', True)]).ids
for lead in self:
if 'stage_id' in vals:
if vals['stage_id'] in won_stage_ids:
if lead.probability == 0:
leads_leave_lost |= lead
leads_reach_won |= lead
elif lead.stage_id.id in won_stage_ids and lead.active: # a lead can be lost at won_stage
leads_leave_won |= lead
if 'active' in vals:
if not vals['active'] and lead.active: # archive lead
if lead.stage_id.id in won_stage_ids and lead not in leads_leave_won:
leads_leave_won |= lead
leads_reach_lost |= lead
elif vals['active'] and not lead.active: # restore lead
leads_leave_lost |= lead
leads_reach_won._pls_increment_frequencies(to_state='won')
leads_leave_won._pls_increment_frequencies(from_state='won')
leads_reach_lost._pls_increment_frequencies(to_state='lost')
leads_leave_lost._pls_increment_frequencies(from_state='lost')
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
self.ensure_one()
# set default value in context, if not already set (Put stage to 'new' stage)
context = dict(self._context)
context.setdefault('default_type', self.type)
context.setdefault('default_team_id', self.team_id.id)
# Set date_open to today if it is an opp
default = default or {}
default['date_open'] = fields.Datetime.now() if self.type == 'opportunity' else False
# Do not assign to an archived user
if not self.user_id.active:
default['user_id'] = False
if not self.env.user.has_group('crm.group_use_recurring_revenues'):
default['recurring_revenue'] = 0
default['recurring_plan'] = False
return super(Lead, self.with_context(context)).copy(default=default)
@api.model
def _fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
if self._context.get('opportunity_id'):
opportunity = self.browse(self._context['opportunity_id'])
action = opportunity.get_formview_action()
if action.get('views') and any(view_id for view_id in action['views'] if view_id[1] == view_type):
view_id = next(view_id[0] for view_id in action['views'] if view_id[1] == view_type)
res = super(Lead, self)._fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
res['arch'] = self._fields_view_get_address(res['arch'])
return res
@api.model
def _read_group_stage_ids(self, stages, domain, order):
# retrieve team_id from the context and write the domain
# - ('id', 'in', stages.ids): add columns that should be present
# - OR ('fold', '=', False): add default columns that are not folded
# - OR ('team_ids', '=', team_id), ('fold', '=', False) if team_id: add team columns that are not folded
team_id = self._context.get('default_team_id')
if team_id:
search_domain = ['|', ('id', 'in', stages.ids), '|', ('team_id', '=', False), ('team_id', '=', team_id)]
else:
search_domain = ['|', ('id', 'in', stages.ids), ('team_id', '=', False)]
# perform search
stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID)
return stages.browse(stage_ids)
def _stage_find(self, team_id=False, domain=None, order='sequence'):
""" Determine the stage of the current lead with its teams, the given domain and the given team_id
        :param team_id: optional extra team id to include in the stage search domain
        :param domain: base search domain for stages
:returns crm.stage recordset
"""
# collect all team_ids by adding given one, and the ones related to the current leads
team_ids = set()
if team_id:
team_ids.add(team_id)
for lead in self:
if lead.team_id:
team_ids.add(lead.team_id.id)
# generate the domain
if team_ids:
search_domain = ['|', ('team_id', '=', False), ('team_id', 'in', list(team_ids))]
else:
search_domain = [('team_id', '=', False)]
# AND with the domain in parameter
if domain:
search_domain += list(domain)
# perform search, return the first found
return self.env['crm.stage'].search(search_domain, order=order, limit=1)
# ------------------------------------------------------------
# ACTIONS
# ------------------------------------------------------------
def toggle_active(self):
""" When archiving: mark probability as 0. When re-activating
update probability again, for leads and opportunities. """
res = super(Lead, self).toggle_active()
activated = self.filtered(lambda lead: lead.active)
archived = self.filtered(lambda lead: not lead.active)
if activated:
activated.write({'lost_reason': False})
activated._compute_probabilities()
if archived:
archived.write({'probability': 0, 'automated_probability': 0})
return res
def action_set_lost(self, **additional_values):
""" Lost semantic: probability = 0 or active = False """
res = self.action_archive()
if additional_values:
self.write(dict(additional_values))
return res
def action_set_won(self):
""" Won semantic: probability = 100 (active untouched) """
self.action_unarchive()
# group the leads by team_id, in order to write once by values couple (each write leads to frequency increment)
leads_by_won_stage = {}
for lead in self:
stage_id = lead._stage_find(domain=[('is_won', '=', True)])
if stage_id in leads_by_won_stage:
leads_by_won_stage[stage_id] |= lead
else:
leads_by_won_stage[stage_id] = lead
for won_stage_id, leads in leads_by_won_stage.items():
leads.write({'stage_id': won_stage_id.id, 'probability': 100})
return True
def action_set_automated_probability(self):
self.write({'probability': self.automated_probability})
def action_set_won_rainbowman(self):
self.ensure_one()
self.action_set_won()
message = self._get_rainbowman_message()
if message:
return {
'effect': {
'fadeout': 'slow',
'message': message,
'img_url': '/web/image/%s/%s/image_1024' % (self.team_id.user_id._name, self.team_id.user_id.id) if self.team_id.user_id.image_1024 else '/web/static/src/img/smile.svg',
'type': 'rainbow_man',
}
}
return True
def get_rainbowman_message(self):
self.ensure_one()
if self.stage_id.is_won:
return self._get_rainbowman_message()
return False
def _get_rainbowman_message(self):
message = False
if self.user_id and self.team_id and self.expected_revenue:
self.flush() # flush fields to make sure DB is up to date
query = """
SELECT
SUM(CASE WHEN user_id = %(user_id)s THEN 1 ELSE 0 END) as total_won,
MAX(CASE WHEN date_closed >= CURRENT_DATE - INTERVAL '30 days' AND user_id = %(user_id)s THEN expected_revenue ELSE 0 END) as max_user_30,
MAX(CASE WHEN date_closed >= CURRENT_DATE - INTERVAL '7 days' AND user_id = %(user_id)s THEN expected_revenue ELSE 0 END) as max_user_7,
MAX(CASE WHEN date_closed >= CURRENT_DATE - INTERVAL '30 days' AND team_id = %(team_id)s THEN expected_revenue ELSE 0 END) as max_team_30,
MAX(CASE WHEN date_closed >= CURRENT_DATE - INTERVAL '7 days' AND team_id = %(team_id)s THEN expected_revenue ELSE 0 END) as max_team_7
FROM crm_lead
WHERE
type = 'opportunity'
AND
active = True
AND
probability = 100
AND
DATE_TRUNC('year', date_closed) = DATE_TRUNC('year', CURRENT_DATE)
AND
(user_id = %(user_id)s OR team_id = %(team_id)s)
"""
self.env.cr.execute(query, {'user_id': self.user_id.id,
'team_id': self.team_id.id})
query_result = self.env.cr.dictfetchone()
if query_result['total_won'] == 1:
message = _('Go, go, go! Congrats for your first deal.')
elif query_result['max_team_30'] == self.expected_revenue:
message = _('Boom! Team record for the past 30 days.')
elif query_result['max_team_7'] == self.expected_revenue:
message = _('Yeah! Deal of the last 7 days for the team.')
elif query_result['max_user_30'] == self.expected_revenue:
message = _('You just beat your personal record for the past 30 days.')
elif query_result['max_user_7'] == self.expected_revenue:
message = _('You just beat your personal record for the past 7 days.')
return message
def action_schedule_meeting(self):
""" Open meeting's calendar view to schedule meeting on current opportunity.
:return dict: dictionary value for created Meeting view
"""
self.ensure_one()
action = self.env["ir.actions.actions"]._for_xml_id("calendar.action_calendar_event")
partner_ids = self.env.user.partner_id.ids
if self.partner_id:
partner_ids.append(self.partner_id.id)
action['context'] = {
'default_opportunity_id': self.id if self.type == 'opportunity' else False,
'default_partner_id': self.partner_id.id,
'default_partner_ids': partner_ids,
'default_team_id': self.team_id.id,
'default_name': self.name,
}
return action
def action_snooze(self):
self.ensure_one()
today = date.today()
my_next_activity = self.activity_ids.filtered(lambda activity: activity.user_id == self.env.user)[:1]
if my_next_activity:
if my_next_activity.date_deadline < today:
date_deadline = today + timedelta(days=7)
else:
date_deadline = my_next_activity.date_deadline + timedelta(days=7)
my_next_activity.write({
'date_deadline': date_deadline
})
return True
# ------------------------------------------------------------
# BUSINESS
# ------------------------------------------------------------
def log_meeting(self, meeting_subject, meeting_date, duration):
if not duration:
duration = _('unknown')
else:
duration = str(duration)
meet_date = fields.Datetime.from_string(meeting_date)
meeting_usertime = fields.Datetime.to_string(fields.Datetime.context_timestamp(self, meet_date))
html_time = "<time datetime='%s+00:00'>%s</time>" % (meeting_date, meeting_usertime)
message = _("Meeting scheduled at '%s'<br> Subject: %s <br> Duration: %s hours") % (html_time, meeting_subject, duration)
return self.message_post(body=message)
# ------------------------------------------------------------
# MERGE LEADS / OPPS
# ------------------------------------------------------------
    def _merge_get_result_type(self):
        """ Define the type of the result of the merge. If at least one of the
        elements to merge is an opportunity, the resulting record will be an
        opportunity. Otherwise it will be a lead. """
if any(record.type == 'opportunity' for record in self):
return 'opportunity'
return 'lead'
def _merge_data(self, fields):
""" Prepare lead/opp data into a dictionary for merging. Different types
of fields are processed in different ways:
- text: all the values are concatenated
- m2m and o2m: those fields aren't processed
            - m2o: the first not null value prevails (the others are dropped)
- any other type of field: same as m2o
:param fields: list of fields to process
:return dict data: contains the merged values of the new opportunity
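            Illustrative example (hypothetical values): merging lead A
            (description='Met at fair', partner_id=7, phone=False) with lead B
            (description='Called back', partner_id=9, phone='+32...') gives the two
            descriptions concatenated (separated by a blank line), partner_id=7 and
            the phone of lead B, since the first non-null value wins.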
"""
# helpers
def _get_first_not_null(attr, opportunities):
for opp in opportunities:
val = opp[attr]
if val:
return val
return False
def _get_first_not_null_id(attr, opportunities):
res = _get_first_not_null(attr, opportunities)
return res.id if res else False
# process the fields' values
data = {}
for field_name in fields:
field = self._fields.get(field_name)
if field is None:
continue
if field.type in ('many2many', 'one2many'):
continue
elif field.type == 'many2one':
data[field_name] = _get_first_not_null_id(field_name, self) # take the first not null
elif field.type == 'text':
data[field_name] = '\n\n'.join(it for it in self.mapped(field_name) if it)
else:
data[field_name] = _get_first_not_null(field_name, self)
# define the resulting type ('lead' or 'opportunity')
data['type'] = self._merge_get_result_type()
return data
def _merge_notify_get_merged_fields_message(self, fields):
""" Generate the message body with the changed values
:param fields : list of fields to track
:returns a list of message bodies for the corresponding leads
"""
bodies = []
for lead in self:
title = "%s : %s\n" % (_('Merged opportunity') if lead.type == 'opportunity' else _('Merged lead'), lead.name)
body = [title]
_fields = self.env['ir.model.fields'].search([
('name', 'in', fields or []),
('model_id.model', '=', lead._name),
])
for field in _fields:
value = getattr(lead, field.name, False)
if field.ttype == 'selection':
selections = lead.fields_get()[field.name]['selection']
value = next((v[1] for v in selections if v[0] == value), value)
elif field.ttype == 'many2one':
if value:
value = value.sudo().display_name
elif field.ttype == 'many2many':
if value:
value = ','.join(
val.display_name
for val in value.sudo()
)
body.append("%s: %s" % (field.field_description, value or ''))
bodies.append("<br/>".join(body + ['<br/>']))
return bodies
    def _merge_notify(self, opportunities):
        """ Post a message gathering merged leads/opps information. It explains
        which fields have been merged and their new values. `self` is the
        crm.lead record resulting from the merge.
:param opportunities: see ``merge_dependences``
"""
# TODO JEM: mail template should be used instead of fix body, subject text
self.ensure_one()
# mail message's subject
result_type = opportunities._merge_get_result_type()
merge_message = _('Merged leads') if result_type == 'lead' else _('Merged opportunities')
subject = merge_message + ": " + ", ".join(opportunities.mapped('name'))
# message bodies
message_bodies = opportunities._merge_notify_get_merged_fields_message(list(CRM_LEAD_FIELDS_TO_MERGE))
message_body = "\n\n".join(message_bodies)
return self.message_post(body=message_body, subject=subject)
def _merge_opportunity_history(self, opportunities):
""" Move mail.message from the given opportunities to the current one. `self` is the
crm.lead record destination for message of `opportunities`.
:param opportunities: see ``merge_dependences``
"""
self.ensure_one()
for opportunity in opportunities:
for message in opportunity.message_ids:
if message.subject:
subject = _("From %(source_name)s : %(source_subject)s", source_name=opportunity.name, source_subject=message.subject)
else:
subject = _("From %(source_name)s", source_name=opportunity.name)
message.write({
'res_id': self.id,
'subject': subject,
})
return True
def _merge_opportunity_attachments(self, opportunities):
""" Move attachments of given opportunities to the current one `self`, and rename
the attachments having same name than native ones.
:param opportunities: see ``merge_dependences``
"""
self.ensure_one()
# return attachments of opportunity
def _get_attachments(opportunity_id):
return self.env['ir.attachment'].search([('res_model', '=', self._name), ('res_id', '=', opportunity_id)])
first_attachments = _get_attachments(self.id)
# counter of all attachments to move. Used to make sure the name is different for all attachments
count = 1
for opportunity in opportunities:
attachments = _get_attachments(opportunity.id)
for attachment in attachments:
values = {'res_id': self.id}
for attachment_in_first in first_attachments:
if attachment.name == attachment_in_first.name:
values['name'] = "%s (%s)" % (attachment.name, count)
count += 1
attachment.write(values)
return True
def merge_dependences(self, opportunities):
""" Merge dependences (messages, attachments, ...). These dependences will be
transfered to `self`, the most important lead.
:param opportunities : recordset of opportunities to transfer. Does not
include `self` which is the target crm.lead being the result of the merge.
"""
self.ensure_one()
self._merge_notify(opportunities)
self._merge_opportunity_history(opportunities)
self._merge_opportunity_attachments(opportunities)
def merge_opportunity(self, user_id=False, team_id=False, auto_unlink=True):
""" Merge opportunities in one. Different cases of merge:
- merge leads together = 1 new lead
- merge at least 1 opp with anything else (lead or opp) = 1 new opp
The resulting lead/opportunity will be the most important one (based on its confidence level)
updated with values from other opportunities to merge.
:param user_id : the id of the salesperson. If not given, will be determined by `_merge_data`.
:param team_id : the id of the Sales Team. If not given, will be determined by `_merge_data`.
:return crm.lead record resulting from the merge
"""
if len(self.ids) <= 1:
raise UserError(_('Please select more than one element (lead or opportunity) from the list view.'))
if len(self.ids) > 5 and not self.env.is_superuser():
raise UserError(_("To prevent data loss, Leads and Opportunities can only be merged by groups of 5."))
opportunities = self._sort_by_confidence_level(reverse=True)
# get SORTED recordset of head and tail, and complete list
opportunities_head = opportunities[0]
opportunities_tail = opportunities[1:]
# merge all the sorted opportunities. This means the values of
# the first one (head opp) take priority.
merged_data = opportunities._merge_data(list(CRM_LEAD_FIELDS_TO_MERGE))
# force value for salesperson and Sales Team
if user_id:
merged_data['user_id'] = user_id
if team_id:
merged_data['team_id'] = team_id
# merge other data (mail.message, attachments, ...) from tail into head
opportunities_head.merge_dependences(opportunities_tail)
# check if the stage is in the stages of the Sales Team. If not, assign the stage with the lowest sequence
if merged_data.get('team_id'):
team_stage_ids = self.env['crm.stage'].search(['|', ('team_id', '=', merged_data['team_id']), ('team_id', '=', False)], order='sequence')
if merged_data.get('stage_id') not in team_stage_ids.ids:
merged_data['stage_id'] = team_stage_ids[0].id if team_stage_ids else False
# write merged data into first opportunity
opportunities_head.write(merged_data)
# delete tail opportunities
# we use the SUPERUSER to avoid access rights issues because as the user had the rights to see the records it should be safe to do so
if auto_unlink:
opportunities_tail.sudo().unlink()
return opportunities_head
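# Usage sketch (hypothetical ids, not taken from the original code base): merging a
# recordset keeps the most confident record and unlinks the others.
#   leads = self.env['crm.lead'].browse([12, 15, 18])
#   merged = leads.merge_opportunity(user_id=7, team_id=2)
#   # merged is the head record, updated with _merge_data values and the tails' messages/attachments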
def _sort_by_confidence_level(self, reverse=False):
""" Sorting the leads/opps according to the confidence level of its stage, which relates to the probability of winning it
The confidence level increases with the stage sequence
An Opportunity always has higher confidence level than a lead
"""
def opps_key(opportunity):
return opportunity.type == 'opportunity', opportunity.stage_id.sequence, -opportunity._origin.id
return self.sorted(key=opps_key, reverse=reverse)
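# Example ordering (hypothetical records): with reverse=True, opportunities come before
# leads, higher stage sequences come first, and ties are broken by the lowest origin id.
#   records: L1(lead, seq 1, id 5), O1(opp, seq 1, id 8), O2(opp, seq 3, id 9)
#   records._sort_by_confidence_level(reverse=True)  # -> O2, O1, L1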
def _convert_opportunity_data(self, customer, team_id=False):
""" Extract the data from a lead to create the opportunity
:param customer : res.partner record
:param team_id : identifier of the Sales Team to determine the stage
"""
new_team_id = team_id if team_id else self.team_id.id
upd_values = {
'type': 'opportunity',
'date_open': fields.Datetime.now(),
'date_conversion': fields.Datetime.now(),
}
if customer != self.partner_id:
upd_values['partner_id'] = customer.id if customer else False
if not self.stage_id:
stage = self._stage_find(team_id=new_team_id)
upd_values['stage_id'] = stage.id
return upd_values
def convert_opportunity(self, partner_id, user_ids=False, team_id=False):
customer = False
if partner_id:
customer = self.env['res.partner'].browse(partner_id)
for lead in self:
if not lead.active or lead.probability == 100:
continue
vals = lead._convert_opportunity_data(customer, team_id)
lead.write(vals)
if user_ids or team_id:
self.handle_salesmen_assignment(user_ids, team_id)
return True
def _get_lead_duplicates(self, partner=None, email=None, include_lost=False):
""" Search for leads that seem duplicated based on partner / email.
:param partner : optional customer to use when searching for duplicates
:param email: email (possibly formatted) to search
:param boolean include_lost: if True, search includes archived opportunities
(still only active leads are considered). If False, search for active
and not won leads and opportunities;
"""
if not email and not partner:
return self.env['crm.lead']
domain = []
for normalized_email in [tools.email_normalize(email) for email in tools.email_split(email)]:
domain.append(('email_normalized', '=', normalized_email))
if partner:
domain.append(('partner_id', '=', partner.id))
if not domain:
return self.env['crm.lead']
domain = ['|'] * (len(domain) - 1) + domain
if include_lost:
domain += ['|', ('type', '=', 'opportunity'), ('active', '=', True)]
else:
domain += ['&', ('active', '=', True), '|', ('probability', '=', False), ('probability', '<', 100)]
return self.with_context(active_test=False).search(domain)
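# Illustrative sketch of the resulting domain (hypothetical values): for
# email='John <john@test.com>' and a partner with id 42, include_lost=False gives roughly:
#   ['|', ('email_normalized', '=', 'john@test.com'), ('partner_id', '=', 42),
#    '&', ('active', '=', True), '|', ('probability', '=', False), ('probability', '<', 100)]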
def _create_customer(self):
""" Create a partner from lead data and link it to the lead.
:return: newly-created partner browse record
"""
Partner = self.env['res.partner']
contact_name = self.contact_name
if not contact_name:
contact_name = Partner._parse_partner_name(self.email_from)[0] if self.email_from else False
if self.partner_name:
partner_company = Partner.create(self._prepare_customer_values(self.partner_name, is_company=True))
elif self.partner_id:
partner_company = self.partner_id
else:
partner_company = None
if contact_name:
return Partner.create(self._prepare_customer_values(contact_name, is_company=False, parent_id=partner_company.id if partner_company else False))
if partner_company:
return partner_company
return Partner.create(self._prepare_customer_values(self.name, is_company=False))
def _prepare_customer_values(self, partner_name, is_company=False, parent_id=False):
""" Extract data from lead to create a partner.
:param partner_name : future name of the partner
:param is_company : True if the partner is a company
:param parent_id : id of the parent partner (False if no parent)
:return: dictionary of values to give to res_partner.create()
"""
email_split = tools.email_split(self.email_from)
res = {
'name': partner_name,
'user_id': self.env.context.get('default_user_id') or self.user_id.id,
'comment': self.description,
'team_id': self.team_id.id,
'parent_id': parent_id,
'phone': self.phone,
'mobile': self.mobile,
'email': email_split[0] if email_split else False,
'title': self.title.id,
'function': self.function,
'street': self.street,
'street2': self.street2,
'zip': self.zip,
'city': self.city,
'country_id': self.country_id.id,
'state_id': self.state_id.id,
'website': self.website,
'is_company': is_company,
'type': 'contact'
}
if self.lang_id:
res['lang'] = self.lang_id.code
return res
def _find_matching_partner(self, email_only=False):
""" Try to find a matching partner with available information on the
lead, using notably customer's name, email, ...
:param email_only: Only find a match based on the email. To be used
for automatic processes where an ilike match on the name can be too risky
:return: partner browse record
"""
self.ensure_one()
partner = self.partner_id
if not partner and self.email_from:
partner = self.env['res.partner'].search([('email', '=', self.email_from)], limit=1)
if not partner and not email_only:
# search through the existing partners based on the lead's partner or contact name
# to be aligned with _create_customer, search on lead's name as last possibility
for customer_potential_name in [self[field_name] for field_name in ['partner_name', 'contact_name', 'name'] if self[field_name]]:
partner = self.env['res.partner'].search([('name', 'ilike', '%' + customer_potential_name + '%')], limit=1)
if partner:
break
return partner
def handle_partner_assignment(self, force_partner_id=False, create_missing=True):
""" Update customer (partner_id) of leads. Purpose is to set the same
partner on most leads; either through a newly created partner or through
a given partner_id.
:param int force_partner_id: if set, update all leads to that customer;
:param create_missing: for leads without customer, create a new one
based on lead information;
"""
for lead in self:
if force_partner_id:
lead.partner_id = force_partner_id
if not lead.partner_id and create_missing:
partner = lead._create_customer()
lead.partner_id = partner.id
def handle_salesmen_assignment(self, user_ids=None, team_id=False):
""" Assign salesmen and salesteam to a batch of leads. If there are more
leads than salesmen, these salesmen will be assigned in round-robin. E.g.
4 salesmen (S1, S2, S3, S4) for 6 leads (L1, L2, ... L6) will be assigned as
follows: L1 - S1, L2 - S2, L3 - S3, L4 - S4, L5 - S1, L6 - S2.
:param list user_ids: salesmen to assign
:param int team_id: salesteam to assign
"""
update_vals = {'team_id': team_id} if team_id else {}
if not user_ids:
self.write(update_vals)
else:
lead_ids = self.ids
steps = len(user_ids)
# pass 1 : lead_ids[0:6:3] = [L1,L4]
# pass 2 : lead_ids[1:6:3] = [L2,L5]
# pass 3 : lead_ids[2:6:3] = [L3,L6]
# ...
for idx in range(0, steps):
subset_ids = lead_ids[idx:len(lead_ids):steps]
update_vals['user_id'] = user_ids[idx]
self.env['crm.lead'].browse(subset_ids).write(update_vals)
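# Minimal sketch of the round-robin slicing above (hypothetical ids):
#   lead_ids = [1, 2, 3, 4, 5, 6]; user_ids = [10, 20, 30]
#   for idx in range(len(user_ids)):
#       print(user_ids[idx], lead_ids[idx:len(lead_ids):len(user_ids)])
#   # 10 [1, 4]
#   # 20 [2, 5]
#   # 30 [3, 6]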
# ------------------------------------------------------------
# TOOLS
# ------------------------------------------------------------
def redirect_lead_opportunity_view(self):
self.ensure_one()
return {
'name': _('Lead or Opportunity'),
'view_mode': 'form',
'res_model': 'crm.lead',
'domain': [('type', '=', self.type)],
'res_id': self.id,
'view_id': False,
'type': 'ir.actions.act_window',
'context': {'default_type': self.type}
}
@api.model
def get_empty_list_help(self, help):
help_title, sub_title = "", ""
if self._context.get('default_type') == 'lead':
help_title = _('Create a new lead')
else:
help_title = _('Create an opportunity to start playing with your pipeline.')
alias_record = self.env['mail.alias'].search([
('alias_name', '!=', False),
('alias_name', '!=', ''),
('alias_model_id.model', '=', 'crm.lead'),
('alias_parent_model_id.model', '=', 'crm.team'),
('alias_force_thread_id', '=', False)
], limit=1)
if alias_record and alias_record.alias_domain and alias_record.alias_name:
email = '%s@%s' % (alias_record.alias_name, alias_record.alias_domain)
email_link = "<b><a href='mailto:%s'>%s</a></b>" % (email, email)
sub_title = _('Use the top left <i>Create</i> button, or send an email to %s to test the email gateway.') % (email_link)
return '<p class="o_view_nocontent_smiling_face">%s</p><p class="oe_view_nocontent_alias">%s</p>' % (help_title, sub_title)
# ------------------------------------------------------------
# MAILING
# ------------------------------------------------------------
def _creation_subtype(self):
return self.env.ref('crm.mt_lead_create')
def _track_subtype(self, init_values):
self.ensure_one()
if 'stage_id' in init_values and self.probability == 100 and self.stage_id:
return self.env.ref('crm.mt_lead_won')
elif 'lost_reason' in init_values and self.lost_reason:
return self.env.ref('crm.mt_lead_lost')
elif 'stage_id' in init_values:
return self.env.ref('crm.mt_lead_stage')
elif 'active' in init_values and self.active:
return self.env.ref('crm.mt_lead_restored')
elif 'active' in init_values and not self.active:
return self.env.ref('crm.mt_lead_lost')
return super(Lead, self)._track_subtype(init_values)
def _notify_get_groups(self, msg_vals=None):
""" Handle salesman recipients that can convert leads into opportunities
and set opportunities as won / lost. """
groups = super(Lead, self)._notify_get_groups(msg_vals=msg_vals)
local_msg_vals = dict(msg_vals or {})
self.ensure_one()
if self.type == 'lead':
convert_action = self._notify_get_action_link('controller', controller='/lead/convert', **local_msg_vals)
salesman_actions = [{'url': convert_action, 'title': _('Convert to opportunity')}]
else:
won_action = self._notify_get_action_link('controller', controller='/lead/case_mark_won', **local_msg_vals)
lost_action = self._notify_get_action_link('controller', controller='/lead/case_mark_lost', **local_msg_vals)
salesman_actions = [
{'url': won_action, 'title': _('Won')},
{'url': lost_action, 'title': _('Lost')}]
if self.team_id:
custom_params = dict(local_msg_vals, res_id=self.team_id.id, model=self.team_id._name)
salesman_actions.append({
'url': self._notify_get_action_link('view', **custom_params),
'title': _('Sales Team Settings')
})
salesman_group_id = self.env.ref('sales_team.group_sale_salesman').id
new_group = (
'group_sale_salesman', lambda pdata: pdata['type'] == 'user' and salesman_group_id in pdata['groups'], {
'actions': salesman_actions,
})
return [new_group] + groups
def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None):
""" Override to set alias of lead and opportunities to their sales team if any. """
aliases = self.mapped('team_id').sudo()._notify_get_reply_to(default=default, records=None, company=company, doc_names=None)
res = {lead.id: aliases.get(lead.team_id.id) for lead in self}
leftover = self.filtered(lambda rec: not rec.team_id)
if leftover:
res.update(super(Lead, leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names))
return res
def _message_get_default_recipients(self):
return {r.id: {
'partner_ids': [],
'email_to': r.email_normalized,
'email_cc': False}
for r in self}
def _message_get_suggested_recipients(self):
recipients = super(Lead, self)._message_get_suggested_recipients()
try:
for lead in self:
if lead.partner_id:
lead._message_add_suggested_recipient(recipients, partner=lead.partner_id, reason=_('Customer'))
elif lead.email_from:
lead._message_add_suggested_recipient(recipients, email=lead.email_from, reason=_('Customer Email'))
except AccessError: # no read access rights -> just ignore suggested recipients because this imply modifying followers
pass
return recipients
@api.model
def message_new(self, msg_dict, custom_values=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
# remove external users
if self.env.user.has_group('base.group_portal'):
self = self.with_context(default_user_id=False)
# remove default author when going through the mail gateway. Indeed we
# do not want to explicitly set user_id to False; however we do not
# want the gateway user to be responsible if no other responsible is
# found.
if self._uid == self.env.ref('base.user_root').id:
self = self.with_context(default_user_id=False)
if custom_values is None:
custom_values = {}
defaults = {
'name': msg_dict.get('subject') or _("No Subject"),
'email_from': msg_dict.get('from'),
'partner_id': msg_dict.get('author_id', False),
}
if msg_dict.get('priority') in dict(crm_stage.AVAILABLE_PRIORITIES):
defaults['priority'] = msg_dict.get('priority')
defaults.update(custom_values)
# assign right company
if 'company_id' not in defaults and 'team_id' in defaults:
defaults['company_id'] = self.env['crm.team'].browse(defaults['team_id']).company_id.id
return super(Lead, self).message_new(msg_dict, custom_values=defaults)
def _message_post_after_hook(self, message, msg_vals):
if self.email_from and not self.partner_id:
# we consider that posting a message with a specified recipient (not a follower, a specific one)
# on a document without customer means that it was created through the chatter using
# suggested recipients. This heuristic allows us to avoid ugly hacks in JS.
new_partner = message.partner_ids.filtered(lambda partner: partner.email == self.email_from)
if new_partner:
self.search([
('partner_id', '=', False),
('email_from', '=', new_partner.email),
('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id})
return super(Lead, self)._message_post_after_hook(message, msg_vals)
def _message_partner_info_from_emails(self, emails, link_mail=False):
result = super(Lead, self)._message_partner_info_from_emails(emails, link_mail=link_mail)
for partner_info in result:
if not partner_info.get('partner_id') and (self.partner_name or self.contact_name):
emails = email_re.findall(partner_info['full_name'] or '')
email = emails and emails[0] or ''
if email and self.email_from and email.lower() == self.email_from.lower():
partner_info['full_name'] = tools.formataddr((self.contact_name or self.partner_name, email))
break
return result
def _phone_get_number_fields(self):
""" Use mobile or phone fields to compute sanitized phone number """
return ['mobile', 'phone']
@api.model
def get_import_templates(self):
return [{
'label': _('Import Template for Leads & Opportunities'),
'template': '/crm/static/xls/crm_lead.xls'
}]
# ------------------------------------------------------------
# PLS
# ------------------------------------------------------------
# Predictive lead scoring is computing the lead probability, based on won and lost leads from the past
# Each won/lost lead increments a frequency table, where we store, for each field/value couple, the number of
# won and lost leads.
# E.g. : A won lead from Belgium will increase the won count of the frequency country_id='Belgium' by 1.
# The frequencies are split by team_id, so each team has its own frequencies environment. (Team A doesn't impact B)
# There are two main ways to build the frequency table:
# - Live Increment: At each Won/lost, we increment directly the frequencies based on the lead values.
#       Done right BEFORE writing the lead as won or lost.
#       We consider a lead that will be marked as won or lost.
#       Used each time a lead is won or lost, to ensure the frequency table is always up to date.
# - One shot Rebuild: empty the frequency table and rebuild it from scratch, based on all the leads already won/lost.
#       Done during the cron process.
#       We consider all the leads that have already been won or lost.
#       Used in one shot, when modifying the criteria to take into account (fields or reference date).
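# Illustrative sketch of a frequency record (hypothetical values): each crm.lead.scoring.frequency
# row stores, per team, how often a field/value couple was seen in won and lost leads, e.g.
#   {'team_id': 1, 'variable': 'country_id', 'value': '21',
#    'won_count': 14.1, 'lost_count': 9.1}   # counts include the +0.1 smoothing described below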
# ---------------------------------
# PLS: Probability Computation
# ---------------------------------
def _pls_get_naive_bayes_probabilities(self, batch_mode=False):
"""
In machine learning, naive Bayes classifiers (NBC) are a family of simple "probabilistic classifiers" based on
applying Bayes theorem with strong (naive) independence assumptions between the variables taken into account.
E.g: will TDE eat m&m's depending on his sleep status, the amount of work he has and the fullness of his stomach?
As we use experience to compute the statistics, every day, we will register the variables state + the result.
As the days pass, we will be able to determine, with more and more precision, if TDE will eat m&m's
for a specific combination :
- did sleep very well, a lot of work and stomach full > Will never happen !
- didn't sleep at all, no work at all and empty stomach > for sure !
Following Bayes' Theorem: the probability that an event occurs (to win) under certain conditions is proportional
to the probability to win under each condition separately and the probability to win. We compute a 'Win score'
-> P(Won | A∩B) ∝ P(A∩B | Won)*P(Won) OR S(Won | A∩B) = P(A∩B | Won)*P(Won)
To compute a percentage of probability to win, we also compute the 'Lost score' that is proportional to the
probability to lose under each condition separately and the probability to lose.
-> Probability = S(Won | A∩B) / ( S(Won | A∩B) + S(Lost | A∩B) )
See https://www.youtube.com/watch?v=CPqOCI0ahss for a quick and simple example.
One issue with NBC is when an event occurrence is never observed.
E.g: if TDE always eats m&m's when he has an empty stomach, then the 'not eating m&m's when empty stomach' event
will never be observed.
This is called 'zero frequency' and it leads to division (or at least multiplication) by zero.
To avoid this, we add 0.1 to each frequency. With little data, the computation is then not really realistic.
The more records we have to analyse, the more precise the estimation will be.
:return: probability in percent (and integer rounded) that the lead will be won at the current stage.
"""
lead_probabilities = {}
if not self:
return lead_probabilities
# Get all leads values, no matter the team_id
domain = []
if batch_mode:
domain = [
'&',
('active', '=', True), ('id', 'in', self.ids),
'|',
('probability', '=', None),
'&',
('probability', '<', 100), ('probability', '>', 0)
]
leads_values_dict = self._pls_get_lead_pls_values(domain=domain)
if not leads_values_dict:
return lead_probabilities
# Get unique couples to search in frequency table and won leads.
leads_fields = set() # keep unique fields, as a lead can have multiple tag_ids
won_leads = set()
won_stage_ids = self.env['crm.stage'].search([('is_won', '=', True)]).ids
for lead_id, values in leads_values_dict.items():
for field, value in values['values']:
if field == 'stage_id' and value in won_stage_ids:
won_leads.add(lead_id)
leads_fields.add(field)
# get all variable related records from frequency table, no matter the team_id
frequencies = self.env['crm.lead.scoring.frequency'].search([('variable', 'in', list(leads_fields))], order="team_id asc")
# get all team_ids from frequencies
frequency_teams = frequencies.mapped('team_id')
frequency_team_ids = [0] + [team.id for team in frequency_teams]
# 1. Compute each variable value count individually
# regroup each variable to be able to compute their own probabilities
# As not all the variables enter into account (we reject unset values in the process)
# each value probability must be computed only with its own variable related total count
# special case: for lead for which team_id is not in frequency table,
# we consider all the records, independently from team_id (this is why we add a result[-1])
result = dict((team_id, dict((field, dict(won_total=0, lost_total=0)) for field in leads_fields)) for team_id in frequency_team_ids)
result[-1] = dict((field, dict(won_total=0, lost_total=0)) for field in leads_fields)
for frequency in frequencies:
team_result = result[frequency.team_id.id if frequency.team_id else 0]
field = frequency['variable']
value = frequency['value']
# To avoid a tag taking too much importance if its subset is too small,
# we ignore the tag frequencies if we have fewer than 50 won or lost leads for this tag.
if field == 'tag_id' and (frequency['won_count'] + frequency['lost_count']) < 50:
continue
team_result[field][value] = {'won': frequency['won_count'], 'lost': frequency['lost_count']}
team_result[field]['won_total'] += frequency['won_count']
team_result[field]['lost_total'] += frequency['lost_count']
if value not in result[-1][field]:
result[-1][field][value] = {'won': 0, 'lost': 0}
result[-1][field][value]['won'] += frequency['won_count']
result[-1][field][value]['lost'] += frequency['lost_count']
result[-1][field]['won_total'] += frequency['won_count']
result[-1][field]['lost_total'] += frequency['lost_count']
# Get all won, lost and total count for all records in frequencies per team_id
for team_id in result:
result[team_id]['team_won'], \
result[team_id]['team_lost'], \
result[team_id]['team_total'] = self._pls_get_won_lost_total_count(result[team_id])
save_team_id = None
p_won, p_lost = 1, 1
for lead_id, lead_values in leads_values_dict.items():
# if stage_id is null, return 0 and bypass computation
lead_fields = [value[0] for value in lead_values.get('values', [])]
if 'stage_id' not in lead_fields:
lead_probabilities[lead_id] = 0
continue
# if lead stage is won, return 100
elif lead_id in won_leads:
lead_probabilities[lead_id] = 100
continue
lead_team_id = lead_values['team_id'] if lead_values['team_id'] else 0 # team_id = None -> Convert to 0
lead_team_id = lead_team_id if lead_team_id in result else -1 # team_id not in frequency Table -> convert to -1
if lead_team_id != save_team_id:
save_team_id = lead_team_id
team_won = result[save_team_id]['team_won']
team_lost = result[save_team_id]['team_lost']
team_total = result[save_team_id]['team_total']
# if one count = 0, we cannot compute lead probability
if not team_won or not team_lost:
continue
p_won = team_won / team_total
p_lost = team_lost / team_total
# 2. Compute won and lost score using each variable's individual probability
s_lead_won, s_lead_lost = p_won, p_lost
for field, value in lead_values['values']:
field_result = result.get(save_team_id, {}).get(field)
value = value.origin if hasattr(value, 'origin') else value
value_result = field_result.get(str(value)) if field_result else False
if value_result:
total_won = team_won if field == 'stage_id' else field_result['won_total']
total_lost = team_lost if field == 'stage_id' else field_result['lost_total']
s_lead_won *= value_result['won'] / total_won
s_lead_lost *= value_result['lost'] / total_lost
# 3. Compute Probability to win
lead_probabilities[lead_id] = round(100 * s_lead_won / (s_lead_won + s_lead_lost), 2)
return lead_probabilities
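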
# ---------------------------------
# PLS: Live Increment
# ---------------------------------
def _pls_increment_frequencies(self, from_state=None, to_state=None):
"""
When losing or winning a lead, this method is called to increment each PLS parameter related to the lead
in won_count (if won) or in lost_count (if lost).
This method is also used when reactivating a mistakenly lost lead (using the decrement argument).
In this case, the lost count should be decremented by 1 for each PLS parameter linked to the lead.
Live increment must be done before writing the new values because we need to know the state change (from and to).
This would not be an issue for the reach won or reach lost as we just need to increment the frequencies with the
final state of the lead.
The issue is when the lead leaves a closed state because once the new values have been written, we do not know
what was the previous state that we need to decrement.
This is why the 'is_won' and 'decrement' parameters are used to describe the from / to change of its state.
"""
new_frequencies_by_team, existing_frequencies_by_team = self._pls_prepare_update_frequency_table(target_state=from_state or to_state)
# update frequency table
self._pls_update_frequency_table(new_frequencies_by_team, 1 if to_state else -1,
existing_frequencies_by_team=existing_frequencies_by_team)
# ---------------------------------
# PLS: One shot rebuild
# ---------------------------------
def _cron_update_automated_probabilities(self):
""" This cron will :
- rebuild the lead scoring frequency table
- recompute all the automated_probability and align probability if both were aligned
"""
cron_start_date = datetime.now()
self._rebuild_pls_frequency_table()
self._update_automated_probabilities()
_logger.info("Predictive Lead Scoring : Cron duration = %d seconds" % ((datetime.now() - cron_start_date).total_seconds()))
def _rebuild_pls_frequency_table(self):
# Clear the frequencies table (in sql to speed up the cron)
try:
self.check_access_rights('unlink')
except AccessError:
raise UserError(_("You don't have the access needed to run this cron."))
else:
self._cr.execute('TRUNCATE TABLE crm_lead_scoring_frequency')
new_frequencies_by_team, unused = self._pls_prepare_update_frequency_table(rebuild=True)
# update frequency table
self._pls_update_frequency_table(new_frequencies_by_team, 1)
_logger.info("Predictive Lead Scoring : crm.lead.scoring.frequency table rebuilt")
def _update_automated_probabilities(self):
""" Recompute all the automated_probability (and align probability if both were aligned) for all the leads
that are active (not won, nor lost).
For performance reasons, as there can be a huge number of leads to recompute, this cron proceeds by batches.
Each batch is performed in its own transaction, in order to minimise the lock time on the lead table
(and to avoid complete lock if there was only 1 transaction that would last for too long -> several minutes).
If a concurrent update occurs, it will simply be put in the queue to get the lock.
"""
pls_start_date = self._pls_get_safe_start_date()
if not pls_start_date:
return
# 1. Get all the leads to recompute created after pls_start_date that are neither won nor lost
# (Won : probability = 100 | Lost : probability = 0 or inactive. Here, inactive won't be returned anyway)
# Also get all the leads without probability --> These are the new leads. Activate auto probability on them.
pending_lead_domain = [
'&',
'&',
('stage_id', '!=', False), ('create_date', '>=', pls_start_date),
'|',
('probability', '=', False),
'&',
('probability', '<', 100), ('probability', '>', 0)
]
leads_to_update = self.env['crm.lead'].search(pending_lead_domain)
leads_to_update_count = len(leads_to_update)
# 2. Compute by batch to avoid memory error
lead_probabilities = {}
for i in range(0, leads_to_update_count, PLS_COMPUTE_BATCH_STEP):
leads_to_update_part = leads_to_update[i:i + PLS_COMPUTE_BATCH_STEP]
lead_probabilities.update(leads_to_update_part._pls_get_naive_bayes_probabilities(batch_mode=True))
_logger.info("Predictive Lead Scoring : New automated probabilities computed")
# 3. Group by new probability to reduce server roundtrips when executing the update
probability_leads = defaultdict(list)
for lead_id, probability in sorted(lead_probabilities.items()):
probability_leads[probability].append(lead_id)
# 4. Update automated_probability (+ probability if both were equal)
update_sql = """UPDATE crm_lead
SET automated_probability = %s,
probability = CASE WHEN (probability = automated_probability OR probability is null)
THEN (%s)
ELSE (probability)
END
WHERE id in %s"""
# Update by a maximum number of leads at the same time, one batch by transaction :
# - avoid memory errors
# - avoid blocking the table for too long with a too big transaction
transactions_count, transactions_failed_count = 0, 0
cron_update_lead_start_date = datetime.now()
auto_commit = not getattr(threading.currentThread(), 'testing', False)
for probability, probability_lead_ids in probability_leads.items():
for lead_ids_current in tools.split_every(PLS_UPDATE_BATCH_STEP, probability_lead_ids):
transactions_count += 1
try:
self.env.cr.execute(update_sql, (probability, probability, tuple(lead_ids_current)))
# auto-commit except in testing mode
if auto_commit:
self.env.cr.commit()
except Exception as e:
_logger.warning("Predictive Lead Scoring : update transaction failed. Error: %s" % e)
transactions_failed_count += 1
_logger.info(
"Predictive Lead Scoring : All automated probabilities updated (%d leads / %d transactions (%d failed) / %d seconds)" % (
leads_to_update_count,
transactions_count,
transactions_failed_count,
(datetime.now() - cron_update_lead_start_date).total_seconds(),
)
)
# ---------------------------------
# PLS: Common parts for both mode
# ---------------------------------
def _pls_prepare_update_frequency_table(self, rebuild=False, target_state=False):
"""
This method is common to both Live Increment and Full Rebuild modes, as they share the main steps.
This method will prepare the frequency dict needed to update the frequency table:
- New frequencies: frequencies that we need to add in the frequency table.
- Existing frequencies: frequencies that are already in the frequency table.
In rebuild mode, only the new frequencies are needed as existing frequencies are truncated.
For each team, each dict contains the frequency in won and lost for each field/value couple
of the target leads.
Target leads are :
- in Live increment mode : given ongoing leads (self)
- in Full rebuild mode : all the closed (won and lost) leads in the DB.
During the frequencies update, with both new and existing frequencies, we can split frequencies to update
and frequencies to add. If a field/value couple already exists in the frequency table, we just update it.
Otherwise, we need to insert a new one.
"""
# Keep eligible leads
pls_start_date = self._pls_get_safe_start_date()
if not pls_start_date:
return {}, {}
if rebuild: # rebuild will treat every closed lead in DB, increment will treat current ongoing leads
pls_leads = self
else:
# Only treat leads created after the PLS start Date
pls_leads = self.filtered(
lambda lead: fields.Date.to_date(pls_start_date) <= fields.Date.to_date(lead.create_date))
if not pls_leads:
return {}, {}
# Extract target leads values
if rebuild: # rebuild is ok
domain = [
'&',
('create_date', '>=', pls_start_date),
'|',
('probability', '=', 100),
'&',
('probability', '=', 0), ('active', '=', False)
]
team_ids = self.env['crm.team'].with_context(active_test=False).search([]).ids + [0] # If team_id is unset, consider it as team 0
else: # increment
domain = [('id', 'in', pls_leads.ids)]
team_ids = pls_leads.mapped('team_id').ids + [0]
leads_values_dict = pls_leads._pls_get_lead_pls_values(domain=domain)
# split leads values by team_id
# get current frequencies related to the target leads
leads_frequency_values_by_team = dict((team_id, []) for team_id in team_ids)
leads_pls_fields = set()  # ensure to keep each field unique (a lead can have multiple tag_id entries in leads_values_dict)
for lead_id, values in leads_values_dict.items():
team_id = values.get('team_id', 0) # If team_id is unset, consider it as team 0
lead_frequency_values = {'count': 1}
for field, value in values['values']:
if field != "probability": # was added to lead values in batch mode to know won/lost state, but is not a pls fields.
leads_pls_fields.add(field)
else: # extract lead probability - needed to increment tag_id frequency. (proba always before tag_id)
lead_probability = value
if field == 'tag_id':  # handle tag_id separately (as in One Shot rebuild mode)
leads_frequency_values_by_team[team_id].append({field: value, 'count': 1, 'probability': lead_probability})
else:
lead_frequency_values[field] = value
leads_frequency_values_by_team[team_id].append(lead_frequency_values)
leads_pls_fields = list(leads_pls_fields)
# get new frequencies
new_frequencies_by_team = {}
for team_id in team_ids:
# prepare fields and tag values for leads by team
new_frequencies_by_team[team_id] = self._pls_prepare_frequencies(
leads_frequency_values_by_team[team_id], leads_pls_fields, target_state=target_state)
# get existing frequencies
existing_frequencies_by_team = {}
if not rebuild: # there is no existing frequency in rebuild mode as they were all deleted.
# read all fields to get everything in memory in one query (instead of having query + prefetch)
existing_frequencies = self.env['crm.lead.scoring.frequency'].search_read(
['&', ('variable', 'in', leads_pls_fields),
'|', ('team_id', 'in', pls_leads.mapped('team_id').ids), ('team_id', '=', False)])
for frequency in existing_frequencies:
team_id = frequency['team_id'][0] if frequency.get('team_id') else 0
if team_id not in existing_frequencies_by_team:
existing_frequencies_by_team[team_id] = dict((field, {}) for field in leads_pls_fields)
existing_frequencies_by_team[team_id][frequency['variable']][frequency['value']] = {
'frequency_id': frequency['id'],
'won': frequency['won_count'],
'lost': frequency['lost_count']
}
return new_frequencies_by_team, existing_frequencies_by_team
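# Illustrative sketch of the returned structures (hypothetical ids/counts):
#   new_frequencies_by_team = {
#       1: {'stage_id': {'3': {'won': 2, 'lost': 1}},
#           'tag_id': {'7': {'won': 1, 'lost': 0}}},
#   }
#   existing_frequencies_by_team = {
#       1: {'stage_id': {'3': {'frequency_id': 55, 'won': 120.1, 'lost': 80.1}},
#           'tag_id': {}},
#   }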
def _pls_update_frequency_table(self, new_frequencies_by_team, step, existing_frequencies_by_team=None):
""" Create / update the frequency table in a cross company way, per team_id"""
values_to_update = {}
values_to_create = []
if not existing_frequencies_by_team:
existing_frequencies_by_team = {}
# build the create multi + frequencies to update
for team_id, new_frequencies in new_frequencies_by_team.items():
for field, value in new_frequencies.items():
# frequency already present ?
current_frequencies = existing_frequencies_by_team.get(team_id, {})
for param, result in value.items():
current_frequency_for_couple = current_frequencies.get(field, {}).get(param, {})
# If frequency already present : UPDATE IT
if current_frequency_for_couple:
new_won = current_frequency_for_couple['won'] + (result['won'] * step)
new_lost = current_frequency_for_couple['lost'] + (result['lost'] * step)
# ensure to have always positive frequencies
values_to_update[current_frequency_for_couple['frequency_id']] = {
'won_count': new_won if new_won > 0 else 0.1,
'lost_count': new_lost if new_lost > 0 else 0.1
}
continue
# Else, CREATE a new frequency record.
# We add + 0.1 in won and lost counts to avoid zero frequency issues
# should be +1 but it weighs too much on small recordsets.
values_to_create.append({
'variable': field,
'value': param,
'won_count': result['won'] + 0.1,
'lost_count': result['lost'] + 0.1,
'team_id': team_id if team_id else None # team_id = 0 means no team_id
})
LeadScoringFrequency = self.env['crm.lead.scoring.frequency'].sudo()
for frequency_id, values in values_to_update.items():
LeadScoringFrequency.browse(frequency_id).write(values)
if values_to_create:
LeadScoringFrequency.create(values_to_create)
# ---------------------------------
# Utility Tools for PLS
# ---------------------------------
# PLS: Config Parameters
# ---------------------
def _pls_get_safe_start_date(self):
""" As config_parameters does not accept Date field,
we get directly the date formated string stored into the Char config field,
as we directly use this string in the sql queries.
To avoid sql injections when using this config param,
we ensure the date string can be effectively a date."""
str_date = self.env['ir.config_parameter'].sudo().get_param('crm.pls_start_date')
if not fields.Date.to_date(str_date):
return False
return str_date
def _pls_get_safe_fields(self):
""" As config_parameters does not accept M2M field,
we the fields from the formated string stored into the Char config field.
To avoid sql injections when using that list, we return only the fields
that are defined on the model. """
pls_fields_config = self.env['ir.config_parameter'].sudo().get_param('crm.pls_fields')
pls_fields = pls_fields_config.split(',') if pls_fields_config else []
pls_safe_fields = [field for field in pls_fields if field in self._fields.keys()]
return pls_safe_fields
# Compute Automated Probability Tools
# -----------------------------------
def _pls_get_won_lost_total_count(self, team_results):
""" Get all won and all lost + total :
first stage can be used to know how many lost and won there is
as won count are equals for all stage
and first stage is always incremented in lost_count
:param frequencies: lead_scoring_frequencies
:return: won count, lost count and total count for all records in frequencies
"""
# TODO : check if we need to handle specific team_id stages [for lost count] (if first stage in sequence is team_specific)
first_stage_id = self.env['crm.stage'].search([('team_id', '=', False)], order='sequence', limit=1)
if str(first_stage_id.id) not in team_results.get('stage_id', []):
return 0, 0, 0
stage_result = team_results['stage_id'][str(first_stage_id.id)]
return stage_result['won'], stage_result['lost'], stage_result['won'] + stage_result['lost']
# PLS: Rebuild Frequency Table Tools
# ----------------------------------
def _pls_prepare_frequencies(self, lead_values, leads_pls_fields, target_state=None):
"""new state is used when getting frequencies for leads that are changing to lost or won.
Stays none if we are checking frequencies for leads already won or lost."""
# Frequencies must include tag_id
pls_fields = set(leads_pls_fields + ['tag_id'])
frequencies = dict((field, {}) for field in pls_fields)
stage_ids = self.env['crm.stage'].search_read([], ['sequence', 'name', 'id'], order='sequence')
stage_sequences = {stage['id']: stage['sequence'] for stage in stage_ids}
# Increment won / lost frequencies by criteria (field / value couple)
for values in lead_values:
if target_state: # ignore probability values if target state (as probability is the old value)
won_count = values['count'] if target_state == 'won' else 0
lost_count = values['count'] if target_state == 'lost' else 0
else:
won_count = values['count'] if values.get('probability', 0) == 100 else 0
lost_count = values['count'] if values.get('probability', 1) == 0 else 0
if 'tag_id' in values:
frequencies = self._pls_increment_frequency_dict(frequencies, 'tag_id', values['tag_id'], won_count, lost_count)
continue
# Else, treat other fields
if 'tag_id' in pls_fields: # tag_id already treated here above.
pls_fields.remove('tag_id')
for field in pls_fields:
if field not in values:
continue
value = values[field]
if value or field in ('email_state', 'phone_state'):
if field == 'stage_id':
if won_count: # increment all stages if won
stages_to_increment = [stage['id'] for stage in stage_ids]
else: # increment only current + previous stages if lost
current_stage_sequence = stage_sequences[value]
stages_to_increment = [stage['id'] for stage in stage_ids if stage['sequence'] <= current_stage_sequence]
for stage_id in stages_to_increment:
frequencies = self._pls_increment_frequency_dict(frequencies, field, stage_id, won_count, lost_count)
else:
frequencies = self._pls_increment_frequency_dict(frequencies, field, value, won_count, lost_count)
return frequencies
def _pls_increment_frequency_dict(self, frequencies, field, value, won, lost):
value = str(value) # Ensure we will always compare strings.
if value not in frequencies[field]:
frequencies[field][value] = {'won': won, 'lost': lost}
else:
frequencies[field][value]['won'] += won
frequencies[field][value]['lost'] += lost
return frequencies
# Common PLS Tools
# ----------------
def _pls_get_lead_pls_values(self, domain=[]):
"""
This methods builds a dict where, for each lead in self or matching the given domain,
we will get a list of field/value couple.
Due to onchange and create, we don't always have the id of the lead to recompute.
When we update a few records (one, typically) with onchanges, we build the lead_values (= field/value couples)
using the ORM.
To speed up the computation and avoid making too many DB reads inside loops,
we can give a domain to make sql queries to bypass the ORM.
This domain will be used in sql queries to get the values for every lead matching the domain.
:param domain: If set, we get all the leads values via unique sql queries (one for tags, one for other fields),
using the given domain on leads.
If not set, get lead values lead by lead using the ORM.
:return: {lead_id: {'values': [(field1, value1), (field2, value2), ...], 'team_id': team_id}, ...}
"""
leads_values_dict = OrderedDict()
pls_fields = ["stage_id", "team_id"] + self._pls_get_safe_fields()
if domain:
# active_test = False as the domain should handle the 'active' field itself
from_clause, where_clause, where_params = self.env['crm.lead'].with_context(active_test=False)._where_calc(domain).get_sql()
str_fields = ", ".join(["{}"] * len(pls_fields))
args = [sql.Identifier(field) for field in pls_fields]
# Get leads values
self.flush(['probability'])
query = """SELECT id, probability, %s
FROM %s
WHERE %s order by team_id asc"""
query = sql.SQL(query % (str_fields, from_clause, where_clause)).format(*args)
self._cr.execute(query, where_params)
lead_results = self._cr.dictfetchall()
# Get tags values
query = """SELECT crm_lead.id as lead_id, t.id as tag_id
FROM %s
LEFT JOIN crm_tag_rel rel ON crm_lead.id = rel.lead_id
LEFT JOIN crm_tag t ON rel.tag_id = t.id
WHERE %s order by crm_lead.team_id asc"""
query = sql.SQL(query % (from_clause, where_clause)).format(*args)
self._cr.execute(query, where_params)
tag_results = self._cr.dictfetchall()
# get all (variable, value) couple for all in self
for lead in lead_results:
lead_values = []
for field in pls_fields + ['probability']: # add probability as used in _pls_prepare_frequencies (needed in rebuild mode)
value = lead[field]
if field == 'team_id': # ignore team_id as stored separately in leads_values_dict[lead_id][team_id]
continue
if value or field == 'probability': # 0 is a correct value for probability
lead_values.append((field, value))
elif field in ('email_state', 'phone_state'): # As ORM reads 'None' as 'False', do the same here
lead_values.append((field, False))
leads_values_dict[lead['id']] = {'values': lead_values, 'team_id': lead['team_id'] or 0}
for tag in tag_results:
if tag['tag_id']:
leads_values_dict[tag['lead_id']]['values'].append(('tag_id', tag['tag_id']))
return leads_values_dict
else:
for lead in self:
lead_values = []
for field in pls_fields:
if field == 'team_id': # ignore team_id as stored separately in leads_values_dict[lead_id][team_id]
continue
value = lead[field].id if isinstance(lead[field], models.BaseModel) else lead[field]
if value or field in ('email_state', 'phone_state'):
lead_values.append((field, value))
for tag in lead.tag_ids:
lead_values.append(('tag_id', tag.id))
leads_values_dict[lead.id] = {'values': lead_values, 'team_id': lead['team_id'].id}
return leads_values_dict
|
python
|
import sys
from schemas.input_conf import personal_info
from settings.base_conf import KOBO_PERSONAL_INFO_CSV_MAP
'''
json_structure - the json attributes that are to be extracted from the source json
mapping_format - see oldcuris_elastic_map for an example. import it here
input_format - default input of source json
final_format - final input structure, with additional fields beyond the input format
source - source database
destination - destination database
'''
personal_informations = {
"json_structure": [],
"mapping_file": KOBO_PERSONAL_INFO_CSV_MAP,
"source": "kobo",
"destination": "couchbase"
}
|
python
|
""" Test Metadata Tool """
from __future__ import unicode_literals, absolute_import
from tmt.base import Tree
__all__ = ["Tree"]
|
python
|
import os
import matplotlib.pyplot as plt
from typing import List, Union, Tuple, Dict
import torch
import pickle
current_dir = os.path.dirname(os.path.realpath(__file__))
CATEGORY = List[Union[int, float]]
RUN_STATS = Dict[str, CATEGORY]
def plot_score_and_acc_over_docs(
dir_name: str,
stats: List[Tuple[str, RUN_STATS]],
per_docs: int = 5
) -> None:
if not os.path.exists(current_dir + "/plots/" + dir_name):
os.makedirs(current_dir + "/plots/" + dir_name)
averages = calculate_averages(stats, per_docs)
num_docs = [count for count in range(per_docs, len(stats[0][1]['ksmr']) + 1, per_docs)]
bleu_improvement_avg = calculate_score_improvement_averages(averages['orig_nmt_out_bleu'],
averages['post_feedback_bleu'])
chrf_improvement_avg = calculate_score_improvement_averages(averages['orig_nmt_out_chrf'],
averages['post_feedback_chrf'])
save_plot_image(num_docs, averages['ksmr'], 'KSMR', dir_name)
save_plot_image(num_docs, averages['orig_nmt_out_bleu'], 'Original BLEU', dir_name)
save_plot_image(num_docs, averages['orig_nmt_out_chrf'], 'Original ChrF', dir_name)
save_plot_image(num_docs, averages['post_feedback_bleu'], 'Post Feedback BLEU', dir_name)
save_plot_image(num_docs, averages['post_feedback_chrf'], 'Post Feedback ChrF', dir_name)
save_plot_image(num_docs, averages['percent_sent_requested'], 'Percent Sents Requested', dir_name)
save_plot_image(num_docs, bleu_improvement_avg, 'Bleu Improvement', dir_name)
save_plot_image(num_docs, chrf_improvement_avg, 'ChrF Improvement', dir_name)
save_plot_map_ksmr_against_score_improvement(averages['ksmr'], bleu_improvement_avg, dir_name, 'BLEU')
save_plot_map_ksmr_against_score_improvement(averages['ksmr'], chrf_improvement_avg, dir_name, 'ChrF')
def save_plot_image(
num_docs: List[int],
averages: List[Tuple[str, CATEGORY]],
title: str,
folder_name: str
) -> None:
for run in averages:
plt.plot(num_docs, run[1], "--", label=run[0])
plt.title('{} Averages'.format(title))
plt.xlabel('Num Docs')
plt.ylabel(title)
plt.legend()
plt.savefig(current_dir + '/plots/{}/{}.png'.format(folder_name, title))
plt.close()
def calculate_averages(
stats: List[Tuple[str, RUN_STATS]],
per_docs: int,
) -> Dict[str, List[Tuple[str, CATEGORY]]]:
categories = ['ksmr', 'post_feedback_bleu', 'post_feedback_chrf', 'percent_sent_requested',
'orig_nmt_out_bleu', 'orig_nmt_out_chrf']
averages = {cat: [] for cat in categories}
for category in categories:
for run in stats:
avgs = calculate_time_step_averages(run[1][category], per_docs)
averages[category].append((run[0], avgs))
return averages
def calculate_time_step_averages(
scores: CATEGORY,
per_docs: int
) -> Union[List[int], List[float]]:
"""
Calculate the running average at each time step
"""
chunk_indexes = [i for i in range(per_docs, len(scores) + 1, per_docs)]
averages = []
for i, count in enumerate(chunk_indexes):
starting_i = 0 if i == 0 else chunk_indexes[i - 1]
docs = scores[starting_i: count]
average = sum(docs) / per_docs
averages.append(average)
return averages
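# Minimal sketch: with scores = [1, 2, 3, 4, 5, 6] and per_docs = 3,
# chunk_indexes == [3, 6] and the returned averages are [2.0, 5.0].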
def calculate_score_improvement_averages(
original_score_avgs: List[Tuple[str, List[float]]],
post_feedback_score_avgs: List[Tuple[str, List[float]]],
) -> List[Tuple[str, List[float]]]:
run_improvement_avgs = []
for i in range(len(original_score_avgs)):
assert original_score_avgs[i][0] == post_feedback_score_avgs[i][0]
improve_avgs = [post_feedback_ave - orig_avg
for post_feedback_ave, orig_avg
in zip(post_feedback_score_avgs[i][1], original_score_avgs[i][1])]
run_improvement_avgs.append((original_score_avgs[i][0], improve_avgs))
return run_improvement_avgs
def save_plot_map_ksmr_against_score_improvement(
ksmr_scores: List[Tuple[str, List[int]]],
eval_improvement_scores: List[Tuple[str, List[int]]],
dir_name: str,
title: str
):
for i, run in enumerate(ksmr_scores):
ksmr_values, scores = zip(*sorted(zip(run[1], eval_improvement_scores[i][1])))
plt.plot(ksmr_values, scores, "o--", label=run[0])
plt.title('{} Improvement Across KSMR'.format(title))
plt.xlabel('KSMR (human effort)')
plt.ylabel(title)
plt.legend()
plt.savefig(current_dir + '/plots/{}/{} Improvement v KSMR.png'.format(dir_name, title))
plt.close()
if __name__ == "__main__":
files = [
("Policy 1", current_dir + "/scores_pol_1.p"),
("Policy 2", current_dir + "/scores_pol_2.p"),
("Online", current_dir + "/scores_pol_2_online.p"),
("Learned Sampling AL", current_dir + "/scores_pol_2_learned_AL.p"),
("AL", current_dir + "/scores_pol_2_AL.p")
]
run_stats = []
for run in files:
with open(run[1], "rb") as f:
stats = pickle.load(f)
run_stats.append((run[0], stats))
plot_score_and_acc_over_docs('run_0', run_stats)
|
python
|
from molsysmt._private_tools.exceptions import *
from molsysmt.forms.common_gets import *
import numpy as np
from molsysmt.molecular_system import molecular_system_components
from molsysmt._private_tools.files_and_directories import tmp_filename
form_name='file:dcd'
is_form = {
'file:dcd':form_name
}
info=["",""]
has = molecular_system_components.copy()
for ii in ['coordinates', 'box']:
has[ii]=True
def to_file_dcd(item, molecular_system=None, atom_indices='all', frame_indices='all', output_filename=None, copy_if_all=True):
tmp_molecular_system = None
if (atom_indices == 'all') and (frame_indices == 'all'):
if copy_if_all:
tmp_item = extract_item(item, output_filename=output_filename)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item)
else:
tmp_item = item
if molecular_system is not None:
tmp_molecular_system = molecular_system
else:
tmp_item = extract_item(item, atom_indices=atom_indices, frame_indices=frame_indices, output_filename=output_filename)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item, tmp_molecular_system
def extract_item(item, atom_indices='all', frame_indices='all', output_filename=None):
if output_filename is None:
output_filename = tmp_filename(extension='dcd')
if (atom_indices == 'all') and (frame_indices == 'all'):
raise NotImplementedError()
else:
raise NotImplementedError()
return tmp_item
def add(item, from_item, atom_indices='all', frame_indices='all'):
raise NotImplementedError()
def append_frames(item, step=None, time=None, coordinates=None, box=None):
raise NotImplementedError()
###### Get
## system
|
python
|
import mongolib
class a():
def aa(self):
a=mongolib.mongodb()
a.log_collect(msg='1gaejiusfuadaifuagusuifhiau afdu gaudf uisg uagsi gaug asyaigasydg aug iug ')
a.log_collect(msg='2')
a.log_input()
a.log_output()
aaaa=a()
aaaa.aa()
|
python
|
import inspect
import operator
import re
from datetime import datetime
from decimal import Decimal
from enum import Enum
from functools import reduce
import pymongo
from bson import ObjectId
from pymongo.collection import Collection, ReturnDocument
from pymongo.errors import CollectionInvalid
from appkernel.configuration import config
from appkernel.util import OBJ_PREFIX
from .model import Model, Expression, AppKernelException, SortOrder, Property, Index, TextIndex, UniqueIndex, \
CustomProperty
def xtract(clazz_or_instance):
"""
Extract the collection name from a class, removing the Service/Controller/Resource ending and adding a plural -s, -es or -ies.
:param clazz_or_instance: the class object
:return: the name of the desired collection
"""
clazz_name = clazz_or_instance.__name__ if inspect.isclass(
clazz_or_instance) else clazz_or_instance.__class__.__name__
name = re.split('Service|Controller|Resource', clazz_name)[0]
if name[-2:] in ['sh', 'ch'] or name[-1:] in ['s', 'x', 'z']:
name = f'{name}es'
elif name[-1:] == 'y' and (name[-2:-1] in ["a", "e", "i", "o", "u"] or name[-3:-2] == 'qu'):
name = f'{name[:-1]}ies'
else:
name = f'{name}s'
return name
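# Illustrative examples (hypothetical class names):
#   xtract(UserService)   -> 'Users'
#   xtract(BoxController) -> 'Boxes'
# and names ending in a vowel + 'y' take the '-ies' branch above.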
class Query(object):
"""a class representing the query"""
def __init__(self, *expressions):
self.filter_expr = {}
self.sorting_expr = {}
self.__prep_expressions(*expressions)
def __prep_expressions(self, *expressions):
if not expressions:
return
where = reduce(operator.and_, expressions)
if isinstance(where, Expression):
if isinstance(where.lhs, (Property, CustomProperty)):
if where.lhs.backreference.within_an_array:
# this query is part of an array
self.filter_expr[str(where.lhs.backreference.array_parameter_name)] = where.ops.lmbda(
(where.lhs.backreference.parameter_name, Query.__extract_rhs(where.rhs)))
else:
# it's only a parameter-to-parameter comparison
self.filter_expr[str(where.lhs.backreference.parameter_name)] = where.ops.lmbda(
Query.__extract_rhs(where.rhs))
elif isinstance(where.lhs, Expression) and isinstance(where.rhs, Expression):
# two expressions are compared to each other
exprs = []
exprs.extend(self.__xtract_expression(where))
self.filter_expr[str(where.ops)] = [expression for expression in exprs]
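# Illustrative sketch (hypothetical Model and operators):
#   Query(User.name == 'Jane')            -> filter_expr ~ {'name': 'Jane'}
#   Query(User.name == 'Jane', User.age >= 18)
#       -> the two expressions are AND-ed and stored under the operator key,
#          e.g. {'$and': [{'name': 'Jane'}, {'age': {'$gte': 18}}]}
# where the exact operator documents come from each expression's ops.lmbda.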
def __xtract_expression(self, expression: Expression):
ret_val = []
if isinstance(expression.lhs, Expression):
ret_val.extend(self.__xtract_expression(expression.lhs))
if isinstance(expression.rhs, Expression):
ret_val.extend(self.__xtract_expression(expression.rhs))
if isinstance(expression.lhs, Property):
ret_val.append({
expression.lhs.backreference.parameter_name:
expression.ops.lmbda(Query.__extract_rhs(expression.rhs))
})
if isinstance(expression.rhs, Property):
ret_val.append({expression.lhs.backreference.parameter_name:
expression.ops.lmbda(Query.__extract_rhs(expression.rhs))})
return ret_val
@staticmethod
def __extract_rhs(right_hand_side):
if isinstance(right_hand_side, Property):
return right_hand_side.backreference.parameter_name
elif isinstance(right_hand_side, Enum):
return right_hand_side.name
else:
return right_hand_side
def sort_by(self, *sorting_tuples):
"""
Defines sorting criteria (e.g. .sort_by(User.name.desc()))
:param sorting_tuples: desc() or asc() on the Model parameter
:return: self for calling further methods on the class
:rtype: Query
"""
self.sorting_expr = list(sorting_tuples)
return self
def find(self):
"""
Creates a cursor based on the filter and sorting criteria and yields the results;
:return: a generator object which yields found instances of Model class
"""
raise NotImplementedError('abstract method')
def find_one(self):
"""
:return: One or none instances of the Model, depending on the query criteria
"""
raise NotImplementedError('abstract method')
def count(self):
"""
:return: the number of items in the repository matching the filter expression;
"""
raise NotImplementedError('abstract method')
def delete(self):
"""
Delete all elements which fulfill the filter criteria (defined in the where method);
:return: the deleted item count
"""
raise NotImplementedError('abstract method')
def get(self, page=0, page_size=100):
"""
Returns the list of found Model instances;
:param page: the current page requested
        :param page_size: the size of the page (number of elements requested)
:return: the result of the query as a list of Model instance objects
"""
raise NotImplementedError('abstract method')
def mongo_type_converter_to_dict(value: any) -> any:
if isinstance(value, Decimal):
return float(value)
else:
return value
def mongo_type_converter_from_dict(value: any) -> any:
return value
class MongoQuery(Query):
def __init__(self, connection_object: pymongo.collection.Collection, user_class, *expressions):
super().__init__(*expressions)
self.connection: pymongo.collection.Collection = connection_object
self.user_class = user_class
def find(self, page: int = 0, page_size: int = 100) -> Model:
"""
Returns a generator for the number of pages
:param page: current page
:param page_size: number of elements
:return: a generator which can be used in an iteration
"""
if len(self.sorting_expr) == 0:
cursor = self.connection.find(self.filter_expr).skip(page * page_size).limit(page_size)
else:
cursor = self.connection.find(self.filter_expr).sort(self.sorting_expr).skip(page * page_size).limit(
page_size)
if cursor:
for item in cursor:
yield Model.from_dict(item, self.user_class, convert_ids=True,
converter_func=mongo_type_converter_from_dict)
def get(self, page: int = 0, page_size: int = 100) -> list:
"""
Return the complete list of all items corresponding to the query
:param page: current page
:param page_size: the number of elements
:return: a list of all items corresponding the query
"""
return [item for item in self.find(page=page, page_size=page_size)]
def find_one(self):
"""
:return: one instance of the Model or None
:rtype: Model
"""
hit = self.connection.find_one(self.filter_expr)
return Model.from_dict(hit, self.user_class, convert_ids=True,
converter_func=mongo_type_converter_from_dict) if hit else None
def delete(self) -> int:
"""
:return: the delete count
"""
return self.connection.delete_many(self.filter_expr).deleted_count
def count(self) -> int:
return self.connection.count(self.filter_expr)
def __get_update_expression(self, **update_expression):
update_dict = dict()
for key, exp in update_expression.items():
opname = str(exp.ops)
op_expr = update_dict.get(opname, {})
op_expr[key] = exp.ops.lmbda(exp.rhs)
update_dict[opname] = op_expr
return update_dict
def find_one_and_update(self, **update_expression):
upd = self.__get_update_expression(**update_expression)
hit = self.connection.find_one_and_update(self.filter_expr, upd, return_document=ReturnDocument.AFTER)
return Model.from_dict(hit, self.user_class, convert_ids=True,
converter_func=mongo_type_converter_from_dict) if hit else None
def update_one(self, **update_expression) -> int:
upd = self.__get_update_expression(**update_expression)
update_result = self.connection.update_one(self.filter_expr, upd, upsert=False)
return update_result.modified_count
def update_many(self, **update_expression) -> int:
upd = self.__get_update_expression(**update_expression)
update_result = self.connection.update_many(self.filter_expr, upd, upsert=False)
return update_result.modified_count
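# Minimal usage sketch for MongoQuery (assumes a hypothetical `User` model class
# that declares a `name` Property; shown only to illustrate the chaining defined above):
#   query = MongoQuery(config.mongo_database.get_collection('Users'), User, User.name == 'Jane')
#   first_match = query.find_one()
#   first_page = query.sort_by(User.name.desc()).get(page=0, page_size=10)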
class RepositoryException(AppKernelException):
def __init__(self, message):
super().__init__(message)
class Repository(object):
@classmethod
def find_by_id(cls, object_id):
"""
Find an object identified by the unique database id
:param object_id: the database id
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def delete_by_id(cls, object_id):
"""
Delete the object identified by ID
:param object_id: the unique object ID
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def create_object(cls, document):
"""
Insert a new object in the database
:param document:
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def replace_object(cls, object_id, document):
"""
Replace the object in the database.
:param object_id:
:param document:
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def patch_object(cls, document, object_id=None):
raise NotImplementedError('abstract method')
@classmethod
def save_object(cls, document, object_id=None):
raise NotImplementedError('abstract method')
@classmethod
def find(cls, *expressions):
"""
:param expressions:
:type expressions: Expression
:return: a Model Generator
"""
raise NotImplementedError('abstract method')
@classmethod
def find_one(cls, *expressions):
"""
Returns one single instance of the Model.
:param expressions:
:type expressions: Expression
:return: one Model object
:rtype: Model
"""
raise NotImplementedError('abstract method')
@classmethod
def where(cls, *expressions):
"""
Creates and returns a query object, used for further chaining functions like sorting and pagination;
:param expressions: the query filter expressions used to narrow the result-set
        :return: a query object preconfigured with the filter expressions
:rtype: Query
"""
raise NotImplementedError('abstract method')
@classmethod
def find_by_query(cls, query={}, page=1, page_size=50, sort_by=None, sort_order=SortOrder.ASC):
"""
:param query:
:type query: dict
:param page:
:type page: int
:param page_size:
:type page_size: int
:param sort_by:
:param sort_order:
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def create_cursor_by_query(cls, query):
raise NotImplementedError('abstract method')
@classmethod
def update_many(cls, match_query_dict, update_expression_dict):
"""
:param match_query_dict:
:param update_expression_dict:
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def delete_many(cls, match_query_dict):
"""
:param match_query_dict:
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def delete_all(cls):
"""
:return:
"""
raise NotImplementedError('abstract method')
@classmethod
def count(cls, query_filter={}):
"""
Return the number of items matching the query filter
:param query_filter: the raw query type as a dict (using the mongo syntax)
:type query_filter: dict
:return:
"""
raise NotImplementedError('abstract method')
def save(self):
"""
Saves or updates a model instance in the database
:return: the id of the inserted or updated document
"""
raise NotImplementedError('abstract method')
def delete(self):
"""
Delete the current instance.
:raises RepositoryException: in case the instance was not deleted.
"""
raise NotImplementedError('abstract method')
class MongoRepository(Repository):
@classmethod
def init_indexes(cls):
if issubclass(cls, Model):
index_factories = {
Index: MongoRepository.create_index,
TextIndex: MongoRepository.create_text_index,
UniqueIndex: MongoRepository.create_unique_index
}
for key, value in cls.__dict__.items():
if isinstance(value, Property):
if value.index:
fct = index_factories.get(value.index, MongoRepository.not_supported)
fct(cls.get_collection(), key,
value.index.sort_order if hasattr(value.index, 'sort_order') else SortOrder.ASC)
@staticmethod
def version_check(required_version_tuple):
server_info = config.mongo_database.client.server_info()
current_version = tuple(int(i) for i in server_info['version'].split('.'))
if current_version < required_version_tuple:
raise AppKernelException(
                'This feature requires a min version of: {}'.format('.'.join(str(i) for i in required_version_tuple)))
@classmethod
def add_schema_validation(cls, validation_action='warn'):
"""
        :param validation_action: 'warn' (MongoDB logs violations but allows the insert or update to proceed) or 'error' (MongoDB rejects documents that violate the schema)
:return:
"""
MongoRepository.version_check(tuple([3, 6, 0]))
try:
config.mongo_database.create_collection(xtract(cls))
except CollectionInvalid:
            # the collection already exists
pass
config.mongo_database.command(
'collMod', xtract(cls),
validator={'$jsonSchema': cls.get_json_schema(mongo_compatibility=True)},
validationLevel='moderate',
validationAction=validation_action
)
@staticmethod
def create_index(collection, field_name, sort_order, unique=False):
# type: (pymongo.collection.Collection, str, SortOrder, bool) -> ()
"""
Args:
collection(pymongo.collection.Collection): the collection to which the index is applied to
field_name(str): the name of the document field which is being indexed
sort_order(SortOrder): the sort order
unique(bool): if true (false by default) it will create a unique index
"""
if field_name not in collection.index_information():
if isinstance(sort_order, SortOrder):
direction = pymongo.ASCENDING if sort_order == SortOrder.ASC else pymongo.DESCENDING
else:
direction = sort_order
collection.create_index(
[(field_name, direction)],
unique=unique, background=True, name='{}_idx'.format(field_name))
@staticmethod
def create_text_index(collection, field_name, *args):
# type: (pymongo.collection.Collection, str, SortOrder, bool) -> ()
MongoRepository.create_index(collection, field_name, pymongo.TEXT)
@staticmethod
def create_unique_index(collection, field_name, sort_order):
MongoRepository.create_index(collection, field_name, sort_order, unique=True)
@staticmethod
def not_supported(*args):
pass
@classmethod
def get_collection(cls) -> pymongo.collection.Collection:
"""
:return: the collection for this model object
:rtype: Collection
"""
db = config.mongo_database
if db is not None:
return db.get_collection(xtract(cls))
else:
raise AppKernelException('The database engine is not set')
@classmethod
def find_by_id(cls, object_id):
assert object_id, 'the id of the lookup object must be provided'
if isinstance(object_id, str) and object_id.startswith(OBJ_PREFIX):
object_id = ObjectId(object_id.split(OBJ_PREFIX)[1])
document_dict = cls.get_collection().find_one({'_id': object_id})
return Model.from_dict(document_dict, cls, convert_ids=True,
converter_func=mongo_type_converter_from_dict) if document_dict else None
@classmethod
def delete_by_id(cls, object_id):
"""
Deletes a document identified by the object id
:param object_id:
:return: true if the object was deleted
"""
delete_result = cls.get_collection().delete_one({'_id': object_id})
return delete_result.deleted_count
@staticmethod
def prepare_document(document, object_id=None):
if isinstance(document, Model):
document_id = document.id
has_id = document_id is not None
document = Model.to_dict(document, convert_id=True, converter_func=mongo_type_converter_to_dict)
elif not isinstance(document, dict):
raise RepositoryException('Only dictionary or Model is accepted.')
else:
document_id = object_id or document.get('id') or document.get('_id')
has_id = document_id is not None
return has_id, document_id, document
@classmethod
def patch_object(cls, document, object_id=None):
return cls.__save_or_update_dict(document, object_id=object_id, insert_if_none_found=False)
@classmethod
def __save_or_update_dict(cls, document, object_id=None, insert_if_none_found: bool = True):
has_id, document_id, document = MongoRepository.prepare_document(document, object_id)
if has_id:
update_result = cls.get_collection().update_one({'_id': document_id}, {'$set': document},
upsert=insert_if_none_found)
db_id = update_result.upserted_id or (document_id if update_result.matched_count > 0 else None)
else:
insert_result = cls.get_collection().insert_one(document)
db_id = insert_result.inserted_id # pylint: disable=C0103
return db_id
@classmethod
def save_object(cls, model: Model, object_id: str = None, insert_if_none_found: bool = True) -> object:
assert model, 'the object must be handed over as a parameter'
assert isinstance(model, Model), 'the object should be a Model'
document = Model.to_dict(model, convert_id=True, converter_func=mongo_type_converter_to_dict)
model.id = cls.__save_or_update_dict(document=document, object_id=object_id)
return model.id
@classmethod
def replace_object(cls, model: Model):
assert model, 'the document must be provided before replacing'
document = Model.to_dict(model, convert_id=True, converter_func=mongo_type_converter_to_dict)
has_id, document_id, document = MongoRepository.prepare_document(document, None)
update_result = cls.get_collection().replace_one({'_id': document_id}, document, upsert=False)
return (update_result.upserted_id or document_id) if update_result.matched_count > 0 else None
@classmethod
def bulk_insert(cls, list_of_model_instances):
return cls.get_collection().insert_many(
[Model.to_dict(model, convert_id=True, converter_func=mongo_type_converter_to_dict) for model in
list_of_model_instances]).inserted_ids
@classmethod
def find(cls, *expressions):
return MongoQuery(cls.get_collection(), cls, *expressions).find()
@classmethod
def find_one(cls, *expressions):
return MongoQuery(cls.get_collection(), cls, *expressions).find_one()
@classmethod
def where(cls, *expressions) -> MongoQuery:
"""
Creates and returns a query object, used for further chaining functions like sorting and pagination;
:param expressions: the query filter expressions used to narrow the result-set
        :return: a query object preconfigured with the filter expressions
:rtype: MongoQuery
"""
return MongoQuery(cls.get_collection(), cls, *expressions)
@classmethod
def find_by_query(cls, query={}, page=1, page_size=50, sort_by=None, sort_order=SortOrder.ASC):
"""
query using mongo's built-in query language
:param sort_order:
:param sort_by:
:param page_size:
:param page:
:param query: the query expression as a dictionary
:return: a generator with the query results
"""
cursor = cls.get_collection().find(query).skip((page - 1) * page_size).limit(page_size)
if sort_by:
py_direction = pymongo.ASCENDING if sort_order == SortOrder.ASC else pymongo.DESCENDING
cursor.sort(sort_by, direction=py_direction)
return [Model.from_dict(result, cls, convert_ids=True, converter_func=mongo_type_converter_from_dict) for result
in cursor]
@classmethod
def create_cursor_by_query(cls, query):
cursor = cls.get_collection().find(query)
return (Model.from_dict(result, cls, convert_ids=True, converter_func=mongo_type_converter_from_dict) for result
in cursor)
@classmethod
def update_many(cls, match_query_dict, update_expression_dict):
"""
updates multiple documents in the database
:param match_query_dict: the query expression to match the documents to be updated
:param update_expression_dict:
:return: the number of modified documents
"""
update_result = cls.get_collection().update_many(match_query_dict, update_expression_dict)
return update_result.modified_count
@classmethod
def delete_many(cls, match_query_dict):
return cls.get_collection().delete_many(match_query_dict).deleted_count
@classmethod
def delete_all(cls):
"""
deletes all documents from the collection
:return: the count of deleted documents
"""
return cls.get_collection().delete_many({}).deleted_count
@classmethod
def count(cls, query_filter={}):
return cls.get_collection().count(query_filter)
@classmethod
def aggregate(cls, pipe=[], allow_disk_use=True, batch_size=100):
cursor = cls.get_collection().aggregate(pipe, allowDiskUse=allow_disk_use, batchSize=batch_size)
return [result for result in cursor]
def save(self):
self.id = self.__class__.save_object(self) # pylint: disable=C0103
return self.id
def delete(self):
assert self.id is not None
deleted_count = self.get_collection().delete_one({'_id': self.id}).deleted_count
if deleted_count != 1:
raise RepositoryException("the instance couldn't be deleted")
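# Minimal usage sketch for MongoRepository (assumes a hypothetical
# `User(Model, MongoRepository)` class with a `name` Property; illustration only):
#   user_id = User(name='Jane').save()
#   same_user = User.find_by_id(user_id)
#   jane = User.where(User.name == 'Jane').find_one()
#   removed = User.where(User.name == 'Jane').delete()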
class AuditableRepository(MongoRepository):
def __init__(self, **kwargs):
super(AuditableRepository, self).__init__()
@classmethod
def save_object(cls, model: Model, object_id=None):
document = Model.to_dict(model, convert_id=True, converter_func=mongo_type_converter_to_dict)
has_id, doc_id, document = MongoRepository.prepare_document(document, object_id)
now = datetime.now()
document.update(updated=now)
if has_id:
# it is an update or a first insert with generated ID
if 'version' in document:
del document['version']
if 'inserted' in document:
del document['inserted']
upsert_expression = {
'$set': document,
'$setOnInsert': {'inserted': now},
'$inc': {'version': 1}
}
update_result = cls.get_collection().update_one({'_id': doc_id}, upsert_expression, upsert=True)
db_id = update_result.upserted_id or doc_id
else:
# it is an insert for sure, we initialise the audit fields
document.update(inserted=now, version=1)
insert_result = cls.get_collection().insert_one(document)
db_id = insert_result.inserted_id
model.id = db_id
return model.id
def save(self):
self.__class__.save_object(self)
return self.id
|
python
|
# Generated by Django 3.0.11 on 2021-01-22 10:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cars', '0001_initial'),
('users', '0002_auto_20210122_0713'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='BankAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bank', models.CharField(max_length=32)),
('agency', models.CharField(max_length=16)),
('balance', models.FloatField(default=0)),
],
),
migrations.CreateModel(
name='Sale',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('active', models.BooleanField(default=True, verbose_name='Active')),
('value', models.FloatField()),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.Car')),
('customer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='users.Customer')),
('seller', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Purchase',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='Created at')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('active', models.BooleanField(default=True, verbose_name='Active')),
('value', models.FloatField()),
('buyer_for', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
('car', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cars.Car')),
('provider', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='users.Customer')),
],
options={
'abstract': False,
},
),
]
|
python
|
import math
N = int(input())
sqN = math.floor(math.sqrt(N))
yaku1 = 1
yaku2 = 1
for i in range(sqN, 0, -1):
if N % i == 0:
yaku1 = i
yaku2 = N // i
break
print(yaku1+yaku2-2)
|
python
|
import asyncio
import pytest
import unittest
from unittest.mock import MagicMock, patch
from app import Application
@pytest.mark.asyncio
async def test_func1():
app = Application()
func2_stub = MagicMock(return_value='future result!')
func2_coro = asyncio.coroutine(func2_stub)
    with patch.object(Application, 'func2', new=func2_coro) as mock:
res = await app.func1()
print(res)
# mock.assert_awaited_with(app.func3())
|
python
|
# Import the libraries that may be needed later for analysis, visualization, etc.
import tushare as ts
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sqlalchemy import create_engine
import psycopg2
# Display Chinese characters and minus signs correctly in plots
from pylab import mpl
mpl.rcParams['font.sans-serif']=['SimHei']
mpl.rcParams['axes.unicode_minus']=False
# Set the tushare API token
token = '7dc39867da616d1570e708a70325d4f51836fdec52cd8c3fc92885b6'
pro = ts.pro_api(token)
# Data download function; the default date range can be changed as needed
# If it raises an error, upgrade tushare to the latest version
def get_data(code,start='20190101',end='20190425'):
df=ts.pro_bar(ts_code=code, adj='qfq', start_date=start, end_date=end)
return df
# Ticker-code retrieval function for the latest trading day
# Gets the latest stock codes and short names for the current trading day
def get_code():
codes = pro.stock_basic(list_status='L').ts_code.values
return codes
engine = create_engine('postgresql+psycopg2://postgres:123456@localhost:5432/postgres')
def insert_sql(data,db_name,if_exists='append'):
    # Use try...except so a single failed insert does not crash the whole run
    try:
        data.to_sql(db_name,engine,index=False,if_exists=if_exists)
        #print(code + ' written to the database successfully')
    except Exception:
        pass
# Download the 20190101-20190425 data and insert it into the stock_data table
# This step is time-consuming, roughly 25-35 minutes
for code in get_code():
data=get_data(code)
insert_sql(data,'stock_data')
# Read back the whole table
df=pd.read_sql('stock_data',engine)
print(len(df))
|
python
|
# Copyright (c) 2019 leosocy. All rights reserved.
# Use of this source code is governed by a MIT-style license
# that can be found in the LICENSE file.
import io
import os
from setuptools import setup
import edcc
# Package meta-data.
NAME = "edcc"
DESCRIPTION = "EDCC: An efficient and accurate algorithm for palmprint recognition."
URL = "https://github.com/Leosocy/EDCC-Palmprint-Recognition"
EMAIL = "[email protected]"
AUTHOR = "Leosocy"
VERSION = edcc.__version__
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
edcc_classifiers = [
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Libraries",
]
try:
with io.open(os.path.join(root, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=">=3",
url=URL,
packages=["edcc"],
package_dir={"edcc": "edcc"},
include_package_data=True,
license="MIT",
classifiers=edcc_classifiers,
)
|
python
|
from toolkit.modules.make_follow_sets import follow_sets
from toolkit.modules.make_first_sets import first_sets
from toolkit.modules.grammar import is_terminal
from tabulate import tabulate
def parsing_table(pgrammar, fs, fls, error_recovery=True):
"""
Input:
pgrammar: parsed grammar
fs: first sets
fls: follow sets
error_recovery: fill parsing table with pop/scan values for error cells
"""
# nonterminals with eps in their first sets
nullables = [k for k in pgrammar.keys() if "eps" in fs[k]]
# TODO: rewrite this loop better
terminals = set()
for prod in pgrammar.values():
for rule in prod:
for sym in rule.split():
if is_terminal(sym, pgrammar) and sym != "eps":
terminals.add(sym)
if not terminals:
return
terminals = list(terminals)
terminals.append("$")
table = []
for nt, prod in pgrammar.items():
row = [None] * len(terminals)
for rule in prod:
for sym in rule.split():
eps = False
if sym == "eps":
eps = True
else:
if is_terminal(sym, pgrammar):
row[terminals.index(sym)] = "{} -> {}".format(nt, rule)
else:
for fse in fs[sym]:
if fse == "eps":
eps = True
else:
row[terminals.index(fse)] = "{} -> {}".format(nt, rule)
if eps:
for flse in fls[nt]:
row[terminals.index(flse)] = "{} -> {}".format(nt, rule)
if not eps and sym not in nullables:
break
table.append([nt] + row)
if error_recovery:
for row in table:
# row[0] is the non-terminal
for flse in fls[row[0]]:
# + 1 because we also added a non-terminal
ix = terminals.index(flse) + 1
if row[ix] is None:
row[ix] = "Pop({})".format(row[0])
# fill remaining values with 'scan'
for i in range(1, len(row)):
if row[i] is None:
row[i] = "scan"
return tabulate(table, headers=["input"] + terminals)
# if __name__ == "__main__":
# import grammar as gm
# # grammar = """
# # X -> a X | g | Y Z | eps
# # Y -> d | u Y | eps
# # Z -> i | eps
# # """
# grammar = """
# E -> T E'
# E' -> + T E' | eps
# T -> F T'
# T' -> * F T' | eps
# F -> id | ( E )
# """
# pgrammar = gm.parse(grammar)
# fs = first_sets(pgrammar)
# fls = follow_sets("E", pgrammar, fs)
# # print("first sets:")
# # gm.set_print(fs)
# # print("follow sets:")
# # gm.set_print(fls)
#     print(parsing_table(pgrammar, fs, fls))
|
python
|
class MemcacheError(Exception):
pass
class MemcacheServerError(Exception):
def __init__(self, server: str, message: str) -> None:
self.server = server
super().__init__(message)
|
python
|
watchdog_config = """
# SDSLabs Watchdog configuration START
UsePAM yes
PasswordAuthentication no
AuthorizedKeysCommand /opt/watchdog/bin/watchdog auth -u %u -t %t -p %k
AuthorizedKeysCommandUser root
# SDSLabs Watchdog configuration END
"""
modified_options = [
'AuthorizedKeysCommand',
'AuthorizedKeysCommandUser',
'PasswordAuthentication',
'UsePAM'
]
inside_watchdog_config = False
def process_line(line):
global inside_watchdog_config
if inside_watchdog_config and line == "# SDSLabs Watchdog configuration END\n":
inside_watchdog_config = False
return ''
if inside_watchdog_config:
return ''
if line == "# SDSLabs Watchdog configuration START\n":
inside_watchdog_config = True
return ''
l = line.strip()
i = l.find('#')
if i != -1:
l = l[:i]
if len(l) == 0:
return line
i = l.find(' ')
j = l.find('\t')
if i == -1 and j != -1:
i = j
elif j == -1 and i != -1:
pass
elif j == -1 and i == -1:
return line
else:
i = min(i, j)
key = l[:i]
value = l[i+1:].strip()
if key in modified_options:
# comment this line
return '# Watchdog: Commenting the line below out\n#' + line
else:
return line
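# Illustrative behaviour sketch (hypothetical input lines, for clarity only):
#   process_line('Port 22\n')                    -> 'Port 22\n' (left untouched)
#   process_line('PasswordAuthentication yes\n') -> '# Watchdog: Commenting the line below out\n#PasswordAuthentication yes\n'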
def main():
inp = open("/etc/ssh/sshd_config")
out = open("watchdog_tmp_sshd_config", "w")
lines = inp.readlines()
for l in lines:
output_line = process_line(l)
out.write(output_line)
out.write(watchdog_config)
inp.close()
out.close()
main()
|
python
|
#!/usr/bin/env python
#author [email protected]
#NOTE: FOR GOFLEX OPERATIONS DONT CHANGE THE CONTENTS OF THIS FILE
#REQUEST BUG FIXES OR ENHANCEMENTS AS NECESSARY
class GoFlexMessageFormatter():
def __init__(self):
pass
def request_meter_data(self, meter, from_date, to_date):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "ts/get_timeseries_values",
"device_id": meter,
"from": from_date,
"to": to_date
}
}
}
}
def request_meter_list(self):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "ts/get_time_series"
}
}
}
}
def store_time_series(self, values):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "ts/store_timeseries_values",
"values": values
}
}
}
}
def average_time_series(self, meter, from_date, to_date):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "ts/average_timeseries_values",
"device_id": meter,
"from": from_date,
"to": to_date
}
}
}
}
def register_model(self, model_name, entity_name, signal_name):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "register_model",
"model_name": model_name,
"entity": entity_name,
"signal": signal_name
}
}
}
}
def request_model_time_series(self, model_name, entity_name, signal_name):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "get_model_timeseries",
"model_name": model_name,
"entity": entity_name,
"signal": signal_name
}
}
}
}
def keyValueService(self, cmd, keys):
return {
"serviceRequest": {
"service": {
"name": "KeyValueService",
"args": {
"cmd": cmd,
"keys": keys
}
}
}
}
def weatherServiceTwoDayHourlyForecast(self, api_key, lat, lng):
return {
"serviceRequest": {
"service" : {
"name" : "WeatherService-TwoDayHourlyForecast-External",
"args" : {
"apiKey" : api_key,
"latitude" : lat,
"longitude" : lng
}
}
}
}
def weatherServiceSolar15DayHourlyForecast(self, api_key, lat, lng):
return {
"serviceRequest": {
"service" : {
"name" : "WeatherService-Solar15DayHourlyForecast-External",
"args" : {
"apiKey" : api_key,
"latitude" : lat,
"longitude" : lng
}
}
}
}
def weatherServiceCleanedHistorical(self, api_key, lat, lng, start, count):
return {
"serviceRequest": {
"service" : {
"name" : "WeatherService-CleanedHistorical-External",
"args" : {
"apiKey" : api_key,
"latitude" : lat,
"longitude" : lng,
"startDate" : start,
"numDays" : count
}
}
}
}
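# Minimal usage sketch (the meter id and dates below are made-up placeholders,
# shown only to illustrate how the formatter builds a service request payload):
if __name__ == '__main__':
    formatter = GoFlexMessageFormatter()
    payload = formatter.request_meter_data(meter='meter-001',
                                           from_date='2019-01-01T00:00:00',
                                           to_date='2019-01-02T00:00:00')
    print(payload['serviceRequest']['service']['args']['cmd'])  # -> ts/get_timeseries_values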
|
python
|
from .gradient_penalty import *
from .wasserstain_div import *
|
python
|
# -*- coding: utf-8 -*-
import sys
from formalchemy import templates
__doc__ = """
There are a few configuration settings available in a global config object.
- encoding: the global encoding used by FormAlchemy to deal with unicode. Default: utf-8
- engine: A valid :class:`~formalchemy.templates.TemplateEngine`
- date_format: Used to format date fields. Defaults to %Y-%m-%d
- date_edit_format: Used to retrieve field order. Defaults to m-d-y
Here is a simple example::
>>> from formalchemy import config
>>> config.encoding = 'iso-8859-1'
>>> config.encoding
'iso-8859-1'
>>> from formalchemy import templates
>>> config.engine = templates.TempitaEngine
There is also a convenience method to set the configuration from a config file::
>>> config.from_config({'formalchemy.encoding':'utf-8',
... 'formalchemy.engine':'mako',
... 'formalchemy.engine.options.input_encoding':'utf-8',
... 'formalchemy.engine.options.output_encoding':'utf-8',
... })
>>> config.from_config({'formalchemy.encoding':'utf-8'})
>>> config.encoding
'utf-8'
>>> isinstance(config.engine, templates.MakoEngine)
True
"""
class Config(object):
__doc__ = __doc__
__name__ = 'formalchemy.config'
__file__ = __file__
__data = dict(
encoding='utf-8',
date_format='%Y-%m-%d',
date_edit_format='m-d-y',
engine = templates.default_engine,
)
def __getattr__(self, attr):
if attr in self.__data:
return self.__data[attr]
else:
raise AttributeError('Configuration has no attribute %s' % attr)
def __setattr__(self, attr, value):
meth = getattr(self, '__set_%s' % attr, None)
if callable(meth):
meth(value)
else:
self.__data[attr] = value
def __set_engine(self, value):
if isinstance(value, templates.TemplateEngine):
self.__data['engine'] = value
else:
            raise ValueError('%s is not a template engine' % value)
def _get_config(self, config, prefix):
values = {}
config_keys = config.keys()
for k in config_keys:
if k.startswith(prefix):
v = config.pop(k)
k = k[len(prefix):]
values[k] = v
return values
def from_config(self, config, prefix='formalchemy.'):
from formalchemy import templates
        engine_config = self._get_config(config, '%sengine.options.' % prefix)
for k, v in self._get_config(config, prefix).items():
if k == 'engine':
engine = templates.__dict__.get('%sEngine' % v.title(), None)
if engine is not None:
v = engine(**engine_config)
else:
raise ValueError('%sEngine does not exist' % v.title())
self.__setattr__(k, v)
def __repr__(self):
return "<module 'formalchemy.config' from '%s' with values %s>" % (self.__file__, self.__data)
sys.modules['formalchemy.config'] = Config()
|
python
|
'''
Copyright (c) 2021-2022 OVGU LIA
Author: Harish Kumar Pakala
This source code is licensed under the Apache License 2.0 (see LICENSE.txt).
This source code may use other Open Source software components (see LICENSE.txt).
'''
try:
import queue as Queue
except ImportError:
import Queue as Queue
class DataManager(object):
    '''
    Routes inbound database requests from an internal queue to the configured
    database adapter (pyAAS.dba) and keeps their results for later retrieval.
    '''
def __init__(self, pyAAS):
'''
Constructor
'''
self.pyAAS = pyAAS
self.InBoundProcessingQueue = Queue.Queue()
self.outBoundProcessingDict = {}
def pushInboundMessage(self,msg):
self.InBoundProcessingQueue.put(msg)
def configure(self):
self.pyAAS.serviceLogger.info('The Database manager is being configured')
def start(self):
self.POLL = True
self.pyAAS.serviceLogger.info('The Database manager is being started')
while self.POLL:
if (self.InBoundProcessingQueue).qsize() != 0:
inMessage = self.InBoundProcessingQueue.get()
if inMessage["functionType"] == 1:
dba = self.pyAAS.dba
_dba_method = getattr(dba,inMessage['method'])
self.outBoundProcessingDict[inMessage["instanceid"]] = _dba_method(inMessage['data'])
elif inMessage['functionType'] == 3:
dba = self.pyAAS.dba
(dba.saveNewConversationMessage(inMessage['conversationId'],inMessage['messageType'],inMessage["messageId"],inMessage["message"]))
self.pyAAS.serviceLogger.info('The Database manager is started')
def stop(self):
self.pyAAS.serviceLogger.info('The Database manager is being stopped')
self.POLL = False
self.pyAAS.serviceLogger.info('The Database manager is stopped')
def update(self):
pass
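# Illustrative shapes of the inbound messages consumed by start() above (all
# values are placeholders; only the keys mirror what the code reads):
#   {'functionType': 1, 'instanceid': '<id>', 'method': '<name of a dba method>', 'data': {...}}
#   {'functionType': 3, 'conversationId': '<id>', 'messageType': '<type>',
#    'messageId': '<id>', 'message': {...}}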
|
python
|
# The sequence of triangle numbers is generated by adding the natural numbers. For example, the 7th triangle
# number is 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten triangle numbers are:
#
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
#
# Let us list the divisors of the first seven triangle numbers:
#
#  1: 1
#  3: 1, 3
#  6: 1, 2, 3, 6
# 10: 1, 2, 5, 10
# 15: 1, 3, 5, 15
# 21: 1, 3, 7, 21
# 28: 1, 2, 4, 7, 14, 28
# As we can see, 28 is the first triangle number with more than five divisors.
#
# What is the first triangle number with more than five hundred divisors?
import math
from itertools import count
def get_amount_of_dividers(number):
amount = 2
    for i in range(2, int(math.sqrt(number)) + 1):
        if number % i == 0:
            amount += 2
    if math.sqrt(number).is_integer():
        amount -= 1
return amount
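# Worked check of the counting above: for 28 the loop tests i in {2, 3, 4, 5},
# finds the divisor pairs (2, 14) and (4, 7), so amount = 2 + 2 + 2 = 6 divisors
# (1, 2, 4, 7, 14, 28), matching the problem statement.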
def main():
for i in count(1):
        number = sum(range(1, i + 1))
amount_of_dividers = get_amount_of_dividers(number)
        if amount_of_dividers > 500:
            print(f'{number} - number of divisors: {amount_of_dividers}')
break
if __name__ == '__main__':
main()
|
python
|
from django.conf import settings
if settings.WITH_WQDB:
from wq.db import rest
from wq.db.patterns import serializers as patterns
from .models import Note
rest.router.register_model(
Note,
serializer=patterns.NaturalKeyModelSerializer,
fields="__all__",
)
|
python
|
# Introduction to Python
# Structure of if statements
"""
if condition:
Statements
elif condition:
Statements
else:
Statements
"""
#Grade of a student
marks = 90
# No braces in Python, indentation does the job
if marks > 90:
print("Grade O")
elif marks > 80:
print("Grade E")
elif marks > 70:
print("Grade A")
elif marks > 60:
print("Grade B")
elif marks > 50:
print("Grade C")
else:
print("Better luck next time")
# Divisible or not
number1 = 45
number2 = 5
if number1%number2 == 0:
print("Divisible")
else:
print("not divisible")
|
python
|
class DeprecatedEnv(ImportError):
pass
|
python
|
#!/usr/bin/env python
# coding: utf-8
# ## Case Challenge Part I (Individual Assignment 1)
# After three years serving customers across the San Francisco Bay Area, the executives at
# Apprentice Chef have decided to take on an analytics project to better understand how much
# revenue to expect from each customer within their first year of using their services. Thus, they
# have hired you on a full-time contract to analyze their data, develop your top insights, and build a
# machine learning model to predict revenue over the first year of each customer’s life cycle. They
# have explained to you that for this project, they are not interested in a time series analysis and
# instead would like to “keep things simple” by providing you with a dataset of aggregated
# customer information.
# ## Part 1: Data Exploration
# <h3> Package imports, peaking into data and checking for missing values
# In[1]:
# Importing libraries
import pandas as pd # Data science essentials
import matplotlib.pyplot as plt # Essential graphical output
import seaborn as sns # Enhanced graphical output
import numpy as np # Mathematical essentials
import statsmodels.formula.api as smf # Regression modeling
from os import listdir # Look inside file directory
from sklearn.model_selection import train_test_split # Split data into training and testing data
import gender_guesser.detector as gender # Guess gender based on (given) name
from sklearn.linear_model import LinearRegression # OLS Regression
import sklearn.linear_model # Linear models
from sklearn.neighbors import KNeighborsRegressor # KNN for Regression
from sklearn.preprocessing import StandardScaler # standard scaler
import openpyxl
# setting pandas print options
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# Filepath
file = './Apprentice_Chef_Dataset.xlsx'
# Importing the dataset
apprentice = pd.read_excel(io=file)
# formatting and printing the dimensions of the dataset
print(f"""
Size of Original Dataset
------------------------
Observations: {apprentice.shape[0]}
Features: {apprentice.shape[1]}
There are {apprentice.isnull().any().sum()} missing values
""")
# In[2]:
# Look at the data
apprentice.head()
# In[3]:
# Checking for missing values
apprentice.isnull().any()
# The missing value is in Family name, which will not be used
# <hr style="height:.9px;border:none;color:#333;background-color:#333;" /><br>
# <h3>Analyzing the Distribution of Revenues</h3>
# <h4>Develop a histogram to analyze the distribution of the Y-variable.</h4>
# In[4]:
# Histogram to check distribution of the response variable
sns.displot(data=apprentice,
x='REVENUE',
height=5,
aspect=2)
# displaying the histogram
plt.show()
# <h4>Develop a histogram to analyze the distribution of the log of the Y-variable.</h4>
# In[5]:
# log transforming Sale_Price and saving it to the dataset
apprentice['log_REVENUE'] = np.log10(apprentice['REVENUE'])
# developing a histogram using for log Revenue
sns.displot(data=apprentice,
x='log_REVENUE',
height=5,
aspect=2)
# displaying the histogram
plt.show()
# The log-transformed data looks somewhat better, although there is still an under-represented region
# <hr style="height:.9px;border:none;color:#333;background-color:#333;" /><br>
#
# <h3>Based on the outputs above, identify the data type of each original variable in the dataset.</h3><br>
# Use the following groupings:
#
# * CONTINUOUS
# * INTERVAL/COUNT
# * CATEGORICAL
# <hr style="height:.9px;border:none;color:#333;background-color:#333;" /><br>
#
# ## Part 2: Trend Based Features
# <h3>Checking the Continuous Data</h3>
# In[6]:
########################
# Visual EDA (Scatterplots)
########################
# setting figure size
fig, ax = plt.subplots(figsize=(10, 8))
# developing a scatterplot
plt.subplot(2, 2, 1)
sns.scatterplot(x=apprentice['AVG_TIME_PER_SITE_VISIT'],
y=apprentice['REVENUE'],
color='g')
# adding labels but not adding title
plt.xlabel(xlabel='Average Visit Time')
plt.ylabel(ylabel='Revenue')
########################
# developing a scatterplot
plt.subplot(2, 2, 2)
sns.scatterplot(x=apprentice['AVG_PREP_VID_TIME'],
y=apprentice['REVENUE'],
color='g')
# adding labels but not adding title
plt.xlabel(xlabel='Average Video Time')
plt.ylabel(ylabel='Revenue')
########################
# developing a scatterplot
plt.subplot(2, 2, 3)
sns.scatterplot(x=apprentice['TOTAL_PHOTOS_VIEWED'],
y=apprentice['REVENUE'],
color='orange')
# adding labels but not adding title
plt.xlabel(xlabel='Total Photos Viewed')
plt.ylabel(ylabel='Revenue')
########################
# developing a scatterplot
plt.subplot(2, 2, 4)
sns.scatterplot(x=apprentice['TOTAL_MEALS_ORDERED'],
y=apprentice['REVENUE'],
color='r')
# adding labels but not adding title
plt.xlabel(xlabel='Total Meals')
plt.ylabel(ylabel='Revenue')
# cleaning up the layout and displaying the results
plt.tight_layout()
plt.show()
# From the data collection method it is clear that Median Meal Rating and Average Clicks per Visit should be treated as count data rather than continuous data
# <h3>Checking the Interval and Count Data</h3>
# In[7]:
# Counting the number of zeroes in the interval data
noon_canc_zeroes = apprentice['CANCELLATIONS_BEFORE_NOON'].value_counts()[0]
after_canc_zeroes = apprentice['CANCELLATIONS_AFTER_NOON'].value_counts()[0]
weekly_log_zeroes = apprentice['WEEKLY_PLAN'].value_counts()[0]
early_meal_zeroes = apprentice['EARLY_DELIVERIES'].value_counts()[0]
late_meal_zeroes = apprentice['LATE_DELIVERIES'].value_counts()[0]
master_class_zeroes = apprentice['MASTER_CLASSES_ATTENDED'].value_counts()[0]
photo_view = apprentice['TOTAL_PHOTOS_VIEWED'].value_counts()[0]
# printing a table of the results
print(f"""
                            No\t\tYes
---------------------
Cancellations Before Noon | {noon_canc_zeroes}\t\t{len(apprentice) - noon_canc_zeroes}
Cancellations After Noon | {after_canc_zeroes}\t\t{len(apprentice) - after_canc_zeroes}
Weekly plan Subscription | {weekly_log_zeroes}\t\t{len(apprentice) - weekly_log_zeroes}
Early Meals. | {early_meal_zeroes}\t\t{len(apprentice) - early_meal_zeroes}
Late Meals. | {late_meal_zeroes}\t\t{len(apprentice) - late_meal_zeroes}
Master Class Attendance | {master_class_zeroes}\t\t{len(apprentice) - master_class_zeroes}
Photo Views. | {photo_view}\t\t{len(apprentice) - photo_view}
""")
# In[8]:
# Dummy variables for the factors we found above with at least 100 observations
apprentice['noon_canc'] = 0
apprentice['after_canc'] = 0
apprentice['weekly_plan_sub'] = 0
apprentice['early_delivery'] = 0
apprentice['late_delivery'] = 0
apprentice['masterclass_att'] = 0
apprentice['view_photo'] = 0
# Iterate over each row to build the new boolean feature columns
for index, value in apprentice.iterrows():
# For noon cancellations
if apprentice.loc[index, 'CANCELLATIONS_BEFORE_NOON'] > 0:
apprentice.loc[index, 'noon_canc'] = 1
    # For afternoon cancellations
if apprentice.loc[index, 'CANCELLATIONS_AFTER_NOON'] > 0:
apprentice.loc[index, 'after_canc'] = 1
# Weekly meal plan subscription
if apprentice.loc[index, 'WEEKLY_PLAN'] > 0:
apprentice.loc[index, 'weekly_plan_sub'] = 1
# Early deliveries
if apprentice.loc[index, 'EARLY_DELIVERIES'] > 0:
apprentice.loc[index, 'early_delivery'] = 1
# Late Deliveries
if apprentice.loc[index, 'LATE_DELIVERIES'] > 0:
apprentice.loc[index, 'late_delivery'] = 1
# Masterclass attendance
if apprentice.loc[index, 'MASTER_CLASSES_ATTENDED'] > 0:
apprentice.loc[index, 'masterclass_att'] = 1
# Viewed Photos
if apprentice.loc[index, 'TOTAL_PHOTOS_VIEWED'] > 0:
apprentice.loc[index, 'view_photo'] = 1
# Another factor I want to consider is making flags for whether the customer contacted customer service on more than half of their orders and whether mobile or PC is the preferred route of ordering.
# In[9]:
# Checking distribution
contact_greater = []
mobile_greater = []
# Instantiating dummy variables
for index, value in apprentice.iterrows():
# For noon cancellations
if apprentice.loc[index, 'CONTACTS_W_CUSTOMER_SERVICE'] > (apprentice.loc[index, 'TOTAL_MEALS_ORDERED']) / 2:
contact_greater.append(1)
else:
contact_greater.append(0)
# Instantiating dummy variables
for index, value in apprentice.iterrows():
if apprentice.loc[index, 'MOBILE_LOGINS'] > apprentice.loc[index, 'PC_LOGINS']:
mobile_greater.append(1)
else:
mobile_greater.append(0)
contact_greater = pd.DataFrame(contact_greater)
mobile_greater = pd.DataFrame(mobile_greater)  # PC logins are consistently higher, so we drop this flag later
contact_greater.value_counts() # Checking distribution of zeros
# Adding them to the data
apprentice['contact_greater'] = contact_greater
apprentice['mobile_greater'] = mobile_greater
# In[10]:
# <h4>Checking the Count and interval data after dealing with zeroes</h4>
# Some of the count data carried significant information in its zeroes, so it was split into boolean features. Now I will plot the distributions of the interval data to see which variables might need transformation to give more insight into the model.
# After checking the plots for all the interval data these were the ones needing transformation.
# In[11]:
# setting figure size
fig, ax = plt.subplots(figsize=(15, 10))
## Plot 1: Original X, Original Y ##
plt.subplot(1, 2, 1)
# Plotting
sns.boxplot(x='AVG_CLICKS_PER_VISIT',
y='REVENUE',
data=apprentice
)
# titles and labels
plt.title('Average clicks per visit')
## Plot 1: Original X, Original Y ##
plt.subplot(1, 2, 2)
# Plotting
sns.boxplot(x='CONTACTS_W_CUSTOMER_SERVICE',
y='REVENUE',
data=apprentice
)
# titles and labels
plt.title('Customer Service')
# Showing the displaying
plt.show()
# In[12]:
# Converting to logs and seeing if the data improves
apprentice['log_clicks'] = np.log10(apprentice['AVG_CLICKS_PER_VISIT']) # Average clicks log
apprentice['log_customer'] = np.log10(apprentice['CONTACTS_W_CUSTOMER_SERVICE']) # Customer contact
# setting figure size
fig, ax = plt.subplots(figsize=(15, 10))
## Plot 1: Original X, Original Y ##
plt.subplot(1, 2, 1)
# Plotting
sns.boxplot(x='log_clicks',
y='log_REVENUE',
data=apprentice
)
# titles and labels
plt.title('LOG Average clicks per visit')
## Plot 1: Original X, Original Y ##
plt.subplot(1, 2, 2)
# Plotting
sns.boxplot(x='log_customer',
y='log_REVENUE',
data=apprentice
)
# titles and labels
plt.title('LOG Customer Service')
# Showing the displaying
plt.show()
# In[13]:
# Dummy variables for the factors we found above with at least 100 observations
apprentice['meals_below_fif'] = 0
apprentice['meals_above_two'] = 0
apprentice['unique_meals_above_ten'] = 0
apprentice['cust_serv_under_ten'] = 0
apprentice['click_under_eight'] = 0
# Iterate over each row to build the new boolean feature columns
for index, value in apprentice.iterrows():
# Total meals greater than 200
if apprentice.loc[index, 'TOTAL_MEALS_ORDERED'] >= 200:
apprentice.loc[index, 'meals_below_fif'] = 1
# Total meals less than 15
if apprentice.loc[index, 'TOTAL_MEALS_ORDERED'] <= 15:
apprentice.loc[index, 'meals_above_two'] = 1
# Unique meals greater 10
if apprentice.loc[index, 'UNIQUE_MEALS_PURCH'] > 10:
apprentice.loc[index, 'unique_meals_above_ten'] = 1
# Customer service less than 10
if apprentice.loc[index, 'CONTACTS_W_CUSTOMER_SERVICE'] < 10:
apprentice.loc[index, 'cust_serv_under_ten'] = 1
# Clicks below 8
if apprentice.loc[index, 'AVG_CLICKS_PER_VISIT'] < 8:
apprentice.loc[index, 'click_under_eight'] = 1
# Adding the new variable
apprentice['freq_customer_service'] = 0
# Instantiating dummy variables
for index, value in apprentice.iterrows():
# For noon cancellations
if apprentice.loc[index, 'CONTACTS_W_CUSTOMER_SERVICE'] > (apprentice.loc[index, 'TOTAL_MEALS_ORDERED']) / 2:
apprentice.loc[index, 'freq_customer_service'] = 1
# In[14]:
# Log transforms
inter_list = ['LARGEST_ORDER_SIZE', 'PRODUCT_CATEGORIES_VIEWED', 'PC_LOGINS',
'TOTAL_MEALS_ORDERED', 'UNIQUE_MEALS_PURCH', 'CONTACTS_W_CUSTOMER_SERVICE']
for item in inter_list:
# Converting to logs and seeing if the data improves
apprentice['log_' + item] = np.log10(apprentice[item])
# <h3>Working with Categorical Data</h3>
# In[15]:
# STEP 1: splitting personal emails
# placeholder list
placeholder_lst = []
# looping over each email address
for index, col in apprentice.iterrows():
# splitting email domain at '@'
split_email = apprentice.loc[index, 'EMAIL'].split(sep='@')
# appending placeholder_lst with the results
placeholder_lst.append(split_email)
# converting placeholder_lst into a DataFrame
email_df = pd.DataFrame(placeholder_lst)
# STEP 2: concatenating with original DataFrame
# renaming column to concatenate
email_df.columns = ['0', 'personal_email_domain']
# concatenating personal_email_domain with friends DataFrame
apprentice = pd.concat([apprentice, email_df['personal_email_domain']],
axis=1)
# In[16]:
# printing value counts of personal_email_domain
apprentice.loc[:, 'personal_email_domain'].value_counts()
# In[17]:
# email domain types
personal_email_domains = ['@gmail.com', '@microsoft.com', '@yahoo.com',
'@msn.com', '@live.com', '@protonmail.com',
'@aol.com', '@hotmail.com', '@apple.com']
# Domain list
domain_lst = []
# looping to group observations by domain type
for domain in apprentice['personal_email_domain']:
if '@' + domain in personal_email_domains:
domain_lst.append('personal')
else:
domain_lst.append('work')
# concatenating with original DataFrame
apprentice['domain_group'] = pd.Series(domain_lst)
# checking results
apprentice['domain_group'].value_counts()
# Created some extra categorical data that we can use to try infer some more statistics
# In[18]:
# one hot encoding categorical variables
one_hot_domain = pd.get_dummies(apprentice['domain_group'])
# joining codings together
apprentice = apprentice.join([one_hot_domain])
# In[19]:
apprentice.describe()
# <hr style="height:.9px;border:none;color:#333;background-color:#333;" /><br>
#
# ## Part 3: Model Testing
# <br>
# In[20]:
# making a copy of the dataset
apprentice_explanatory = apprentice.copy()
# dropping REVENUE and the non-numeric identifier columns from the explanatory variable set
apprentice_explanatory = apprentice_explanatory.drop(['REVENUE', 'NAME', 'EMAIL', 'FIRST_NAME',
'FAMILY_NAME', 'personal_email_domain', 'domain_group',
'log_REVENUE'], axis=1)
# formatting each explanatory variable for statsmodels
for val in apprentice_explanatory:
print(val, '+')
# In[21]:
# Step 1: build a model
lm_best = smf.ols(formula="""log_REVENUE ~ CROSS_SELL_SUCCESS +
UNIQUE_MEALS_PURCH +
CONTACTS_W_CUSTOMER_SERVICE +
PRODUCT_CATEGORIES_VIEWED +
AVG_PREP_VID_TIME +
LARGEST_ORDER_SIZE +
MEDIAN_MEAL_RATING +
AVG_CLICKS_PER_VISIT +
masterclass_att +
view_photo +
contact_greater +
mobile_greater +
log_clicks +
log_customer +
meals_below_fif +
meals_above_two +
unique_meals_above_ten +
click_under_eight +
freq_customer_service +
log_LARGEST_ORDER_SIZE +
log_PRODUCT_CATEGORIES_VIEWED +
log_TOTAL_MEALS_ORDERED +
log_UNIQUE_MEALS_PURCH +
log_CONTACTS_W_CUSTOMER_SERVICE +
personal +
work """,
data=apprentice)
# Step 2: fit the model based on the data
results = lm_best.fit()
# Step 3: analyze the summary output
print(results.summary())
# In[22]:
# preparing explanatory variable data
x_variables = ['CROSS_SELL_SUCCESS', 'UNIQUE_MEALS_PURCH', 'CONTACTS_W_CUSTOMER_SERVICE',
'PRODUCT_CATEGORIES_VIEWED', 'AVG_PREP_VID_TIME', 'LARGEST_ORDER_SIZE',
'MEDIAN_MEAL_RATING', 'AVG_CLICKS_PER_VISIT', 'masterclass_att',
'view_photo', 'log_clicks', 'log_customer', 'meals_below_fif',
'meals_above_two', 'unique_meals_above_ten', 'click_under_eight',
'freq_customer_service', 'log_LARGEST_ORDER_SIZE', 'log_PRODUCT_CATEGORIES_VIEWED',
'log_TOTAL_MEALS_ORDERED', 'log_UNIQUE_MEALS_PURCH', 'log_CONTACTS_W_CUSTOMER_SERVICE',
'personal', 'work']
apprentice_data = apprentice_explanatory[x_variables]
# preparing the target variable
apprentice_target = apprentice.loc[:, 'log_REVENUE']
# Splitting data
X_train, X_test, y_train, y_test = train_test_split(
apprentice_data,
apprentice_target,
test_size=0.25,
random_state=219)
# In[23]:
# INSTANTIATING a model object
lr = LinearRegression()
# FITTING to the training data
lr_fit = lr.fit(X_train, y_train)
# PREDICTING on new data
lr_pred = lr_fit.predict(X_test)
# SCORING the results
print('OLS Training Score :', lr.score(X_train, y_train).round(4)) # using R-square
print('OLS Testing Score :', lr.score(X_test, y_test).round(4)) # using R-square
lr_train_score = lr.score(X_train, y_train).round(4)
lr_test_score = lr.score(X_test, y_test).round(4)
# displaying and saving the gap between training and testing
print('OLS Train-Test Gap :', abs(lr_train_score - lr_test_score).round(4))
lr_test_gap = abs(lr_train_score - lr_test_score).round(4)
# In[24]:
# zipping each feature name to its coefficient
lr_model_values = zip(apprentice_data.columns,
lr_fit.coef_.round(decimals=4))
# setting up a placeholder list to store model features
lr_model_lst = [('intercept', lr_fit.intercept_.round(decimals=4))]
# printing out each feature-coefficient pair one by one
for val in lr_model_values:
lr_model_lst.append(val)
# checking the results
for pair in lr_model_lst:
print(pair)
# In[25]:
# Making the list a data frame to print later
lr_model_lst = pd.DataFrame(lr_model_lst)
# Naming the Columns
lr_model_lst.columns = ['Variables', 'Coefficients']
# Removing indices for print
lr_model_lst_no_indices = lr_model_lst.to_string(index=False)
# In[26]:
# Importing another library
import sklearn.linear_model # Linear models
# In[27]:
# INSTANTIATING a model object
lasso_model = sklearn.linear_model.Lasso()  # default magnitude (alpha)
# FITTING to the training data
lasso_fit = lasso_model.fit(X_train, y_train)
# PREDICTING on new data
lasso_pred = lasso_fit.predict(X_test)
# SCORING the results
print('Lasso Training Score :', lasso_model.score(X_train, y_train).round(4))
print('Lasso Testing Score :', lasso_model.score(X_test, y_test).round(4))
## the following code has been provided for you ##
# saving scoring data for future use
lasso_train_score = lasso_model.score(X_train, y_train).round(4) # using R-square
lasso_test_score = lasso_model.score(X_test, y_test).round(4) # using R-square
# displaying and saving the gap between training and testing
print('Lasso Train-Test Gap :', abs(lasso_train_score - lasso_test_score).round(4))
lasso_test_gap = abs(lasso_train_score - lasso_test_score).round(4)
# In[28]:
# zipping each feature name to its coefficient
lasso_model_values = zip(apprentice_data.columns, lasso_fit.coef_.round(decimals=2))
# setting up a placeholder list to store model features
lasso_model_lst = [('intercept', lasso_fit.intercept_.round(decimals=2))]
# printing out each feature-coefficient pair one by one
for val in lasso_model_values:
lasso_model_lst.append(val)
# checking the results
for pair in lasso_model_lst:
print(pair)
# In[29]:
# INSTANTIATING a model object
ard_model = sklearn.linear_model.ARDRegression()
# FITTING the training data
ard_fit = ard_model.fit(X_train, y_train)
# PREDICTING on new data
ard_pred = ard_fit.predict(X_test)
print('ARD Training Score:', ard_model.score(X_train, y_train).round(4))
print('ARD Testing Score :', ard_model.score(X_test, y_test).round(4))
# saving scoring data for future use
ard_train_score = ard_model.score(X_train, y_train).round(4)
ard_test_score = ard_model.score(X_test, y_test).round(4)
# displaying and saving the gap between training and testing
print('ARD Train-Test Gap :', abs(ard_train_score - ard_test_score).round(4))
ard_test_gap = abs(ard_train_score - ard_test_score).round(4)
# In[30]:
# zipping each feature name to its coefficient
ard_model_values = zip(apprentice_data.columns, ard_fit.coef_.round(decimals=5))
# setting up a placeholder list to store model features
ard_model_lst = [('intercept', ard_fit.intercept_.round(decimals=2))]
# printing out each feature-coefficient pair one by one
for val in ard_model_values:
ard_model_lst.append(val)
# checking the results
for pair in ard_model_lst:
print(pair)
# In[31]:
# KNN
# INSTANTIATING a StandardScaler() object
scaler = StandardScaler()
# FITTING the scaler with the data
scaler.fit(apprentice_data)
# TRANSFORMING our data after fit
X_scaled = scaler.transform(apprentice_data)
# converting scaled data into a DataFrame
X_scaled_df = pd.DataFrame(X_scaled)
# adding labels to the scaled DataFrame
X_scaled_df.columns = apprentice_data.columns
# Training testing and splitit again
X_train_STAND, X_test_STAND, y_train_STAND, y_test_STAND = train_test_split(
X_scaled_df,
apprentice_target,
test_size=0.25,
random_state=219)
# INSTANTIATING a model with the optimal number of neighbors
knn_stand = KNeighborsRegressor(algorithm='auto',
n_neighbors=9)
# FITTING the model based on the training data
knn_stand_fit = knn_stand.fit(X_train_STAND, y_train_STAND)
# PREDICTING on new data
knn_stand_pred = knn_stand_fit.predict(X_test_STAND)
# SCORING the results
print('KNN Training Score:', knn_stand.score(X_train_STAND, y_train_STAND).round(4))
print('KNN Testing Score :', knn_stand.score(X_test_STAND, y_test_STAND).round(4))
# saving scoring data for future use
knn_stand_score_train = knn_stand.score(X_train_STAND, y_train_STAND).round(4)
knn_stand_score_test = knn_stand.score(X_test_STAND, y_test_STAND).round(4)
# displaying and saving the gap between training and testing
print('KNN Train-Test Gap:', abs(knn_stand_score_train - knn_stand_score_test).round(4))
knn_stand_test_gap = abs(knn_stand_score_train - knn_stand_score_test).round(4)
# In[32]:
# comparing results
print(f"""
Model Train Score Test Score Train-Test Gap Model Size
----- ----------- ---------- --------------- ----------
OLS {lr_train_score} {lr_test_score} {lr_test_gap} {len(lr_model_lst)}
Lasso {lasso_train_score} {lasso_test_score} {lasso_test_gap} {len(lasso_model_lst)}
ARD {ard_train_score} {ard_test_score} {ard_test_gap} {len(ard_model_lst)}
""")
# In[33]:
# creating a dictionary for model results
model_performance = {
'Model Type': ['OLS', 'Lasso', 'ARD'],
'Training': [lr_train_score, lasso_train_score,
ard_train_score],
'Testing': [lr_test_score, lasso_test_score,
ard_test_score],
'Train-Test Gap': [lr_test_gap, lasso_test_gap,
ard_test_gap],
'Model Size': [len(lr_model_lst), len(lasso_model_lst),
len(ard_model_lst)],
'Model': [lr_model_lst, lasso_model_lst, ard_model_lst]}
# converting model_performance into a DataFrame
model_performance = pd.DataFrame(model_performance)
model_performance.head()
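# Illustrative sketch (not in the original notebook): one simple way to rank the
# candidates programmatically is to sort the frame above on test score (descending)
# and then on the train-test gap (ascending); best_by_test is a hypothetical name.
best_by_test = model_performance.sort_values(by=['Testing', 'Train-Test Gap'],
                                             ascending=[False, True]).iloc[0]
print('Top-ranked model by this heuristic:', best_by_test['Model Type'])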
# <hr style="height:.9px;border:none;color:#333;background-color:#333;" /><br>
#
# ## Part 4: Final Model Selected
#
# The best model from the above analysis is the OLS regression which has the following:
#
# In[34]:
# Selected Model
print(f"""
The Model selected is OLS Regression
Model Train Score Test Score Train-Test Gap Model Size
----- ----------- ---------- --------------- ----------
OLS {lr_train_score} {lr_test_score} {lr_test_gap} {len(lr_model_lst)}
Model Coefficients
----------------------
{lr_model_lst_no_indices}
""")
|
python
|
from flocx_ui.api import schema
from flocx_ui.api.utils import generic_provider_request as generic_request
from flocx_ui.api.utils import validate_data_with
def post(path, **kwargs):
"""An alias for generic_request with the type set to 'POST'
:param path: A url path
:param **kwargs: The keyword arguments to be passed to the request function
:return: A request for the given path
"""
return generic_request('POST', path, **kwargs)
@validate_data_with(None, schema.validate_provider_offer)
def offer_create(request, offer):
"""Create an offer
:param request: HTTP request
:param offer: The offer to be created
:return: The offer that was created
"""
response = post('/v1/offers', json=offer, token=request.user.token.id)
data = response.json()
return data
|
python
|
import bluesky.plan_stubs as bps
import bluesky.plans as bp
import bluesky.preprocessors as bpp
import numpy as np
import pytest
from ophyd.sim import SynAxis, hw
import nabs.plans as nbp
from nabs.simulators import validate_plan
hw = hw()
class LimitedMotor(SynAxis):
def check_value(self, value, **kwargs):
if np.abs(value) > 10:
raise ValueError("value out of bounds")
limit_motor = LimitedMotor(name='limit_motor', labels={'motors'})
@bpp.set_run_key_decorator("run_2")
@bpp.run_decorator(md={})
def sim_plan_inner(npts=2):
for j in range(npts):
yield from bps.mov(hw.motor1, j * 0.1 + 1,
hw.motor2, j * 0.2 - 2)
yield from bps.trigger_and_read([hw.motor1, hw.motor2,
hw.det2])
@bpp.set_run_key_decorator("run_1")
@bpp.run_decorator(md={})
def sim_plan_outer(npts):
for j in range(int(npts/2)):
yield from bps.mov(hw.motor, j * 0.2)
yield from bps.trigger_and_read([hw.motor, hw.det])
yield from sim_plan_inner(npts + 1)
for j in range(int(npts/2), npts):
yield from bps.mov(hw.motor, j * 0.2)
yield from bps.trigger_and_read([hw.motor, hw.det])
def bad_limits():
yield from bps.open_run()
yield from bps.sleep(1)
yield from bps.mv(limit_motor, 100)
yield from bps.sleep(1)
yield from bps.close_run()
def bad_nesting():
yield from bps.open_run()
yield from bp.count([])
yield from bps.close_run()
def bad_call():
yield from bps.open_run()
limit_motor.set(10)
yield from bps.close_run()
def bad_stage():
yield from bps.stage(hw.det)
@pytest.mark.parametrize(
'plan',
[
bad_limits(),
bad_nesting(),
bad_call(),
]
)
def test_bad_plans(plan):
success, _ = validate_plan(plan)
assert success is False
@pytest.mark.parametrize(
'plan',
[
sim_plan_outer(4),
bp.count([hw.det], num=2),
bp.scan([hw.det, hw.det2, hw.motor],
hw.motor, 0, 1, hw.motor2, 1, 20, 10),
nbp.daq_dscan([hw.det], hw.motor, 1, 0, 2, events=1)
]
)
def test_good_plans(plan, daq):
success, _ = validate_plan(plan)
assert success is True
|
python
|
def test_list_devices(client):
devices = client.devices()
assert len(devices) > 0
assert any(map(lambda device: device.serial == "emulator-5554", devices))
def test_version(client):
version = client.version()
assert type(version) == int
assert version != 0
|
python
|
import numpy as np
from gutfit import model, parameterlist
def matrix_diag3(d1,d2,d3):
return np.array([[d1, 0.0, 0.0], [0.0, d2, 0.0], [0.0, 0.0, d3]])
# Generic Rotations #
def matrix_rot23(th23):
return np.array([[1.0, 0.0 , 0.0],
[0.0, np.cos(th23), np.sin(th23)],
[0.0, -np.sin(th23), np.cos(th23)]])
def matrix_rot12(th12):
return np.array([[ np.cos(th12), np.sin(th12), 0.0],
[-np.sin(th12), np.cos(th12), 0.0],
[ 0.0, 0.0, 1.0]])
def matrix_rot13(th13, delta):
return np.array([[ np.cos(th13), 0.0, np.sin(th13) * np.exp(-1j * delta)],
[ 0.0 , 1.0, 0.0 ],
[-np.sin(th13)* np.exp(1j * delta), 0.0, np.cos(th13)]],
dtype=np.complex64)
def matrix_vckm(th12, th13, th23, delta):
return matrix_rot23(th23) @ matrix_rot13(th13, delta) @ matrix_rot12(th12)
# Phase Matrices #
def matrix_phase(a1, a2, a3):
return np.array([[np.exp(1j * a1), 0.0, 0.0],
[ 0.0, np.exp(1j * a2), 0.0],
[ 0.0, 0.0, np.exp(1j * a3)]],
dtype=np.complex64)
def matrix_Yd(a1, a2, a3, b1, b2, th12, th13, th23, delta, yd, ys, yb):
Pa = matrix_phase(a1, a2, a3)
Pb = matrix_phase(b1, b2, 0.0)
Vckm = matrix_vckm(th12, th13, th23, delta)
Yddiag = matrix_diag3(yd, ys, yb)
Yukd = Pa @ Vckm @ Yddiag @ Pb @ np.transpose(Vckm) @ Pa
return Yukd
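# Illustrative sketch (not part of the original module): matrix_vckm composes three
# rotations, one of which carries the CP phase, so the result should be unitary.
# The helper below uses arbitrary placeholder angles, not fitted quark-sector values.
def _check_vckm_unitarity(th12=0.227, th13=0.0035, th23=0.042, delta=1.2):
    """Return True if V_CKM @ V_CKM^dagger is numerically the 3x3 identity."""
    V = matrix_vckm(th12, th13, th23, delta)
    return np.allclose(V @ V.conj().T, np.eye(3), atol=1e-5)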
class Type1And2SeeSaw(model.Model):
def __init__(self):
params = [
"generic_quark_phase_a1",
"generic_quark_phase_a2",
"generic_quark_phase_a3",
"generic_quark_phase_b1",
"generic_quark_phase_b2",
"data_quark_th12",
"data_quark_th13",
"data_quark_th23",
"data_quark_delta",
"data_quark_yu",
"data_quark_yc",
"data_quark_yt",
"data_quark_yd",
"data_quark_ys",
"data_quark_yb",
"model1_mL",
"model1_mR",
"model1_r1",
"model1_Rer2",
"model1_Imr2"
]
super().__init__(params)
@property
def val(self):
return np.abs(
self.MnuTheory(
self.generic_quark_phase_a1,
self.generic_quark_phase_a2,
self.generic_quark_phase_a3,
self.generic_quark_phase_b1,
self.generic_quark_phase_b2,
self.data_quark_th12,
self.data_quark_th13,
self.data_quark_th23,
self.data_quark_delta,
self.data_quark_yu,
self.data_quark_yc,
self.data_quark_yt,
self.data_quark_yd,
self.data_quark_ys,
self.data_quark_yb,
self.model1_mL,
self.model1_mR,
self.model1_r1,
self.model1_Rer2,
self.model1_Imr2
)
)
def MnuTheory(self, a1, a2, a3, b1, b2, th12q, th13q, th23q, deltaq, yu, yc, yt, yd, ys, yb, mL, mR, r1, Rer2, Imr2):
Yd = matrix_Yd(a1, a2, a3, b1, b2, th12q, th13q, th23q, deltaq, yd, ys, yb)
Yu = matrix_diag3(yu, yc, yt)
r2 = Rer2 + 1j * Imr2
type1p1 = 8.0 * (r2 - 3.0)/(r2-1.0) * Yu
type1p2 = -16.0 /(r1 * (r2 - 1.0)) * Yd
type1p3 = (r1 * (r2 - 1.0))/r2 * Yu @ np.linalg.inv(r1 * Yu - Yd) @ Yu
type1 = mR * (type1p1 + type1p2 + type1p3)
type2p1 = Yu / (r2 - 1)
type2p2 = -Yd / (r1 * (r2 - 1))
type2 = mL * (type2p1 + type2p2)
return type1 + type2
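    # The expression implemented above (transcribed from the code, same symbols
    # as the method arguments):
    #   Mnu = mR * [ 8*(r2 - 3)/(r2 - 1) * Yu
    #                - 16/(r1*(r2 - 1)) * Yd
    #                + r1*(r2 - 1)/r2 * Yu @ inv(r1*Yu - Yd) @ Yu ]
    #       + mL * [ Yu/(r2 - 1) - Yd/(r1*(r2 - 1)) ]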
# def MnuTheory(self, a1, a2, a3, b1, b2, th12q, th13q, th23q, deltaq, yu, yc, yt, yd, ys, yb, mL, mR, r1, Rer2, Imr2):
# Yd = matrix_Yd(a1, a2, a3, b1, b2, th12q, th13q, th23q, deltaq, yd, ys, yb)
# Yu = matrix_diag3(yu, yc, yt)
# r2 = Rer2 + 1j * Imr2
# type1p1 = 8.0 * (r2 - 3.0)/(r2-1.0) * Yu
# type1p2 = -16.0 /(r1 * (r2 - 1.0)) * Yd
# type1p3 = (r1 * (r2 - 1.0))/r2 * Yu @ np.linalg.inv(r1 * Yu - Yd) @ Yu
# type1 = mR * (type1p1 + type1p2 + type1p3)
# type2p1 = Yu / (r2 - 1)
# type2p2 = -Yd / (r1 * (r2 - 1))
# type2 = (type2p1 + type2p2)
# return (type1/mL) + type2
if __name__=="__main__":
E = Type1And2SeeSaw()
PL = parameterlist.ParameterList.fromConfigFile("examples/param_card.dat")
from IPython import embed
embed()
E(PL())
import time
t0 = time.time()
for _ in range(1000000):
E(PL())
print(time.time() - t0)
|
python
|
import argparse
import json
import logging
import random
import numpy as np
import torch
from decouple import config
from tqdm import tqdm
from GPT2.config import GPT2Config
from GPT2.encoder import get_encoder
from GPT2.model import GPT2LMHeadModel
from GPT2.utils import load_weight
# import os
# import torch.nn.functional as F
# from array import array
parser = argparse.ArgumentParser(description="Validity Tensor Estimation")
parser.add_argument(
"-gs",
default="data/groundStrings.json",
type=str,
help="sets the input grond string file",
)
parser.add_argument(
"-pt",
default="data/perterbationTensor.json",
type=str,
help="sets the input perterbation tensor file.",
)
parser.add_argument(
"-gvi",
default="data/groundValidityTensor.json",
type=str,
help="sets the input ground validity tensor file.",
)
parser.add_argument(
"-gvo",
default="data/groundValidityTensor.json",
type=str,
help="sets the output ground validity tensor file.",
)
parser.add_argument(
"-vo",
default="data/validityTensor.json",
type=str,
help="sets the output validity tensor file.",
)
parser.add_argument(
"-d",
type=str,
help="Sets the device to use.\n"
"Choices: 'gpu' for GPU, 'cpu' for CPU\n"
"(If left blank defaults to 'DEVICE' entry in .env file.)\n",
)
parser.add_argument(
"-checkpoint",
default=None,
type=str,
help="Begin again from end of partial validity tensor file.\n"
"Accepts: file path to .json containing validity tensor.\n",
)
args = vars(parser.parse_args())
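# Example invocation (illustrative; the flags and file paths are the argparse
# defaults defined above, and the script name is taken from the logging format
# string below):
#   python make_validity_tensor.py -d cpu \
#       -gs data/groundStrings.json -pt data/perterbationTensor.json \
#       -gvi data/groundValidityTensor.json -vo data/validityTensor.json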
logging.basicConfig(
filename="logs/validtyTensor.log",
level=logging.DEBUG,
format="[%(asctime)s|%(name)s|make_validity_tensor.py|%(levelname)s] %(message)s",
)
if args["d"]:
device_choice = args["d"]
else:
device_choice = config("DEVICE")
print("\nDEVICE:", device_choice, "\n")
if device_choice == "gpu" and not torch.cuda.is_available():
print("CUDA unavailable, defaulting to CPU.")
device_choice = "cpu"
if device_choice == "gpu":
print("gpu accellerated")
else:
print("cpu bound")
state_dict = torch.load(
config("MODEL_LOCATION"),
map_location="cpu"
if (not torch.cuda.is_available() or device_choice == "cpu")
else None,
)
print("\nValidity Tensor Estimation\n")
# -- Setting up PyTorch Information -- #
seed = random.randint(0, 2147483647)
np.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed(seed)
# device = torch.device("cpu")
device = torch.device(
"cuda" if (torch.cuda.is_available() and device_choice == "gpu") else "cpu"
)
known_configurations = {
"s_ai": GPT2Config(),
"xl_ai": GPT2Config(
vocab_size_or_config_json_file=50257,
n_positions=1024,
n_ctx=1024,
n_embd=1600,
n_layer=48,
n_head=25,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
),
}
# -- Load Model -- #
gpt2_config = known_configurations[config("MODEL_NAME")]
model = GPT2LMHeadModel(gpt2_config)
model = load_weight(model, state_dict)
model.share_memory()
model.to(device)
model.eval()
# -- serving BrainSqueeze resources. --#
def tokenize(text: str):
enc = get_encoder()
tokens = enc.encode(text)
return tokens
def detokenize(tokens: iter):
enc = get_encoder()
text = enc.decode(tokens)
return text
def firstMismatch(tokensA: iter, tokensB: iter):
# assumes tokensA is shorter than, or as long as, tokensB.
for i in range(len(tokensA)):
if tokensA[i] != tokensB[i]:
return i
return None
def firstMismatchInclusive(tokensA: iter, tokensB: iter):
# makes no assumptions about the lengths of tokensA and tokensB.
for i in range(min(len(tokensA), len(tokensB))):
if tokensA[i] != tokensB[i]:
return i
return min(len(tokensA), len(tokensB))
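# Worked examples (hypothetical token lists, independent of the GPT-2 encoder):
#   firstMismatch([1, 2, 3], [1, 9, 3])        -> 1    (first differing position)
#   firstMismatch([1, 2], [1, 2, 7])           -> None (the shorter list is a prefix)
#   firstMismatchInclusive([1, 2], [1, 2, 7])  -> 2    (length of the shorter list)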
def predictedDistribution(
model=model,
start_token=50256,
batch_size=1,
tokens=None,
temperature: float = None,
top_k=1,
device=device,
):
"""returns a probability distribution for the next byte-pair encoding"""
if tokens is None:
context = torch.full(
(batch_size, 1), start_token, device=device, dtype=torch.long
)
elif type(tokens) is torch.Tensor:
context = tokens.unsqueeze(0).repeat(batch_size, 1)
else:
context = (
torch.tensor(tokens, device=device, dtype=torch.long)
.unsqueeze(0)
.repeat(batch_size, 1)
)
prev = context
past = None
with torch.no_grad():
logits, past = model(prev, past=past)
logits = logits[:, -1, :]
return logits[0]
def errorSeries(tokens: list, pbar: tqdm):
radii = []
# get first radius (special case)
logits = predictedDistribution(start_token=50256) # 50256 => <|endoftext|>
prob = logits[tokens[0]]
clamped = torch.clamp(logits, min=prob, max=None)
clamped.add_(-prob)
radius = torch.count_nonzero(clamped).item()
radii.append(radius)
if pbar is not None:
pbar.update(1)
# get all following radii
for i in range(1, len(tokens)):
logits = predictedDistribution(tokens=tokens[:i])
prob = logits[tokens[i]]
clamped = torch.clamp(logits, min=prob, max=None)
clamped.add_(-prob)
radius = torch.count_nonzero(clamped).item()
radii.append(radius)
if pbar is not None:
pbar.update(1)
return radii
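# Illustrative sketch (not in the original script): the "radius" computed in
# errorSeries is the number of logits strictly greater than the true token's logit,
# i.e. the true token's rank under the model. The toy tensor below is hypothetical
# and does not touch the GPT-2 model loaded above.
_toy_logits = torch.tensor([0.1, 2.0, -1.0, 0.5])
_toy_prob = float(_toy_logits[3])                     # pretend token 3 is the true token
_toy_clamped = torch.clamp(_toy_logits, min=_toy_prob, max=None)
_toy_clamped.add_(-_toy_prob)
assert torch.count_nonzero(_toy_clamped).item() == 1  # only the 2.0 logit exceeds 0.5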
def partialErrorSeries(tokens: list, start: int):
def getRadius(logits, token):
prob = logits[token]
clamped = torch.clamp(logits, min=prob, max=None)
clamped.add_(-prob)
radius = torch.count_nonzero(clamped).item()
return radius
radii = []
if start == 0:
# get first radius (special case)
logits = predictedDistribution(start_token=50256) # 50256 => <|endoftext|>
radius = getRadius(logits, tokens[0])
radii.append(radius)
# then get all following radii
for i in range(1, len(tokens)):
logits = predictedDistribution(tokens=tokens[:i])
radius = getRadius(logits, tokens[i])
radii.append(radius)
return radii
else:
for i in range(start, len(tokens)):
logits = predictedDistribution(tokens=tokens[:i])
radius = getRadius(logits, tokens[i])
radii.append(radius)
return radii
def calculateGroundValidityTensor(groundStrings: iter):
gvBar = tqdm(total=len(groundStrings), desc="GroundValidity", position=0)
gvTen = []
coder = get_encoder()
for gs in groundStrings:
tokens = coder.encode(gs)
radii = errorSeries(tokens, None)
gvTen.append(radii)
gvBar.update()
return gvTen
def calculateValidityTensor(
groundTokens: iter,
groundValidityTensor: iter,
perterbationTensor: iter,
checkpoint: str = None,
):
validityTensor = []
totalBar = tqdm(total=len(perterbationTensor), desc="Total", position=0)
symbolBar = tqdm(total=len(perterbationTensor[0][1]), desc="TBD", position=1)
vectorBar = tqdm(total=len(perterbationTensor[0][1][0]), desc="Vector", position=2)
if checkpoint:
with open(checkpoint, "r") as f:
validityTensor = json.load(f)
# don't recalculate any symbols that have already been done
already = len(validityTensor)
perterbationTensor = perterbationTensor[already::]
totalBar.update(already)
coder = get_encoder()
for sym, plane in perterbationTensor:
logging.info("Started Symbol: " + sym)
symbolBar.reset()
symbolBar.set_description(sym)
vPlane = []
for i, vector in enumerate(plane):
vVector = []
vectorBar.reset(total=len(vector))
for pString in vector:
# tokenize pString
pTokens = coder.encode(pString)
                # locate departure from ground tokens
departure = firstMismatch(pTokens, groundTokens[i])
if departure is not None:
# sum error up to agreement with groundTokens
agreement = sum(groundValidityTensor[i][:departure])
                    # calculate validity of perturbed string from departure onward
departureValidity = partialErrorSeries(pTokens, departure)
# calculate total validity
validity = agreement + sum(departureValidity)
# compare to ground validity
validity_delta = (
sum(groundValidityTensor[i]) - validity
) # lower validity is better
else:
validity_delta = 0
vVector.append(validity_delta)
vectorBar.update()
vPlane.append(vVector)
symbolBar.update()
validityTensor.append((sym, vPlane))
totalBar.update()
logging.info("Finished Symbol: " + sym)
with open(args["vo"], "w") as f: # save checkpoint
json.dump(validityTensor, f)
vectorBar.close()
symbolBar.close()
totalBar.close()
return validityTensor
if __name__ == "__main__":
# with open(args["gs"], "r") as f:
# groundStrings = json.load(f)
# gvTen = calculateGroundValidityTensor(groundStrings)
# with open(args["gvo"], "w") as f:
# json.dump(gvTen, f)
with open(args["gs"], "r") as f:
groundStrings = json.load(f)
groundTokens = []
coder = get_encoder()
for gs in groundStrings:
groundTokens.append(coder.encode(gs))
with open(args["gvi"], "r") as f:
groundValidity = json.load(f)
with open(args["pt"], "r") as f:
perterbationTensor = json.load(f)
vt = calculateValidityTensor(
groundTokens, groundValidity, perterbationTensor, checkpoint=args["checkpoint"]
)
print("\n\n\n### --- SUCCESS! --- ###\n\n\n")
|
python
|
#
# PySNMP MIB module SUN-T300-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SUN-T300-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:04:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Bits, iso, Counter32, ModuleIdentity, NotificationType, Counter64, IpAddress, enterprises, NotificationType, MibIdentifier, Unsigned32, Gauge32, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Bits", "iso", "Counter32", "ModuleIdentity", "NotificationType", "Counter64", "IpAddress", "enterprises", "NotificationType", "MibIdentifier", "Unsigned32", "Gauge32", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
t300 = ModuleIdentity((1, 3, 6, 1, 4, 1, 42, 2, 28, 2))
if mibBuilder.loadTexts: t300.setLastUpdated('0012140000Z')
if mibBuilder.loadTexts: t300.setOrganization('SUN MICROSYSTEMS INCORPORATED')
sun = MibIdentifier((1, 3, 6, 1, 4, 1, 42))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2))
storage_subsystem = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28)).setLabel("storage-subsystem")
t300Reg = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 1))
t300Purple1 = ObjectIdentity((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 1, 1))
if mibBuilder.loadTexts: t300Purple1.setStatus('current')
t300Objs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2))
t300SystemObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1))
t300UnitObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2))
t300FruObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3))
t300VolumeObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4))
t300PortObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5))
t300AttachObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6))
t300LoopObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7))
t300LogObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8))
t300OndgObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9))
t300Events = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 3))
t300EventsV2 = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 3, 0))
sysId = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysId.setStatus('mandatory')
sysVendor = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysVendor.setStatus('mandatory')
sysModel = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysModel.setStatus('mandatory')
sysRevision = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysRevision.setStatus('mandatory')
sysStripeUnitSize = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysStripeUnitSize.setStatus('mandatory')
sysCacheMode = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("disabled", 1), ("writeThrough", 2), ("writeBehind", 3), ("auto", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheMode.setStatus('mandatory')
sysCacheMirror = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheMirror.setStatus('mandatory')
sysAutoDisable = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("disableOnly", 2), ("disableRecon", 3), ("reconOnly", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysAutoDisable.setStatus('obsolete')
sysMpSupport = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("readWrite", 2), ("mpxio", 3), ("std", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysMpSupport.setStatus('mandatory')
sysReadAhead = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysReadAhead.setStatus('mandatory')
sysReconRate = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("low", 1), ("medium", 2), ("high", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysReconRate.setStatus('mandatory')
sysOndgMode = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("passive", 2), ("active", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysOndgMode.setStatus('mandatory')
sysOndgTimeslice = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysOndgTimeslice.setStatus('mandatory')
sysIdleDiskTimeout = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysIdleDiskTimeout.setStatus('obsolete')
sysFruRemovalShutdown = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysFruRemovalShutdown.setStatus('mandatory')
sysBootMode = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("auto", 2), ("tftp", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysBootMode.setStatus('mandatory')
sysBootDelay = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysBootDelay.setStatus('mandatory')
sysSpinDelay = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysSpinDelay.setStatus('obsolete')
sysTftpHost = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 19), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysTftpHost.setStatus('mandatory')
sysTftpFile = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 20), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysTftpFile.setStatus('mandatory')
sysIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 21), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysIpAddr.setStatus('mandatory')
sysSubNet = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 22), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysSubNet.setStatus('mandatory')
sysGateway = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 23), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysGateway.setStatus('mandatory')
sysWriteRequests = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysWriteRequests.setStatus('mandatory')
sysReadRequests = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysReadRequests.setStatus('mandatory')
sysBlocksWritten = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysBlocksWritten.setStatus('mandatory')
sysBlocksRead = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysBlocksRead.setStatus('mandatory')
sysCacheWriteHits = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheWriteHits.setStatus('mandatory')
sysCacheWriteMisses = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheWriteMisses.setStatus('mandatory')
sysCacheReadHits = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheReadHits.setStatus('mandatory')
sysCacheReadMisses = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheReadMisses.setStatus('mandatory')
sysCacheRmwFlushes = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheRmwFlushes.setStatus('mandatory')
sysCacheReconFlushes = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 33), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheReconFlushes.setStatus('mandatory')
sysCacheStripeFlushes = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 34), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheStripeFlushes.setStatus('mandatory')
sysTimezone = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 35), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysTimezone.setStatus('mandatory')
sysDate = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 36), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysDate.setStatus('mandatory')
sysTime = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 37), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysTime.setStatus('mandatory')
sysRootSession = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 38), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysRootSession.setStatus('obsolete')
sysGuestSession = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 39), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysGuestSession.setStatus('obsolete')
sysLastMessage = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 40), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysLastMessage.setStatus('mandatory')
sysRarpEnabled = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 41), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysRarpEnabled.setStatus('mandatory')
sysLoop1Split = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysLoop1Split.setStatus('mandatory')
sysLastRestart = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 43), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysLastRestart.setStatus('mandatory')
sysCtime = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 44), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCtime.setStatus('mandatory')
sysHasVolumes = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 45), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysHasVolumes.setStatus('mandatory')
unitCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: unitCount.setStatus('mandatory')
unitTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2), )
if mibBuilder.loadTexts: unitTable.setStatus('mandatory')
unitEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"))
if mibBuilder.loadTexts: unitEntry.setStatus('mandatory')
unitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: unitIndex.setStatus('mandatory')
unitId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: unitId.setStatus('mandatory')
unitType = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("controller", 1), ("expansion", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: unitType.setStatus('mandatory')
unitStandby = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: unitStandby.setStatus('mandatory')
fruCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCount.setStatus('mandatory')
fruTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2), )
if mibBuilder.loadTexts: fruTable.setStatus('mandatory')
fruEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruEntry.setStatus('mandatory')
fruIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruIndex.setStatus('mandatory')
fruId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruId.setStatus('mandatory')
fruType = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("diskDrive", 1), ("controllerCard", 2), ("loopCard", 3), ("powerUnit", 4), ("midplane", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruType.setStatus('mandatory')
fruStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notInstalled", 1), ("fault", 2), ("ready", 3), ("offline", 4), ("booting", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruStatus.setStatus('mandatory')
fruState = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("substituted", 3), ("missing", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruState.setStatus('mandatory')
fruVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruVendor.setStatus('mandatory')
fruModel = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruModel.setStatus('mandatory')
fruRevision = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruRevision.setStatus('mandatory')
fruSerialNo = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruSerialNo.setStatus('mandatory')
fruErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruErrors.setStatus('mandatory')
fruDiskCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskCount.setStatus('mandatory')
fruDiskTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4), )
if mibBuilder.loadTexts: fruDiskTable.setStatus('mandatory')
fruDiskEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruDiskEntry.setStatus('mandatory')
fruDiskRole = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unassigned", 1), ("dataDisk", 2), ("standbyDisk", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskRole.setStatus('mandatory')
fruDiskPort1State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ready", 1), ("notReady", 2), ("bypass", 3), ("unknown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskPort1State.setStatus('mandatory')
fruDiskPort2State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ready", 1), ("notReady", 2), ("bypass", 3), ("unknown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskPort2State.setStatus('mandatory')
fruDiskCapacity = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskCapacity.setStatus('mandatory')
fruDiskStatusCode = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskStatusCode.setStatus('mandatory')
fruDiskVolName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskVolName.setStatus('mandatory')
fruDiskTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskTemp.setStatus('mandatory')
fruCtlrCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrCount.setStatus('mandatory')
fruCtlrTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6), )
if mibBuilder.loadTexts: fruCtlrTable.setStatus('mandatory')
fruCtlrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruCtlrEntry.setStatus('mandatory')
fruCtlrCpuDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrCpuDesc.setStatus('mandatory')
fruCtlrRole = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("master", 1), ("alternateMaster", 2), ("slave", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrRole.setStatus('mandatory')
fruCtlrPartnerId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrPartnerId.setStatus('mandatory')
fruCtlrCtState = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("expansionUnit", 1), ("booting", 2), ("online", 3), ("disabled", 4), ("disabling", 5), ("reset", 6), ("resetting", 7), ("reconfig", 8), ("hotPlug", 9), ("virtual", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrCtState.setStatus('mandatory')
fruCtlrCacheSize = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrCacheSize.setStatus('mandatory')
fruCtlrTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrTemp.setStatus('mandatory')
fruCtlrMdate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrMdate.setStatus('mandatory')
fruCtlrConsoleBaud = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrConsoleBaud.setStatus('mandatory')
fruLoopCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopCount.setStatus('mandatory')
fruLoopTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8), )
if mibBuilder.loadTexts: fruLoopTable.setStatus('mandatory')
fruLoopEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruLoopEntry.setStatus('mandatory')
fruLoopMode = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("master", 1), ("slave", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopMode.setStatus('mandatory')
fruLoopTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopTemp.setStatus('mandatory')
fruLoopCable1State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notInstalled", 1), ("installed", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopCable1State.setStatus('mandatory')
fruLoopCable2State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notInstalled", 1), ("installed", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopCable2State.setStatus('mandatory')
fruLoopMdate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopMdate.setStatus('mandatory')
fruPowerCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerCount.setStatus('mandatory')
fruPowerTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10), )
if mibBuilder.loadTexts: fruPowerTable.setStatus('mandatory')
fruPowerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruPowerEntry.setStatus('mandatory')
fruPowerPowOutput = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("normal", 2), ("fault", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerPowOutput.setStatus('mandatory')
fruPowerPowSource = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("line", 1), ("battery", 2), ("unknown", 3), ("none", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerPowSource.setStatus('mandatory')
fruPowerPowTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("normal", 1), ("overTemp", 2), ("unknown", 3), ("none", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerPowTemp.setStatus('mandatory')
fruPowerFan1State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("fault", 2), ("missing", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerFan1State.setStatus('mandatory')
fruPowerFan2State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("fault", 2), ("missing", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerFan2State.setStatus('mandatory')
fruPowerBatState = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notInstalled", 1), ("normal", 2), ("fault", 3), ("refreshing", 4), ("unknown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerBatState.setStatus('mandatory')
fruPowerBatLife = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerBatLife.setStatus('mandatory')
fruPowerBatUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerBatUsed.setStatus('mandatory')
fruPowerPowMdate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerPowMdate.setStatus('mandatory')
fruPowerBatMdate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerBatMdate.setStatus('mandatory')
fruMidplaneCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruMidplaneCount.setStatus('mandatory')
fruMidplaneTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 12), )
if mibBuilder.loadTexts: fruMidplaneTable.setStatus('mandatory')
fruMidplaneEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 12, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruMidplaneEntry.setStatus('mandatory')
fruMidplaneMdate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 12, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruMidplaneMdate.setStatus('mandatory')
volCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCount.setStatus('mandatory')
volTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2), )
if mibBuilder.loadTexts: volTable.setStatus('mandatory')
volEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "volIndex"))
if mibBuilder.loadTexts: volEntry.setStatus('mandatory')
volIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volIndex.setStatus('mandatory')
volId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volId.setStatus('mandatory')
volName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volName.setStatus('mandatory')
volWWN = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volWWN.setStatus('mandatory')
volStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("deleted", 1), ("uninitialized", 2), ("unmounted", 3), ("mounted", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volStatus.setStatus('mandatory')
volCacheMode = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("disabled", 1), ("writeThrough", 2), ("writeBehind", 3), ("auto", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheMode.setStatus('mandatory')
volCacheMirror = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheMirror.setStatus('mandatory')
volCapacity = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCapacity.setStatus('mandatory')
volArrayWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volArrayWidth.setStatus('mandatory')
volRaidLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("raid0", 1), ("raid1", 2), ("raid5", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volRaidLevel.setStatus('mandatory')
volWriteRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volWriteRequests.setStatus('mandatory')
volReadRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volReadRequests.setStatus('mandatory')
volBlocksWritten = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volBlocksWritten.setStatus('mandatory')
volBlocksRead = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volBlocksRead.setStatus('mandatory')
volSoftErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volSoftErrors.setStatus('mandatory')
volFirmErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volFirmErrors.setStatus('mandatory')
volHardErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volHardErrors.setStatus('mandatory')
volCacheWriteHits = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheWriteHits.setStatus('mandatory')
volCacheWriteMisses = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheWriteMisses.setStatus('mandatory')
volCacheReadHits = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheReadHits.setStatus('mandatory')
volCacheReadMisses = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheReadMisses.setStatus('mandatory')
volCacheRmwFlushes = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheRmwFlushes.setStatus('mandatory')
volCacheReconFlushes = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheReconFlushes.setStatus('mandatory')
volCacheStripeFlushes = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheStripeFlushes.setStatus('mandatory')
volDisabledDisk = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 25), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volDisabledDisk.setStatus('mandatory')
volSubstitutedDisk = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 26), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volSubstitutedDisk.setStatus('mandatory')
volOper = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("none", 1), ("reconstructing", 2), ("reconstructingToStandby", 3), ("copyingFromStandby", 4), ("copyingToStandby", 5), ("initializing", 6), ("verifying", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volOper.setStatus('mandatory')
volOperProgress = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 28), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volOperProgress.setStatus('mandatory')
volInitRate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volInitRate.setStatus('mandatory')
volVerifyRate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 30), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volVerifyRate.setStatus('mandatory')
portCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portCount.setStatus('mandatory')
portTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2), )
if mibBuilder.loadTexts: portTable.setStatus('mandatory')
portEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "portIndex"))
if mibBuilder.loadTexts: portEntry.setStatus('mandatory')
portIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portIndex.setStatus('mandatory')
portId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portId.setStatus('mandatory')
portType = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ultraScsi", 1), ("fibreChannel", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portType.setStatus('mandatory')
portFruId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portFruId.setStatus('mandatory')
portWriteRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portWriteRequests.setStatus('mandatory')
portReadRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portReadRequests.setStatus('mandatory')
portBlocksWritten = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portBlocksWritten.setStatus('mandatory')
portBlocksRead = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portBlocksRead.setStatus('mandatory')
portSunHost = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portSunHost.setStatus('mandatory')
portWWN = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 136))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portWWN.setStatus('mandatory')
portStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("offline", 1), ("online", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portStatus.setStatus('mandatory')
portErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portErrors.setStatus('mandatory')
portFibreCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portFibreCount.setStatus('mandatory')
portFibreTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 4), )
if mibBuilder.loadTexts: portFibreTable.setStatus('mandatory')
portFibreEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 4, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "portIndex"))
if mibBuilder.loadTexts: portFibreEntry.setStatus('mandatory')
portFibreAlpaMode = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("hard", 1), ("soft", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portFibreAlpaMode.setStatus('mandatory')
portFibreAlpa = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 4, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portFibreAlpa.setStatus('mandatory')
attachCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachCount.setStatus('mandatory')
attachTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2), )
if mibBuilder.loadTexts: attachTable.setStatus('mandatory')
attachEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "portIndex"), (0, "SUN-T300-MIB", "attachIndex"))
if mibBuilder.loadTexts: attachEntry.setStatus('mandatory')
attachIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachIndex.setStatus('mandatory')
attachLun = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachLun.setStatus('mandatory')
attachMode = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("primary", 1), ("secondary", 2), ("failover", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachMode.setStatus('mandatory')
attachVolId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachVolId.setStatus('mandatory')
attachVolName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachVolName.setStatus('mandatory')
attachVolOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachVolOwner.setStatus('mandatory')
loopCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: loopCount.setStatus('mandatory')
loopTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2), )
if mibBuilder.loadTexts: loopTable.setStatus('mandatory')
loopEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "loopIndex"))
if mibBuilder.loadTexts: loopEntry.setStatus('mandatory')
loopIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: loopIndex.setStatus('mandatory')
loopId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: loopId.setStatus('mandatory')
loopStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("available", 1), ("reserved", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: loopStatus.setStatus('mandatory')
loopMux = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("isolated", 1), ("top", 2), ("bottom", 3), ("middle", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: loopMux.setStatus('mandatory')
logStatus = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: logStatus.setStatus('mandatory')
logTo = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: logTo.setStatus('mandatory')
logFile = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: logFile.setStatus('mandatory')
logLevel = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("none-0", 1), ("error-1", 2), ("warning-2", 3), ("notice-3", 4), ("all-4", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: logLevel.setStatus('mandatory')
logPort = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: logPort.setStatus('mandatory')
ondgOper = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("test", 1), ("fastTest", 2), ("find", 3), ("fastFind", 4), ("healthCheck", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ondgOper.setStatus('mandatory')
ondgOperPending = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ondgOperPending.setStatus('mandatory')
ondgOperProgress = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ondgOperProgress.setStatus('mandatory')
ondgError = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ondgError.setStatus('mandatory')
ondgId = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ondgId.setStatus('mandatory')
sysMessage = NotificationType((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 3) + (0,1)).setObjects(("SUN-T300-MIB", "sysLastMessage"))
mibBuilder.exportSymbols("SUN-T300-MIB", fruPowerPowTemp=fruPowerPowTemp, t300SystemObjs=t300SystemObjs, volRaidLevel=volRaidLevel, portBlocksWritten=portBlocksWritten, fruLoopEntry=fruLoopEntry, fruTable=fruTable, ondgOperProgress=ondgOperProgress, portFruId=portFruId, logFile=logFile, portIndex=portIndex, fruLoopTemp=fruLoopTemp, fruDiskStatusCode=fruDiskStatusCode, fruPowerBatState=fruPowerBatState, sysGateway=sysGateway, sysBlocksWritten=sysBlocksWritten, portCount=portCount, loopIndex=loopIndex, t300LoopObjs=t300LoopObjs, sysStripeUnitSize=sysStripeUnitSize, portTable=portTable, sysOndgTimeslice=sysOndgTimeslice, sysTftpFile=sysTftpFile, portFibreAlpa=portFibreAlpa, sysFruRemovalShutdown=sysFruRemovalShutdown, unitType=unitType, fruDiskPort1State=fruDiskPort1State, products=products, unitCount=unitCount, fruVendor=fruVendor, fruCtlrCpuDesc=fruCtlrCpuDesc, fruPowerFan1State=fruPowerFan1State, t300FruObjs=t300FruObjs, sysGuestSession=sysGuestSession, volArrayWidth=volArrayWidth, portBlocksRead=portBlocksRead, fruId=fruId, portId=portId, t300=t300, volReadRequests=volReadRequests, unitEntry=unitEntry, volCount=volCount, volCacheRmwFlushes=volCacheRmwFlushes, ondgOper=ondgOper, portEntry=portEntry, volCacheStripeFlushes=volCacheStripeFlushes, volCacheMode=volCacheMode, sysReadAhead=sysReadAhead, sysIpAddr=sysIpAddr, fruErrors=fruErrors, volEntry=volEntry, sysDate=sysDate, volCapacity=volCapacity, volBlocksRead=volBlocksRead, sysCacheMode=sysCacheMode, fruCtlrRole=fruCtlrRole, fruMidplaneTable=fruMidplaneTable, fruPowerCount=fruPowerCount, fruMidplaneMdate=fruMidplaneMdate, sysWriteRequests=sysWriteRequests, volCacheWriteHits=volCacheWriteHits, fruDiskCapacity=fruDiskCapacity, attachVolName=attachVolName, volSubstitutedDisk=volSubstitutedDisk, t300EventsV2=t300EventsV2, portErrors=portErrors, sysSpinDelay=sysSpinDelay, fruIndex=fruIndex, fruCount=fruCount, sysAutoDisable=sysAutoDisable, t300Objs=t300Objs, sysLastRestart=sysLastRestart, fruPowerEntry=fruPowerEntry, portReadRequests=portReadRequests, sysBootMode=sysBootMode, fruModel=fruModel, PYSNMP_MODULE_ID=t300, storage_subsystem=storage_subsystem, volFirmErrors=volFirmErrors, unitId=unitId, sysHasVolumes=sysHasVolumes, portStatus=portStatus, fruSerialNo=fruSerialNo, t300UnitObjs=t300UnitObjs, loopStatus=loopStatus, fruLoopCable2State=fruLoopCable2State, fruPowerBatLife=fruPowerBatLife, sysLastMessage=sysLastMessage, fruCtlrTable=fruCtlrTable, fruMidplaneCount=fruMidplaneCount, sysCacheWriteHits=sysCacheWriteHits, fruCtlrConsoleBaud=fruCtlrConsoleBaud, t300Reg=t300Reg, volCacheReadHits=volCacheReadHits, attachIndex=attachIndex, sysSubNet=sysSubNet, fruDiskRole=fruDiskRole, sysModel=sysModel, volStatus=volStatus, volCacheReadMisses=volCacheReadMisses, attachVolId=attachVolId, sysRevision=sysRevision, fruCtlrTemp=fruCtlrTemp, fruPowerBatMdate=fruPowerBatMdate, sysLoop1Split=sysLoop1Split, volOper=volOper, portType=portType, attachMode=attachMode, logPort=logPort, t300LogObjs=t300LogObjs, unitIndex=unitIndex, portFibreCount=portFibreCount, sysReadRequests=sysReadRequests, volId=volId, portFibreEntry=portFibreEntry, sysVendor=sysVendor, volSoftErrors=volSoftErrors, fruPowerFan2State=fruPowerFan2State, sysBlocksRead=sysBlocksRead, volTable=volTable, sysId=sysId, attachEntry=attachEntry, sysRootSession=sysRootSession, ondgId=ondgId, sysCacheWriteMisses=sysCacheWriteMisses, attachLun=attachLun, attachVolOwner=attachVolOwner, sysTimezone=sysTimezone, sysCacheReconFlushes=sysCacheReconFlushes, attachTable=attachTable, t300Events=t300Events, 
logLevel=logLevel, sysCacheMirror=sysCacheMirror, volWriteRequests=volWriteRequests, t300OndgObjs=t300OndgObjs, sysCacheStripeFlushes=sysCacheStripeFlushes, portFibreAlpaMode=portFibreAlpaMode, logStatus=logStatus, t300AttachObjs=t300AttachObjs, fruCtlrCount=fruCtlrCount, loopTable=loopTable, volDisabledDisk=volDisabledDisk, fruEntry=fruEntry, sysMessage=sysMessage, fruDiskEntry=fruDiskEntry, portWWN=portWWN, volVerifyRate=volVerifyRate, volName=volName, sun=sun, sysReconRate=sysReconRate, fruDiskPort2State=fruDiskPort2State, fruCtlrCtState=fruCtlrCtState, fruPowerPowOutput=fruPowerPowOutput, fruCtlrPartnerId=fruCtlrPartnerId, fruStatus=fruStatus, fruLoopTable=fruLoopTable, fruPowerPowMdate=fruPowerPowMdate, sysCacheReadMisses=sysCacheReadMisses, fruLoopMdate=fruLoopMdate, portFibreTable=portFibreTable, ondgOperPending=ondgOperPending, fruPowerTable=fruPowerTable, sysCacheReadHits=sysCacheReadHits, logTo=logTo, loopEntry=loopEntry, volCacheWriteMisses=volCacheWriteMisses, fruType=fruType, fruDiskTemp=fruDiskTemp, volCacheReconFlushes=volCacheReconFlushes, volInitRate=volInitRate, attachCount=attachCount, fruPowerBatUsed=fruPowerBatUsed, fruCtlrEntry=fruCtlrEntry, ondgError=ondgError, t300VolumeObjs=t300VolumeObjs, sysCtime=sysCtime, loopId=loopId, fruDiskCount=fruDiskCount, sysOndgMode=sysOndgMode, volCacheMirror=volCacheMirror, portWriteRequests=portWriteRequests, sysCacheRmwFlushes=sysCacheRmwFlushes, sysTime=sysTime, fruLoopMode=fruLoopMode, loopMux=loopMux, fruDiskVolName=fruDiskVolName, volIndex=volIndex, sysTftpHost=sysTftpHost, fruState=fruState, fruCtlrCacheSize=fruCtlrCacheSize, loopCount=loopCount, fruPowerPowSource=fruPowerPowSource, sysIdleDiskTimeout=sysIdleDiskTimeout, sysBootDelay=sysBootDelay, volBlocksWritten=volBlocksWritten, fruRevision=fruRevision, unitStandby=unitStandby, fruLoopCount=fruLoopCount, volHardErrors=volHardErrors, fruDiskTable=fruDiskTable, fruLoopCable1State=fruLoopCable1State, fruCtlrMdate=fruCtlrMdate, sysRarpEnabled=sysRarpEnabled, fruMidplaneEntry=fruMidplaneEntry, t300Purple1=t300Purple1, unitTable=unitTable, volWWN=volWWN, sysMpSupport=sysMpSupport, volOperProgress=volOperProgress, t300PortObjs=t300PortObjs, portSunHost=portSunHost)
|
python
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from nlp_architect.models.temporal_convolutional_network import TCN, CommonLayers
class TCNForLM(TCN, CommonLayers):
"""
Main class that defines the training graph and the training run method for language modeling
"""
def __init__(self, *args, **kwargs):
super(TCNForLM, self).__init__(*args, **kwargs)
self.num_words = None
self.input_placeholder_tokens = None
self.label_placeholder_tokens = None
self.learning_rate = None
self.input_embeddings = None
self.prediction = None
self.projection_out = None
self.gen_seq_prob = None
self.training_loss = None
self.validation_loss = None
self.test_loss = None
self.merged_summary_op_train = None
self.merged_summary_op_test = None
self.merged_summary_op_val = None
self.training_update_step = None
def run(self, data_loaders, lr, num_iterations=100, log_interval=100, result_dir="./",
ckpt=None):
"""
Args:
data_loaders: dict, keys are "train", "valid", "test",
values are corresponding iterator dataloaders
lr: float, learning rate
num_iterations: int, number of iterations to run
log_interval: int, number of iterations after which to run validation and log
result_dir: str, path to results directory
ckpt: str, location of checkpoint file
Returns:
None
"""
summary_writer = tf.summary.FileWriter(os.path.join(result_dir, "tfboard"),
tf.get_default_graph())
saver = tf.train.Saver(max_to_keep=None)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init)
if ckpt is not None:
saver.restore(sess, ckpt)
all_vloss = []
for i in range(num_iterations):
x_data, y_data = next(data_loaders["train"])
feed_dict = {self.input_placeholder_tokens: x_data,
self.label_placeholder_tokens: y_data, self.training_mode: True,
self.learning_rate: lr}
_, summary_train, total_loss_i = sess.run([self.training_update_step,
self.merged_summary_op_train,
self.training_loss],
feed_dict=feed_dict)
summary_writer.add_summary(summary_train, i)
if i % log_interval == 0:
print("Step {}: Total: {}".format(i, total_loss_i))
saver.save(sess, result_dir, global_step=i)
val_loss = {}
for split_type in ["valid", "test"]:
val_loss[split_type] = 0
data_loaders[split_type].reset()
count = 0
for x_data_test, y_data_test in data_loaders[split_type]:
feed_dict = {self.input_placeholder_tokens: x_data_test,
self.label_placeholder_tokens: y_data_test,
self.training_mode: False}
val_loss[split_type] += sess.run(self.training_loss, feed_dict=feed_dict)
count += 1
val_loss[split_type] = val_loss[split_type] / count
summary_val = sess.run(self.merged_summary_op_val,
feed_dict={self.validation_loss: val_loss["valid"]})
summary_test = sess.run(self.merged_summary_op_test,
feed_dict={self.test_loss: val_loss["test"]})
summary_writer.add_summary(summary_val, i)
summary_writer.add_summary(summary_test, i)
print("Validation loss: {}".format(val_loss["valid"]))
print("Test loss: {}".format(val_loss["test"]))
all_vloss.append(val_loss["valid"])
if i > 3 * log_interval and val_loss["valid"] >= max(all_vloss[-5:]):
lr = lr / 2.
def run_inference(self, ckpt, num_samples=10, sos=0, eos=1):
"""
Method for running inference for generating sequences
Args:
ckpt: Location of checkpoint file with trained model
num_samples: int, number of samples to generate
sos: int, start of sequence symbol
eos: int, end of sequence symbol
Returns:
List of sequences
"""
saver = tf.train.Saver(max_to_keep=None)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
if ckpt is not None:
saver.restore(sess, ckpt)
results = self.sample_sequence(sess, num_samples, sos=sos, eos=eos)
return results
def build_train_graph(self, num_words=20000, word_embeddings=None, max_gradient_norm=None,
em_dropout=0.4):
"""
Method that builds the graph for training
Args:
num_words: int, number of words in the vocabulary
word_embeddings: numpy array, optional numpy array to initialize embeddings
max_gradient_norm: float, maximum gradient norm value for clipping
em_dropout: float, dropout rate for embeddings
Returns:
None
"""
self.num_words = num_words
with tf.variable_scope("input", reuse=True):
self.input_placeholder_tokens = tf.placeholder(tf.int32, [None, self.max_len],
name='input_tokens')
self.label_placeholder_tokens = tf.placeholder(tf.int32, [None, self.max_len],
name='input_tokens_shifted')
self.learning_rate = tf.placeholder(tf.float32, shape=(), name='learning_rate')
self.input_embeddings = self.define_input_layer(self.input_placeholder_tokens,
word_embeddings,
embeddings_trainable=True)
input_embeddings_dropped = tf.layers.dropout(self.input_embeddings,
rate=em_dropout,
training=self.training_mode)
self.prediction = self.build_network_graph(input_embeddings_dropped,
last_timepoint=False)
if self.prediction.shape[-1] != self.n_features_in:
print("Not tying weights")
tied_weights = False
else:
print("Tying weights")
tied_weights = True
self.projection_out = self.define_projection_layer(self.prediction,
tied_weights=tied_weights)
self.gen_seq_prob = tf.nn.softmax(self.projection_out)
with tf.variable_scope("training"):
params = tf.trainable_variables()
soft_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.label_placeholder_tokens, logits=self.projection_out)
ce_last_tokens = tf.slice(soft_ce, [0, int(self.max_len / 2)],
[-1, int(self.max_len / 2)])
self.training_loss = tf.reduce_mean(ce_last_tokens)
summary_ops_train = [tf.summary.scalar("Training Loss", self.training_loss),
tf.summary.scalar("Training perplexity",
tf.exp(self.training_loss))]
self.merged_summary_op_train = tf.summary.merge(summary_ops_train)
self.validation_loss = tf.placeholder(tf.float32, shape=())
summary_ops_val = [tf.summary.scalar("Validation Loss", self.validation_loss),
tf.summary.scalar("Validation perplexity",
tf.exp(self.validation_loss))]
self.merged_summary_op_val = tf.summary.merge(summary_ops_val)
self.test_loss = tf.placeholder(tf.float32, shape=())
summary_ops_test = [tf.summary.scalar("Test Loss", self.test_loss),
tf.summary.scalar("Test perplexity", tf.exp(self.test_loss))]
self.merged_summary_op_test = tf.summary.merge(summary_ops_test)
# Calculate and clip gradients
gradients = tf.gradients(self.training_loss, params)
if max_gradient_norm is not None:
clipped_gradients, _ = tf.clip_by_global_norm(gradients, max_gradient_norm)
else:
clipped_gradients = gradients
grad_norm = tf.global_norm(clipped_gradients)
summary_ops_train.append(tf.summary.scalar("Grad Norm", grad_norm))
# Optimization
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
summary_ops_train.append(tf.summary.scalar("Learning rate", self.learning_rate))
self.merged_summary_op_train = tf.summary.merge(summary_ops_train)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
with tf.control_dependencies(update_ops):
self.training_update_step = optimizer.apply_gradients(zip(clipped_gradients,
params))
def sample_sequence(self, sess, num_samples=10, sos=0, eos=1):
"""
Method for sampling a sequence (repeatedly one symbol at a time)
Args:
sess: tensorflow session
num_samples: int, number of samples to generate
sos: int, start of sequence symbol
eos: int, end of sequence symbol
Returns:
List of sequences
"""
all_sequences = []
for _ in tqdm(range(num_samples)):
sampled_sequence = []
input_sequence = sos * np.ones((1, self.max_len))
count = 0
elem = sos
while (elem != eos) and (count <= self.max_len * 10):
feed_dict = {self.input_placeholder_tokens: input_sequence,
self.training_mode: False}
gen_seq_prob_value = sess.run(self.gen_seq_prob, feed_dict=feed_dict)
prob = gen_seq_prob_value[0, -1, :].astype(np.float64)
prob = prob / sum(prob)
elem = np.where(np.random.multinomial(1, prob))[0][0]
input_sequence = np.roll(input_sequence, -1, axis=-1)
input_sequence[:, -1] = elem
count += 1
sampled_sequence.append(elem)
all_sequences.append(sampled_sequence)
return all_sequences
|
python
|
def foo():
print "hello every body"
|
python
|
from relevanceai.base import _Base
from relevanceai.api.endpoints.centroids import CentroidsClient
class ClusterClient(_Base):
def __init__(self, project, api_key):
self.project = project
self.api_key = api_key
self.centroids = CentroidsClient(project=project, api_key=api_key)
super().__init__(project, api_key)
def aggregate(
self,
dataset_id: str,
vector_fields: list,
metrics: list = [],
groupby: list = [],
filters: list = [],
page_size: int = 20,
page: int = 1,
asc: bool = False,
flatten: bool = True,
alias: str = "default",
):
"""
Takes an aggregation query and gets the aggregate of each cluster in a collection. This helps you interpret each cluster and what is in them.
It can only be used after a vector field has been clustered. \n
For more information about aggregations check out services.aggregate.aggregate.
Parameters
----------
dataset_id : string
Unique name of dataset
vector_fields : list
The vector field that was clustered on
metrics: list
Fields and metrics you want to calculate
groupby: list
Fields you want to split the data into
filters: list
Query for filtering the search results
page_size: int
Size of each page of results
page: int
Page of the results
asc: bool
Whether to sort results by ascending or descending order
flatten: bool
Whether to flatten the aggregation results
alias: string
Alias used to name a vector field. Belongs in field_{alias}vector
"""
endpoint = "/services/cluster/aggregate"
method = "POST"
parameters = {
"dataset_id": dataset_id,
"aggregation_query": {"groupby": groupby, "metrics": metrics},
"filters": filters,
"page_size": page_size,
"page": page,
"asc": asc,
"flatten": flatten,
"vector_fields": vector_fields,
"alias": alias,
}
self._log_to_dashboard(
method=method,
parameters=parameters,
endpoint=endpoint,
dashboard_type="cluster_aggregation",
)
return self.make_http_request(
endpoint=endpoint, method=method, parameters=parameters
)
def facets(
self,
dataset_id: str,
facets_fields: list = [],
page_size: int = 20,
page: int = 1,
asc: bool = False,
date_interval: str = "monthly",
):
"""
Takes a high level aggregation of every field and every cluster in a collection. This helps you interpret each cluster and what is in them. \n
It can only be used after a vector field has been clustered.
Parameters
----------
dataset_id : string
Unique name of dataset
facets_fields : list
Fields to include in the facets, if [] then all
page_size: int
Size of each page of results
page: int
Page of the results
asc: bool
Whether to sort results by ascending or descending order
date_interval: string
Interval for date facets
"""
return self.make_http_request(
endpoint="/services/cluster/facets",
method="GET",
parameters={
"dataset_id": dataset_id,
"facets_fields": facets_fields,
"page_size": page_size,
"page": page,
"asc": asc,
"date_interval": date_interval,
},
)
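# --- Added usage sketch (not part of the original client) ---------------------
# A minimal, hedged example of how ClusterClient.aggregate might be called.
# "my_project", "my_api_key", the dataset name, and the metric/groupby dictionaries
# below are hypothetical placeholders; the exact aggregation-query schema should be
# checked against services.aggregate.aggregate before relying on it.
if __name__ == "__main__":
    client = ClusterClient(project="my_project", api_key="my_api_key")
    response = client.aggregate(
        dataset_id="ecommerce-sample",
        vector_fields=["product_title_vector_"],
        metrics=[{"name": "avg_price", "field": "price", "agg": "avg"}],
        groupby=[{"name": "category", "field": "category", "agg": "category"}],
        page_size=5,
    )
    print(response)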
|
python
|
import requests
import urllib.error
from bs4 import BeautifulSoup
from os import path, makedirs
import wget
class Crawler:
"""
Class that crawls pages with URLs like 'http(s)://page_path/page_name_{number}/' and downloads pictures
"""
def __init__(self, url_pattern, page_number, css_alt=None):
self.url_pattern = url_pattern
self.page_number = page_number
self.image_urls = []
self.css_alt = css_alt
self.local_path = path.join(path.dirname(path.realpath(__file__)))
self.drop_folder = path.join(self.local_path, self.url_pattern.strip().split('/')[-3])
def get_images_url_list(self):
for num, image_url in enumerate(self.image_urls):
print("Number: {}\t Url: {}\n".format(num, image_url))
def images_urls(self, url_):
r = requests.get(url_)
soup = BeautifulSoup(r.content.decode(), "html.parser")
if self.css_alt:
found_images = soup.findAll("img", {"alt": self.css_alt})
else:
found_images = soup.findAll("img")
for img in found_images:
self.image_urls.append(img.get('src'))
def images(self, url_, drop_name):
if not path.isdir(self.drop_folder):
makedirs(self.drop_folder, mode=0o777, exist_ok=True)
drop_path = path.join(self.drop_folder, drop_name)
try:
wget.download(url_.strip(), drop_path)
except (ValueError, urllib.error.HTTPError) as e:
print("Can't get url {} on page {} because errors {}".format(url_, self.page_number, e))
pass
def main(self):
page_url = self.url_pattern.format(num=self.page_number)
self.images_urls(page_url)
self.get_images_url_list()
if int(self.page_number) < 10:
self.page_number = '0{}'.format(self.page_number)
for num, image_url in enumerate(self.image_urls):
drop_name = '{}.{}.jpg'.format(self.page_number, num)
self.images(image_url, drop_name)
if __name__ == '__main__':
url_p= 'http://site_name_{num}/'
n = 'num'
print("Downloading from page {}\n".format(n))
crawler = Crawler(url_pattern=url_p, page_number=n)
crawler.main()
|
python
|
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scripts.autoscale_sge import CloudProvider, CloudPipelineInstanceHelper
AZURE_DSV = "Dsv3"
AZURE_BMS = "Bms"
GCP_STANDARD = "standard"
GCP_HIGHCPU = "highcpu"
AWS_C5 = "c5"
AWS_P2 = "p2"
def test_aws_familes():
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.aws(), "c5.xlarge")
assert family == AWS_C5
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.aws(), "p2.xlarge")
assert family == AWS_P2
def test_gcp_familes():
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.gcp(), "n2-standard-2")
assert family == GCP_STANDARD
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.gcp(), "n2-highcpu-2")
assert family == GCP_HIGHCPU
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.gcp(), "custom-12-16")
assert family is None
def test_azure_familes():
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.azure(), "Standard_B1ms")
assert family == AZURE_BMS
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.azure(), "Standard_D2s_v3")
assert family == AZURE_DSV
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.azure(), "Standard_D16s_v3")
assert family == AZURE_DSV
|
python
|
#!/usr/bin/env python3
# Allowed libraries: copy, math
# Importing any other library will not pass the evaluation service.
# The fact that some libraries are allowed does not mean they have to be used.
# IB002 Homework assignment 9.
#
# In this assignment we deal with binary search trees.
#
# In the first part, your task is to build an almost complete binary search
# tree containing the given keys. The input array of keys is ordered from the
# smallest to the largest. Your algorithm must have LINEAR time complexity with
# respect to the number of given keys. This requirement is achievable thanks to
# the input array being sorted.
#
# In the second part, your task is to determine whether a given binary search
# tree is almost complete. The required time complexity is linear in the number
# of nodes in the tree.
#
# In the third part, your task is to determine whether all leaves of a given
# binary search tree are at the same depth. The required time complexity is
# again linear in the number of nodes in the tree.
#
# An almost complete tree has all levels filled; only the last one does not
# have to be completely filled (and it also does not have to be left-aligned).
#
# For illustration, for the input (1,2,3,4,5,6,7,8,9,10) a correct output of
# the algorithm from the first part is, for example, one of the following trees:
#
# ( 5 ) ( 7 )
# / \ / \
# (2) (8) ( 4 ) ( 9 )
# / \ / \ / \ / \
# (1) (3) (6) (9) (2) (6) (8) (10)
# \ \ \ / \ /
# (4) (7) (10) (1) (3) (5)
# Do not modify the following class definitions in any way.
# You can use the helper function make_graph below to draw the trees.
class BSTree:
"""Trida BSTree pro reprezentaci binarniho vyhledavacicho stromu.
Atributy:
root koren stromu typu Node, nebo None, pokud je strom prazdny
"""
def __init__(self):
self.root = None
class Node:
"""Trida Node pro reprezentaci uzlu binarniho vyhledavaciho stromu.
Atributy:
data hodnota daneho uzlu (zadana pri inicializaci)
left odkaz na leveho potomka typu Node, nebo None, pokud neexistuje
right odkaz na praveho potomka typu Node, nebo None, pokud neexistuje
"""
def __init__(self, data):
self.left = None
self.right = None
self.data = data
# Task 1.
# Implement the function build_bst, which receives an ascending sorted list of
# values and builds an almost complete binary search tree (of type BSTree) from them.
def build_bst_rec(array, start, end):
""" Build almost complete tree. """
if start > end:
return None
mid = (start + end) // 2
node = Node(array[mid])
node.left = build_bst_rec(array, start, mid - 1)
node.right = build_bst_rec(array, mid + 1, end)
return node
def build_bst(array):
"""
input:  'array' an ascending sorted array of values
output: a tree of type BSTree that is almost complete (see above) and
contains the values from array
time complexity: O(n), where 'n' is the length of array
auxiliary space complexity:
O(1), not counting the input array or the output tree
"""
tree = BSTree()
tree.root = build_bst_rec(array, 0, len(array) - 1)
return tree
# Task 2.
# Implement the function check_almost_complete, which receives a binary search
# tree and tests whether it is almost complete.
def tree_height_n(node):
""" Return tree height. """
if node is None:
return -1
left = tree_height_n(node.left)
right = tree_height_n(node.right)
return max(left, right) + 1
def check_almost_complete_rec(node, depth, height):
""" Check if given tree is almost complete tree recursively. """
if depth >= height - 1:
return True
if node.left is None or node.right is None:
return False
return check_almost_complete_rec(node.left, depth + 1, height) \
and \
check_almost_complete_rec(node.right, depth + 1, height)
def check_almost_complete(tree):
"""
input:  'tree' a binary search tree of type BSTree
output: True if 'tree' is almost complete
False otherwise
time complexity: O(n), where 'n' is the number of nodes in the tree
auxiliary space complexity: O(1) (not counting the input)
"""
if tree.root is None:
return True
height = tree_height_n(tree.root)
return check_almost_complete_rec(tree.root, 0, height)
# Task 3.
# Implement the function check_all_leaves_same_depth, which checks whether all
# leaves of the given binary search tree are at the same depth.
class Storage:
def __init__(self):
self.level = None
def check_all_leaves_same_depth_rec(node, depth, storage):
if node is None:
return True
if node.left is None and node.right is None:
if storage.level is None:
storage.level = depth
return True
return depth == storage.level
return check_all_leaves_same_depth_rec(node.left, depth + 1, storage) \
and \
check_all_leaves_same_depth_rec(node.right, depth + 1, storage)
def check_all_leaves_same_depth(tree):
"""
input:  'tree' a binary search tree of type BSTree
output: True if all leaves of 'tree' are at the same depth
False otherwise
time complexity: O(n), where 'n' is the number of nodes in the tree
auxiliary space complexity: O(1) (not counting the input)
"""
return check_all_leaves_same_depth_rec(tree.root, 0, Storage())
# The helper function make_graph generates a .dot file based on the tree passed
# as an argument. The function only displays the current state of the given node
# and its children; it does not check in any way whether the tree is a BST.
#
# Either find a tool that can open the generated file, or use one of these links:
# http://sandbox.kidstrythisathome.com/erdos/ or http://www.webgraphviz.com/
#
# Just copy the contents of the file into the form on the web page.
def make_graph(tree, filename="bst.dot"):
def dot_node(fd, node):
if node is None:
return
fd.write('{} [label="{}"]\n'.format(id(node), node.data))
for child, lr in (node.left, 'L'), (node.right, 'R'):
dot_node(fd, child)
dot_node_relations(fd, node, child, lr)
def dot_node_relations(fd, parent, node, direction):
if node is None:
nil = direction + str(id(parent))
fd.write('{} [label="",color=white]\n{} -> {}\n'
.format(nil, id(parent), nil))
else:
fd.write('{} -> {}\n'.format(id(parent), id(node)))
with open(filename, "w") as fd:
fd.write("digraph {\n")
fd.write("node [color=lightblue2,style=filled]\n")
dot_node(fd, tree.root)
fd.write("}\n")
##################################################################
# TESTS
##################################################################
bs_tree_0 = build_bst([0])
bs_tree_1 = build_bst([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
bs_tree_2 = build_bst([1, 1, 1, 1, 1, 2, 3, 3, 4, 5, 5, 5, 5, 6])
bs_tree_3 = BSTree()
node_0 = Node(0)
node_1 = Node(1)
node_2 = Node(2)
node_3 = Node(3)
node_4 = Node(4)
node_1.left = node_0
node_1.right = node_2
node_2.right = node_3
node_3.right = node_4
bs_tree_3.root = node_1
bs_tree_4 = BSTree()
node_1_1 = Node(1)
node_1_2 = Node(2)
node_1_3 = Node(3)
node_1_1.right = node_1_2
node_1_2.right = node_1_3
bs_tree_4.root = node_1_1
print(tree_height_n(bs_tree_0.root))
print(tree_height_n(bs_tree_1.root))
print(tree_height_n(bs_tree_2.root))
print(tree_height_n(bs_tree_3.root))
print(tree_height_n(bs_tree_4.root))
print("Check if binary tree is almost complete tree")
print(check_almost_complete(bs_tree_0)) # true
print(check_almost_complete(bs_tree_1)) # true
print(check_almost_complete(bs_tree_2)) # true
print(check_almost_complete(bs_tree_3)) # false
print(check_almost_complete(bs_tree_4)) # false
print("Check if all leaves of binary tree have same depth")
print(check_all_leaves_same_depth(bs_tree_0)) # true
print(check_all_leaves_same_depth(bs_tree_1)) # false
print(check_all_leaves_same_depth(bs_tree_2)) # true
print(check_all_leaves_same_depth(bs_tree_3)) # false
print(check_all_leaves_same_depth(bs_tree_4)) # true
|
python
|
class Item:
def __init__(self, name, tag, desc, intro):
self.name = name
self.tag = tag
self.desc = desc
self.intro = intro
def __str__(self):
return f"=> {self.name} - {self.desc}"
def getItem(self, player):
player.inventory.append(self)
def getIntro(self):
return self.intro
# The Item subclasses below forward fixed keyword arguments to the base constructor;
# only the intro text is supplied by the caller, positionally as the first argument.
class Gum(Item):
def __init__(self, intro):
super().__init__(name="Gum",
tag="gum",
desc="a single stick of gum.",
intro=intro)
class Screwdriver(Item):
def __init__(self, intro = "It's a screwdriver"):
super().__init__(name="Screwdriver",
tag="screwdriver",
desc="this could come in handy",
intro=intro)
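# --- Added usage sketch (illustration only, not part of the original game code) ---
# Shows how the item classes above might be constructed and inspected; the intro
# text passed to Gum is made up for this example.
if __name__ == "__main__":
    gum = Gum(intro="You notice a stick of gum on the desk.")
    screwdriver = Screwdriver()
    print(gum)                     # prints "=> Gum - a single stick of gum."
    print(screwdriver.getIntro())  # prints "It's a screwdriver"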
|
python
|
# pylint: skip-file
|
python
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.task_queue."""
import tensorflow as tf
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_queue
from tfx.orchestration.experimental.core import test_utils
from tfx.utils import test_case_utils as tu
def _test_task(node_id, pipeline_id):
node_uid = task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid(pipeline_id=pipeline_id),
node_id=node_id)
return test_utils.create_exec_node_task(node_uid)
class TaskQueueTest(tu.TfxTest):
def test_task_queue_operations(self):
t1 = _test_task(node_id='trainer', pipeline_id='my_pipeline')
t2 = _test_task(node_id='transform', pipeline_id='my_pipeline')
tq = task_queue.TaskQueue()
# Enqueueing new tasks is successful.
self.assertTrue(tq.enqueue(t1))
self.assertTrue(tq.enqueue(t2))
# Re-enqueueing the same tasks fails.
self.assertFalse(tq.enqueue(t1))
self.assertFalse(tq.enqueue(t2))
# Dequeue succeeds and returns `None` when queue is empty.
self.assertEqual(t1, tq.dequeue())
self.assertEqual(t2, tq.dequeue())
self.assertIsNone(tq.dequeue())
self.assertIsNone(tq.dequeue(0.1))
# Re-enqueueing the same tasks fails as `task_done` has not been called.
self.assertFalse(tq.enqueue(t1))
self.assertFalse(tq.enqueue(t2))
tq.task_done(t1)
tq.task_done(t2)
# Re-enqueueing is allowed after `task_done` has been called.
self.assertTrue(tq.enqueue(t1))
self.assertTrue(tq.enqueue(t2))
def test_invalid_task_done_raises_errors(self):
t1 = _test_task(node_id='trainer', pipeline_id='my_pipeline')
t2 = _test_task(node_id='transform', pipeline_id='my_pipeline')
tq = task_queue.TaskQueue()
# Enqueue t1, but calling `task_done` raises error since t1 is not dequeued.
self.assertTrue(tq.enqueue(t1))
with self.assertRaisesRegex(RuntimeError, 'Must call `dequeue`'):
tq.task_done(t1)
# `task_done` succeeds after dequeueing.
self.assertEqual(t1, tq.dequeue())
tq.task_done(t1)
# Error since t2 is not in the queue.
with self.assertRaisesRegex(RuntimeError, 'Task not present'):
tq.task_done(t2)
if __name__ == '__main__':
tf.test.main()
|
python
|
import logging
def get_logger(log_filename=None, module_name=__name__, level=logging.INFO):
# select handler
if log_filename is None:
handler = logging.StreamHandler()
elif isinstance(log_filename, str):
handler = logging.FileHandler(log_filename, 'w')
else:
raise ValueError("log_filename invalid!")
# build logger
logger = logging.getLogger(module_name)
logger.setLevel(level)
handler.setLevel(level)
formatter = logging.Formatter('%(asctime)s %(filename)s '
'[line:%(lineno)d] %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
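# --- Added usage sketch (illustration only) ------------------------------------
# Demonstrates both handler modes of get_logger above; "demo.log" is a
# hypothetical file name used only for this example.
if __name__ == "__main__":
    console_logger = get_logger(module_name="demo_console")
    console_logger.info("logging to the console via StreamHandler")
    file_logger = get_logger("demo.log", module_name="demo_file")
    file_logger.info("logging to demo.log via FileHandler")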
def serialize_tree_level(tree):
level_dic = {}
def dfs(u, dep = 0):
if dep not in level_dic:
level_dic[dep] = []
s = "id: %s, child: " % tree[u].id
for i in tree[u].childst:
s += str(i) + ", "
s = s[: -2]
s += "\n"
level_dic[dep].append(s)
for i in tree[u].childst:
dfs(i, dep + 1)
dfs(len(tree) - 1)
s = ""
for i in level_dic:
s += "level %d: \n" % i
for j in level_dic[i]:
s += j
s += "\n"
return s
|
python
|
from view import View
from tkinter import Tk
class Controller:
def __init__(self, model):
self.model = model
self.view = View(self.model.graph.width(),
self.model.graph.height(),
self.model.graph_path)
def run(self):
self.view.draw_model(self.model)
self.view.root.mainloop()
|
python
|
#! /usr/bin/env python
# -*- Mode: Python -*-
# -*- coding: ascii -*-
"""
Dump the list of layer names for layers that contain mesh data.
"""
import lwsdk
__lwver__ = "11"
class HistoryData():
def __init__(self):
self.string = ''
self.select_contains = False
self.select_others = False
class DumpLayerNameCM(lwsdk.ICommandSequence):
def __init__(self, context):
super(DumpLayerNameCM, self).__init__()
def selectLayers(self, data):
obj_funcs = lwsdk.LWObjectFuncs()
state_query = lwsdk.LWStateQueryFuncs()
obj_name = state_query.object()
layer_list = state_query.layerList(lwsdk.OPLYR_NONEMPTY, obj_name)
# there is no mesh !
if layer_list == '':
message_funcs = lwsdk.LWMessageFuncs()
message_funcs.error('No mesh data', '')
return lwsdk.AFUNC_OK
current_obj = obj_funcs.focusObject()
layers = layer_list.split(' ')
foreground_layers = []
background_layers = []
for layer in layers:
layer_int = int(layer) - 1
# an (unnamed) layer is reported with a name of None; treat it as an empty string
layer_name = obj_funcs.layerName(current_obj, layer_int)
if layer_name is None:
layer_name = ''
if data.select_contains == (data.string in layer_name):
foreground_layers.append(layer)
else:
background_layers.append(layer)
print('foreground_layers')
print(foreground_layers)
print('background_layers')
print(background_layers)
def process(self, mod_command):
data = HistoryData()
data.string = "aaa"
data.select_contains = True
data.select_others = False
self.selectLayers(data)
return lwsdk.AFUNC_OK
ServerTagInfo = [
("LW_DumpLayerNameCM", lwsdk.SRVTAG_USERNAME | lwsdk.LANGID_USENGLISH),
("LW_DumpLayerNameCM", lwsdk.SRVTAG_BUTTONNAME | lwsdk.LANGID_USENGLISH),
("Utilities/LW_DumpLayerNameCM", lwsdk.SRVTAG_MENU | lwsdk.LANGID_USENGLISH)
]
ServerRecord = {lwsdk.CommandSequenceFactory(
"LW_DumpLayerNameCM", DumpLayerNameCM): ServerTagInfo}
|
python
|
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import os
import unittest
from unittest.mock import Mock, call, patch
from ci_workflow.ci_check_manifest_component import CiCheckManifestComponent
from ci_workflow.ci_target import CiTarget
from manifests.build_manifest import BuildManifest
from manifests.input_manifest import InputComponentFromDist
class TestCiCheckManifestComponent(unittest.TestCase):
DATA = os.path.join(os.path.dirname(__file__), "data")
BUILD_MANIFEST = os.path.join(DATA, "opensearch-1.1.0-x64-build-manifest.yml")
@patch("manifests.distribution.find_build_root")
@patch("ci_workflow.ci_check_manifest_component.BuildManifest")
def test_retrieves_manifests(self, mock_manifest: Mock, find_build_root: Mock):
find_build_root.return_value = 'url/linux/ARCH/builds/opensearch'
check = CiCheckManifestComponent(InputComponentFromDist({
"name": "common-utils",
"dist": "url"
}), CiTarget(version="1.1.0", name="opensearch", snapshot=True))
mock_manifest.from_url.return_value = BuildManifest.from_path(self.BUILD_MANIFEST)
check.check()
mock_manifest.from_url.assert_has_calls([
call("url/linux/ARCH/builds/opensearch/manifest.yml"),
call("url/linux/ARCH/builds/opensearch/manifest.yml"),
])
find_build_root.assert_has_calls([
call('url', 'linux', 'x64', 'opensearch'),
call('url', 'linux', 'arm64', 'opensearch'),
])
@patch("manifests.distribution.find_build_root")
@patch("ci_workflow.ci_check_manifest_component.BuildManifest")
def test_missing_component(self, mock_manifest: Mock, find_build_root: Mock):
find_build_root.return_value = 'url/linux/x64/builds/opensearch'
check = CiCheckManifestComponent(InputComponentFromDist({
"name": "does-not-exist",
"dist": "url"
}), CiTarget(version="1.1.0", name="opensearch", snapshot=True))
mock_manifest.from_url.return_value = BuildManifest.from_path(self.BUILD_MANIFEST)
with self.assertRaises(CiCheckManifestComponent.MissingComponentError) as ctx:
check.check()
self.assertEqual(str(ctx.exception), "Missing does-not-exist in url/linux/x64/builds/opensearch/manifest.yml.")
find_build_root.assert_called()
|
python
|
from plugins.adversary.app.operation.operation import Step, OPVar, OPHost, OPRat, OPSoftware
from plugins.adversary.app.commands import *
from plugins.adversary.app.custom import *
class WebServerInstall(Step):
""" Description:
This step prepares the installation of a PHP webserver.
Requirements:
This step only requires the existence of a RAT on a host in order to run.
"""
display_name = 'webserver_install'
summary = 'Prepares webserver installation'
attack_mapping = [('T1094', 'Command and Control')]
preconditions = [('rat', OPRat({'elevated': True })),
('host', OPHost(OPVar('rat.host')))]
postconditions = [('software_g', OPSoftware({'name': 'webserver', 'installed': False, 'downloaded': False}))]
significant_parameters = ['host']
@staticmethod
def description(host):
return 'Preparing webserver install on {}'.format(host.fqdn)
@staticmethod
async def action(operation, rat, host, software_g):
name = 'webserver'
download_url = 'http://www.usbwebserver.net/downloads/USBWebserver%20v8.6.zip'
download_loc = (get_temp_folder(host, rat) + '{}.zip'.format(random_string()))
install_loc = (get_temp_folder(host, rat) + '{}\\'.format(random_string()))
install_command = {
'process': 'powershell.exe',
'args': '/command "Add-Type -A System.IO.Compression.FileSystem; [IO.Compression.ZipFile]::ExtractToDirectory(\'{}\', \'{}\')"'.format(download_loc, install_loc),
}
(await software_g({
'host': host,
'name': name,
'installed': False,
'install_command': install_command,
'install_loc': install_loc,
'downloaded': False,
'download_url': download_url,
'download_loc': download_loc,
}))
return True
@staticmethod
async def cleanup(cleaner, host, software_g):
for software in software_g:
if (not (await cleaner.run_on_agent(host, command.CommandLine('rmdir /s /q {}'.format(software.install_loc)), (lambda x: (x.strip() == ''))))):
(await cleaner.console_log(host, "Can't delete webserver folder on {} ({})".format(host.fqdn, software.install_loc)))
|
python
|
from django.apps import AppConfig
from django.db.models.signals import post_save, post_delete
from django.conf import settings
class SyncConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'sync'
def ready(self):
try:
from .signals import init_signals
init_signals()
print("Custom Signals Initialised")
except ImportError:
print("No Custom Signals")
|
python
|
# -*- coding: utf-8 -*-
"""Launch small HTTP server for TimeoutTest test case
Should work with Python 2 and 3.
"""
import sys
import time
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler as RequestHandler
except ImportError:
from http.server import CGIHTTPRequestHandler as RequestHandler
try:
from SocketServer import TCPServer as HTTPServer
except ImportError:
from http.server import HTTPServer
PYTHON_VERSION = sys.version_info[0]
class Handler(RequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.end_headers()
response_string = """
<?xml version="1.0" encoding="utf-8" ?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Header>
<t:ServerVersionInfo MajorVersion="8" MinorVersion="0" MajorBuildNumber="685" MinorBuildNumber="8"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types" />
</soap:Header>
<soap:Body>
<BogusResponse xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"
xmlns="http://schemas.microsoft.com/exchange/services/2006/messages">
<m:ResponseMessages>
<m:BogusResponseMessage ResponseClass="Success">
<m:ResponseCode>NoError</m:ResponseCode>
</m:BogusResponseMessage>
</m:ResponseMessages>
</BogusResponse>
</soap:Body>
</soap:Envelope>
"""
if PYTHON_VERSION == 3:
response = bytes(response_string, "utf-8")
else:
response = response_string
self.wfile.write(response)
def do_POST(self):
self.do_GET()
def log_message(self, format, *args):
return
server = HTTPServer(("localhost", 8080), Handler)
server.serve_forever()
|
python
|
"""
PPO with tensorflow implementation
The goal of RL is to find an optimal behavior strategy for the agent to obtain
optimal rewards. The policy gradient methods target at modeling and optimizing
the policy directly. The policy loss is defined as
L = E [log pi (a|s)] * AF
where 'L' is the policy loss, 'E' is the expectation, 'log pi(a|s)' is the log
probability of taking the action in that state, and 'AF' is the advantage.
PPO is an on-policy algorithm which can be used for environments with either discrete
or continuous action spaces. There are two primary variants of PPO: PPO-penalty, which
approximately solves a KL-constrained update like TRPO but penalizes the KL-divergence
in the objective function instead of making it a hard constraint; and PPO-clip, which does not
have a KL-divergence term in the objective and does not have a constraint at all,
instead relies on specialized clipping in the objective function to remove incentives
for the new policy to get far from the old policy. This implementation uses PPO-clip.
PPO is a policy gradient method and can be used for environments with either discrete
or continuous action spaces. It trains a stochastic policy in an on-policy way. Also,
it utilizes the actor critic method. The actor maps the observation to an action and
the critic gives an expectation of the rewards of the agent for the observation given.
Firstly, it collects a set of trajectories for each epoch by sampling from the latest
version of the stochastic policy. Then, the rewards-to-go and the advantage estimates
are computed in order to update the policy and fit the value function. The policy is
updated via a stochastic gradient ascent optimizer, while the value function is fitted
via some gradient descent algorithm. This procedure is applied for many epochs until
the environment is solved.
references:
[1] https://arxiv.org/pdf/1707.06347.pdf
[2] https://spinningup.openai.com/en/latest/algorithms/ppo.html
[3] https://keras.io/examples/rl/ppo_cartpole/
"""
import numpy as np
import tensorflow as tf
import gym
import scipy.signal
import datetime
import argparse
import tensorflow.keras.backend as K
from gym import wrappers
import os
"""
Replay buffer: stores experiences and computes the total rewards and advantages;
the buffer is then used to update the policy.
"""
class ReplayBuffer:
def __init__(self, obs_dim, size, gamma=0.99, lamda=0.95):
self.obs_buf = np.zeros((size, obs_dim), dtype=np.float32) # states
self.act_buf = np.zeros(size, dtype=np.int32) # action, sampled from the stochastic policy
self.rew_buf = np.zeros(size, dtype=np.float32) # step reward
self.ret_buf = np.zeros(size, dtype=np.float32) # ep_return, total reward of episode
self.val_buf = np.zeros(size, dtype=np.float32) # value of (s,a), output of critic net
self.adv_buf = np.zeros(size, dtype=np.float32) # advantage Q(s,a)-V(s)
self.logprob_buf = np.zeros(size, dtype=np.float32) # prediction: action probability, output of actor net
self.gamma, self.lamda = gamma, lamda
self.ptr, self.idx = 0, 0 # buffer ptr, and current trajectory start index
def store(self, observation, action, reward, value, logprob):
#print("storing", state[0].shape, action.shape, reward, prediction.shape, value.shape)
self.obs_buf[self.ptr]=observation
self.act_buf[self.ptr]=action
self.rew_buf[self.ptr]=reward
self.val_buf[self.ptr]=value
self.logprob_buf[self.ptr]=logprob
self.ptr += 1
"""
At the end of each episode, compute the discounted returns and advantage estimates for the finished trajectory.
"""
def ep_update(self, lastValue = 0):
"""
magic from rllab for computing discounted cumulative sums of vectors
input: vector x: [x0, x1, x2]
output: [x0+discount*x1+discount^2*x2, x1+discount*x2, x2]
"""
def discount_cumsum(x,discount):
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
ep_slice = slice(self.idx, self.ptr)
rews = np.append(self.rew_buf[ep_slice], lastValue)
vals = np.append(self.val_buf[ep_slice], lastValue)
deltas = rews[:-1]+self.gamma*vals[1:]-vals[:-1]
# Generalized Advantage Estimation (GAE)
self.adv_buf[ep_slice] = discount_cumsum(deltas, self.gamma*self.lamda)
# rewards-to-go, which is targets for the value function
self.ret_buf[ep_slice] = discount_cumsum(rews, self.gamma)[:-1]
self.idx = self.ptr
def get(self):
# get all data of the buffer and normalize the advantages
self.ptr, self.idx = 0, 0
adv_mean, adv_std = np.mean(self.adv_buf), np.std(self.adv_buf)
self.adv_buf = (self.adv_buf-adv_mean)/adv_std
return dict(
states=self.obs_buf,
actions=self.act_buf,
advantages=self.adv_buf,
returns=self.ret_buf,
logprobs=self.logprob_buf,
)
"""
loss print call back
"""
class PrintLoss(tf.keras.callbacks.Callback):
def on_epoch_end(self,epoch,logs={}):
print("epoch index", epoch+1, "loss", logs.get('loss'))
"""
build a feedforward neural network
"""
def mlp(obsDim, hiddenSize, numActions, outputActivation=None):
inputs = tf.keras.Input(shape=(obsDim,), dtype=tf.float32)
x = tf.keras.layers.Dense(units=hiddenSize[0], activation='tanh')(inputs)
for i in range(1, len(hiddenSize)):
x = tf.keras.layers.Dense(units=hiddenSize[i], activation='tanh')(x)
logits = tf.keras.layers.Dense(units=numActions, activation=outputActivation)(x)
return tf.keras.Model(inputs = inputs, outputs=logits)
def logprobabilities(logits, action, numActions):
logprob_all = tf.nn.log_softmax(logits)
logprob = tf.reduce_sum(tf.one_hot(action, numActions)*logprob_all, axis=1)
return logprob
"""
Actor net
"""
class ActorModel:
def __init__(self, obsDim, hiddenSize, numActions, clipRatio, lr):
self.policyNN = self.build_model(obsDim, hiddenSize, numActions, lr)
self.clipRatio = clipRatio
self.numActions = numActions
self.lossPrinter = PrintLoss()
self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
def build_model(self, obsDim, hiddenSize, numActions, lr):
model = mlp(obsDim, hiddenSize, numActions)
# model.compile(loss=self.ppo_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=lr))
# print(model.summary())
return model
# def ppo_loss(self, y_true, y_pred):
# # y_true: np.hstack([advantages, predictions, actions])
# advs,o_pred,acts = y_true[:,:1],y_true[:,1:1+self.numActions],y_true[:,1+self.numActions:]
# # print(y_pred, advs, picks, acts)
# prob = y_pred*acts
# old_prob = o_pred*acts
# ratio = prob/(old_prob + 1e-10)
# p1 = ratio*advs
# p2 = K.clip(ratio, 1-self.clipRatio, 1+self.clipRatio)*advs
# # total loss = policy loss + entropy loss (entropy loss for promote action diversity)
# loss = -K.mean(K.minimum(p1,p2)+self.beta*(-y_pred*K.log(y_pred+1e-10)))
# return loss
# def fit(self,states,y_true,epochs,batch_size):
# self.actor.fit(states, y_true, epochs=epochs, verbose=0, shuffle=True, batch_size=batch_size, callbacks=[self.lossPrinter])
def predict(self, obs):
obs = obs.reshape(1,-1)
logits = self.policyNN(obs)
action = tf.squeeze(tf.random.categorical(logits, 1),axis=1)
return logits, action
@tf.function
def train_policy(self, obs_buf, act_buf, logprob_buf, adv_buf):
# Record operations for automatic differentiation
with tf.GradientTape() as tape:
logits = self.policyNN(obs_buf)
ratio = tf.exp(logprobabilities(logits, act_buf, self.numActions)-logprob_buf)
minAdv = tf.where(adv_buf > 0, (1+self.clipRatio)*adv_buf, (1-self.clipRatio)*adv_buf)
policyLoss = -tf.reduce_mean(tf.minimum(ratio*adv_buf, minAdv))
policyGrads = tape.gradient(policyLoss, self.policyNN.trainable_variables)
self.optimizer.apply_gradients(zip(policyGrads, self.policyNN.trainable_variables))
k1 = tf.reduce_mean(logprob_buf - logprobabilities(self.policyNN(obs_buf), act_buf, self.numActions))
k1 = tf.reduce_sum(k1)
return k1
"""
Critic net
"""
class CriticModel:
def __init__(self, obsDim, hiddenSize, lr):
self.valueNN = self.build_model(obsDim, hiddenSize, lr)
self.lossPrinter = PrintLoss()
self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
def build_model(self, obsDim, hiddenSize, lr):
model = mlp(obsDim, hiddenSize, 1)
# model.compile(loss="mse",optimizer=tf.keras.optimizers.Adam(learning_rate=lr))
# print(model.summary())
return model
def predict(self,obs):
obs = obs.reshape(1,-1)
digits = self.valueNN(obs)
value = tf.squeeze(digits, axis=1)
return value
# def fit(self,states,y_true,epochs,batch_size):
# self.critic.fit(states, y_true, epochs=epochs, verbose=0, shuffle=True, batch_size=batch_size, callbacks=[self.lossPrinter])
@tf.function
def train_value(self, obs_buf, ret_buf):
# Record operations for automatic differentiation
with tf.GradientTape() as tape:
valueLoss = tf.reduce_mean((ret_buf - self.valueNN(obs_buf)) ** 2)
valueGrads = tape.gradient(valueLoss, self.valueNN.trainable_variables)
self.optimizer.apply_gradients(zip(valueGrads, self.valueNN.trainable_variables))
"""
PPO Agent
"""
class PPOAgent:
def __init__(self, obsDim, hiddenSize, numActions, clipRatio, policyLR, valueLR, memorySize, gamma, lamda, targetK1):
self.buffer = ReplayBuffer(obsDim, memorySize, gamma, lamda)
self.Actor = ActorModel(obsDim, hiddenSize, numActions, clipRatio, policyLR)
self.Critic = CriticModel(obsDim, hiddenSize, valueLR)
self.actDim = numActions
self.targetK1 = targetK1
def action(self, obs):
# sample action from actor
logits, action = self.Actor.predict(obs)
# get the log-probability of the taken action from the logits
logprob = logprobabilities(logits, action, self.actDim)
# get value
value = self.Critic.predict(obs)
return logprob, action, value
def train(self, itActor=80, itCritic=80):
data = self.buffer.get()
obs_buf = data['states']
act_buf = data['actions']
adv_buf = data['advantages']
ret_buf = data['returns']
logprob_buf = data['logprobs']
# train the policy network
for _ in range(itActor):
k1 = self.Actor.train_policy(obs_buf, act_buf, logprob_buf, adv_buf)
if k1 > 1.5 * self.targetK1:
break # Early Stopping
# train value network
for _ in range(itCritic):
self.Critic.train_value(obs_buf, ret_buf)
#######
np.random.seed(123)
def make_video(env, agent):
env = wrappers.Monitor(env,os.path.join(os.getcwd(),"videos"), force=True)
rewards = 0
steps = 0
done = False
obs = env.reset()
while not done:
env.render()
logprob, action, value = agent.action(obs)
obs, reward, done, _ = env.step(action[0].numpy())
steps += 1
rewards += reward
if done:
env.reset()
print("Test Step {} Rewards {}".format(steps, rewards))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--max_ep', type=int, default=10000)
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
maxEpoch = args.max_ep
epSteps = 4000
gamma = 0.99
lamda = 0.97
clipRatio = 0.2
policyLearningRate = 3e-4
valueLearningRate = 1e-3
policyTrainingIteration = 80
valueTrainingIteration = 80
targetK1 = 0.01
currTime = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logDir = 'logs/ppo' + currTime
summaryWriter = tf.summary.create_file_writer(logDir)
env = gym.make('CartPole-v0')
obsDim = env.observation_space.shape[0]
numActions = env.action_space.n
hiddenSize = [64,64]
agent = PPOAgent(obsDim,hiddenSize,numActions,clipRatio,policyLearningRate,valueLearningRate,epSteps,gamma,lamda,targetK1)
obs, epReturn, epLength = env.reset(), 0, 0
# Iteration over the number of epochs
for ep in range(maxEpoch):
sumReturn = 0
sumLength = 0
numEpisodes = 0
# Iterate over the steps of each epoch
for t in range(epSteps):
logprob, action, value = agent.action(obs)
newobs, reward, done, _ = env.step(action[0].numpy())
epReturn += reward
epLength += 1
agent.buffer.store(obs, action, reward, value, logprob)
obs = newobs
# finish trajectory if reach to a terminal state
if done or (t == epSteps-1):
lastValue = 0 if done else agent.Critic.predict(obs)
agent.buffer.ep_update(lastValue)
sumReturn += epReturn
sumLength += epLength
numEpisodes += 1
with summaryWriter.as_default():
tf.summary.scalar('episode reward', epReturn, step=numEpisodes)
obs, epReturn, epLength = env.reset(), 0, 0
# update policy and value function
agent.train(policyTrainingIteration, valueTrainingIteration)
print("Episode: {} Average Rewards: {:.4f} Mean Length {:.4f} ".format(ep+1, sumReturn/numEpisodes, sumLength/numEpisodes))
make_video(env, agent)
env.close()
|
python
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import pwd
import grp
import errno
import config
import subprocess
import simplegist
import unicodedata
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from tornado.options import options
from jinja2 import Environment, FileSystemLoader
import tornado.web
api_logger = config.getlog()
class BaseHandler(tornado.web.RequestHandler):
"""
Base Class used on every Handler
"""
def checkMaven(self):
pass
class execCommand(object):
def __init__(self, cmdlaunch):
self.cmdlaunch = cmdlaunch
def execute(self):
launch = subprocess.Popen(self.cmdlaunch, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = launch.communicate()
return output, err
class Utils(object):
def lastlines(self, hugefile, n, bsize=2048):
# get newlines type, open in universal mode to find it
with open(hugefile, 'rU') as hfile:
if not hfile.readline():
return # empty, no point
sep = hfile.newlines # After reading a line, python gives us this
assert isinstance(sep, str), 'multiple newline types found, aborting'
# find a suitable seek position in binary mode
with open(hugefile, 'rb') as hfile:
hfile.seek(0, os.SEEK_END)
linecount = 0
pos = 0
while linecount <= n + 1:
# read at least n lines + 1 more; we need to skip a partial line later on
try:
hfile.seek(-bsize, os.SEEK_CUR) # go backwards
linecount += hfile.read(bsize).count(sep) # count newlines
hfile.seek(-bsize, os.SEEK_CUR) # go back again
except IOError as e:
if e.errno == errno.EINVAL:
# Attempted to seek past the start, can't go further
bsize = hfile.tell()
hfile.seek(0, os.SEEK_SET)
linecount += hfile.read(bsize).count(sep)
break
raise # Some other I/O exception, re-raise
pos = hfile.tell()
# Re-open in text mode
with open(hugefile, 'r') as hfile:
hfile.seek(pos, os.SEEK_SET) # our file position from above
for line in hfile:
# We've located n lines *or more*, so skip if needed
if linecount > n:
linecount -= 1
continue
# The rest we yield
yield line
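    # Illustrative usage sketch (not part of the original class): stream the
    # last 100 lines of a large log file; the path below is hypothetical.
    #
    #   for line in Utils().lastlines('/var/log/app.log', 100):
    #       print(line, end='')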
def checkAndcreate(self, dir, user, group):
if not os.path.exists(dir):
os.makedirs(dir)
uid = pwd.getpwnam(user).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(dir, uid, gid)
return 1
return 0
def changeOwner(self, filePath, user, group):
if os.path.exists(filePath):
uid = pwd.getpwnam(user).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(filePath, uid, gid)
return 1
return 0
def write_module(self, module_name, module_lang, source_code, dst_path, module_type):
"""Gets the source code of a module from a GitHub gist.
Args:
module_name: The name of the module.
module_lang: Code language.
source_code: Gist url.
dst_path: Absolute path for module on file sytem.
Returns:
The file system path of the newly created module.
Raises:
IOError: An error occurred accessing GitHub or creating the source files.
"""
print(type(source_code))
api_logger.info("Module name: " + str(module_name))
api_logger.info("Module lang: " + str(module_lang))
# api_logger.info("Source code: "+str(source_code))
api_logger.info("DST_PATH: " + str(dst_path))
api_logger.info("MODULE Type: " + str(module_type))
if module_lang == "py":
file_name = os.path.join(dst_path, module_name.lower() + "." + module_lang)
elif module_lang == "java":
file_name = os.path.join(dst_path, module_name + "." + module_lang)
        # Write the source code out to the destination file
try:
with open(file_name, "w") as text_file:
text_file.write(unicodedata.normalize('NFKD', source_code).encode('ascii', 'ignore'))
self.changeOwner(file_name, "storm", "storm")
except Exception as e:
print(str(e))
api_logger.error(str(e))
raise e
if module_lang == "py":
# Time to jinja2
# Check module type
if module_type == "drain":
boltType = "drains"
dst_path = options.backend_java_path_drains
template_name = options.backend_template_path + "boltjava2python.tmpl"
elif module_type == "bolt":
boltType = "bolts"
dst_path = options.backend_java_path_bolts
template_name = options.backend_template_path + "boltjava2python.tmpl"
elif module_type == "spout":
boltType = "spouts"
dst_path = options.backend_java_path_spouts
template_name = options.backend_template_path + "spoutjava2python.tmpl"
env = Environment(loader=FileSystemLoader('/'))
template = env.get_template(template_name)
file_name = os.path.join(dst_path, module_name + ".java")
try:
with open(file_name, "w") as text_file:
text_file.write(
template.render(boltName=module_name, boltType=boltType, boltNamelowercase=module_name.lower()))
self.changeOwner(file_name, "storm", "storm")
except Exception as e:
api_logger.error(str(e))
raise e
return file_name
def get_module(self, module_name, module_lang, gist_url, dst_path, module_type):
"""Gets the source code of a module from a GitHub gist.
Args:
module_name: The name of the module.
module_lang: Code language.
gist_url: Gist url.
            dst_path: Absolute path for module on file system.
Returns:
The file system path of the newly created module.
Raises:
IOError: An error occurred accessing GitHub or creating the source files.
"""
# Start gist handler
API_TOKEN = options.gist_api_token
USERNAME = options.gist_username
GHgist = simplegist.Simplegist(username=USERNAME, api_token=API_TOKEN)
api_logger.info("Module name: " + str(module_name))
api_logger.info("Module lang: " + str(module_lang))
api_logger.info("Gist URL: " + str(gist_url))
api_logger.info("DST_PATH: " + str(dst_path))
api_logger.info("MODULE Type: " + str(module_type))
# Get Id and user from URL
gist_id_reg = re.compile('([a-zA-Z0-9]+)')
gist_user, gist_id = gist_id_reg.findall(urlparse(gist_url).path)
api_logger.info("Gist USER: " + str(gist_user))
api_logger.info("Gist ID: " + str(gist_id))
# Download code from GIST
GHgist.profile().getgist(id=gist_id)
# Authenticate using a GitHub API access token.
if module_lang == "py":
file_name = os.path.join(dst_path, module_name.lower() + "." + module_lang)
elif module_lang == "java":
file_name = os.path.join(dst_path, module_name + "." + module_lang)
else:
file_name = None
        # Write the gist content out to the destination file
try:
with open(file_name, "w") as text_file:
text_file.write(
unicodedata.normalize('NFKD', GHgist.profile().content(id=gist_id)).encode('ascii', 'ignore'))
self.changeOwner(file_name, "storm", "storm")
except Exception as e:
api_logger.error(str(e))
raise e
if module_lang == "py":
# Time to jinja2
# Check module type
if module_type == "drain":
boltType = "drains"
dst_path = options.backend_java_path_drains
template_name = options.backend_template_path + "boltjava2python.tmpl"
elif module_type == "bolt":
boltType = "bolts"
dst_path = options.backend_java_path_bolts
template_name = options.backend_template_path + "boltjava2python.tmpl"
elif module_type == "spout":
boltType = "spouts"
dst_path = options.backend_java_path_spouts
template_name = options.backend_template_path + "spoutjava2python.tmpl"
env = Environment(loader=FileSystemLoader('/'))
template = env.get_template(template_name)
file_name = os.path.join(dst_path, module_name + ".java")
try:
with open(file_name, "w") as text_file:
text_file.write(
template.render(boltName=module_name, boltType=boltType, boltNamelowercase=module_name.lower()))
self.changeOwner(file_name, "storm", "storm")
except Exception as e:
api_logger.error(str(e))
raise e
return file_name
|
python
|
import math as m
import numpy as np
from matplotlib import pyplot as plt
from BDPoisson1D import dirichlet_non_linear_poisson_solver_amr
from BDFunction1D import Function
from BDFunction1D.Functional import Functional
class TestFunction(Function):
"""
Some known differentiable function
"""
def evaluate_point(self, x):
return m.exp(-x * 3)
class TestFunctional(Functional):
def __init__(self, Nd, kT, f):
super(TestFunctional, self).__init__(f)
self.Nd = Nd
self.kT = kT
def evaluate_point(self, x):
return self.Nd(x) * (1 - (m.exp(-self.f.evaluate_point(x) / self.kT)))
class TestFunctionalDf(Functional):
def __init__(self, Nd, kT, f):
super(TestFunctionalDf, self).__init__(f)
self.Nd = Nd
self.kT = kT
def evaluate_point(self, x):
return self.Nd(x) / self.kT * m.exp(-self.f.evaluate_point(x) / self.kT)
Nd = lambda x: np.ones_like(x)
kT = 1 / 20
Psi = TestFunction()
f = TestFunctional(Nd, kT, Psi)
dfdPsi = TestFunctionalDf(Nd, kT, Psi)
start = 0.0
stop = 5.0
step = 0.5
bc1 = 1.0
bc2 = 0.0
solution = dirichlet_non_linear_poisson_solver_amr(start, stop, step, Psi, f, dfdPsi, bc1, bc2,
max_iter=1000, residual_threshold=1.5e-3,
int_residual_threshold=1.5e-4,
max_level=20, mesh_refinement_threshold=1e-7)
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
nodes = np.linspace(start, stop, num=int((stop-start)/step+1))
ax1.plot(nodes, solution.evaluate(nodes), '-')
ax2.plot(nodes, solution.error(nodes), '-')
plt.show()
|
python
|
#!/usr/bin/env python
import glob
for name in glob.glob('dir/*'):
    print(name)
|
python
|
"""
Image conversion functions.
"""
# Copyright (c) 2020 Ben Zimmer. All rights reserved.
from typing import Tuple
import numpy as np
from PIL import Image
# Some functions for colorizing single channel black and white image (PIL "L" mode)
# or the alpha channels of text_scala output.
# ~~~~ function from text_scala
def colorize(img: np.ndarray, color: Tuple) -> np.ndarray:
"""colorize a single-channel (alpha) image into a 4-channel RGBA image"""
# ensure color to RGBA
if len(color) == 3:
color = (color[0], color[1], color[2], 255)
    # create a result image filled with solid "color"
res = np.zeros((img.shape[0], img.shape[1], 4), dtype=np.ubyte)
res[:, :, 0:4] = color
# scale the alpha component by the image
# (this comes into play if "color" has alpha < 255)
res[:, :, 3] = color[3] / 255.0 * img
# set the RGB of completely transparent pixels to zero
res[res[:, :, 3] == 0, 0:3] = (0, 0, 0)
return res
# ~~~~ function from the old text module
# Pretty much the only difference between these two is the order of operations
# when scaling the alpha channel. One could programmatically verify that both
# do the same thing.
def l_to_rgba(img: np.ndarray, color: Tuple) -> np.ndarray:
"""create a colorized transparent image from black and white"""
# create result image filled with solid "color"
height, width = img.shape
solid = Image.new("RGBA", (width, height), color)
res = np.array(solid)
# scale the alpha component by the image
# (this comes into play if "color" has alpha < 255)
res[:, :, 3] = res[:, :, 3] * (img / 255.0)
# set the RGB of completely transparent pixels to zero
res[res[:, :, 3] == 0, 0:3] = (0, 0, 0)
return res
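# Illustrative example (not part of the original module): what colorize()
# produces for a tiny alpha mask, following directly from the code above.
if __name__ == "__main__":
    mask = np.array([[0, 255],
                     [128, 255]], dtype=np.ubyte)
    red = colorize(mask, (255, 0, 0))
    print(red[0, 0])  # fully transparent pixel -> [  0   0   0   0]
    print(red[0, 1])  # fully opaque pixel      -> [255   0   0 255]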
|
python
|
import pandas as pd
def generate_train(playlists):
# define category range
cates = {'cat1': (10, 50), 'cat2': (10, 78), 'cat3': (10, 100), 'cat4': (40, 100), 'cat5': (40, 100),
'cat6': (40, 100),'cat7': (101, 250), 'cat8': (101, 250), 'cat9': (150, 250), 'cat10': (150, 250)}
cat_pids = {}
for cat, interval in cates.items():
df = playlists[(playlists['num_tracks'] >= interval[0]) & (playlists['num_tracks'] <= interval[1])].sample(
n=1000)
cat_pids[cat] = list(df.pid)
playlists = playlists.drop(df.index)
playlists = playlists.reset_index(drop=True)
return playlists, cat_pids
def generate_test(cat_pids, playlists, interactions, tracks):
def build_df_none(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
def build_df_name(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['name', 'pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
df_test_pl = pd.DataFrame()
df_test_itr = pd.DataFrame()
df_eval_itr = pd.DataFrame()
for cat in list(cat_pids.keys()):
if cat == 'cat1':
num_samples = 0
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
# all interactions used for evaluation
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
df_eval_itr = pd.concat([df_eval_itr, df_itr])
# clean interactions for training
interactions = interactions.drop(df_itr.index)
print("cat1 done")
if cat == 'cat2':
num_samples = 1
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[df_itr['pos'] == 0]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat2 done")
if cat == 'cat3':
num_samples = 5
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat3 done")
if cat == 'cat4':
num_samples = 5
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat4 done")
if cat == 'cat5':
num_samples = 10
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat5 done")
if cat == 'cat6':
num_samples = 10
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat6 done")
if cat == 'cat7':
num_samples = 25
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat7 done")
if cat == 'cat8':
num_samples = 25
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
for pid in cat_pids[cat]:
df = df_itr[df_itr['pid'] == pid]
df_sample = df.sample(n=num_samples)
df_test_itr = pd.concat([df_test_itr, df_sample])
df = df.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df])
print("cat8 done")
if cat == 'cat9':
num_samples = 100
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat9 done")
if cat == 'cat10':
num_samples = 100
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
for pid in cat_pids[cat]:
df = df_itr[df_itr['pid'] == pid]
df_sample = df.sample(n=num_samples)
df_test_itr = pd.concat([df_test_itr, df_sample])
df = df.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df])
print("cat10 done")
tids = set(df_eval_itr['tid'])
df = tracks[tracks['tid'].isin(tids)]
df = df[['tid', 'arid']]
df_eval_itr = pd.merge(df_eval_itr, df, on='tid')
df_test_pl = df_test_pl.reset_index(drop=True)
df_test_itr = df_test_itr.reset_index(drop=True)
df_eval_itr = df_eval_itr.reset_index(drop=True)
interactions = interactions.reset_index(drop=True) # return as train_interactions
return df_test_pl, df_test_itr, df_eval_itr, interactions
def split_dataset(df_playlists, df_interactions, df_tracks):
"""
Split the MPD according to Challenge_set features
:param df_playlists: DataFrame from "playlists.csv"
:param df_interactions: DataFrame from "interactions.csv"
:param df_tracks: DataFrame from "tracks.csv"
:return: df_train_pl: a DataFrame with same shape as "playlists.csv" for training
df_train_itr: a DataFrame with same shape as "interactions.csv" for training
df_test_pl: a DataFrame of 10,000 incomplete playlists for testing
             df_test_itr: a DataFrame with same shape as "interactions.csv" for testing
df_eval_itr: a DataFrame of holdout interactions for evaluation
"""
df_train_pl, cat_pids = generate_train(df_playlists)
df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions, df_tracks)
return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr
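# Illustrative usage sketch (not part of the original module). The CSV file
# names are assumptions taken from the docstring above:
#
#   import pandas as pd
#   df_playlists = pd.read_csv('playlists.csv')
#   df_interactions = pd.read_csv('interactions.csv')
#   df_tracks = pd.read_csv('tracks.csv')
#   train_pl, train_itr, test_pl, test_itr, eval_itr = split_dataset(
#       df_playlists, df_interactions, df_tracks)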
|
python
|
'''
Copyright (C) 2018 PyElo.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import math
# Expected score of player A with rating 'rating_a' against player B with
# 'rating_b'.
def expected_score(rating_a, rating_b):
return 1.0 / (1.0 + 10.0 ** ((rating_b - rating_a) / 400.0))
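# Worked example (illustrative, not part of the original module): a player
# rated 1613 facing a player rated 1477 is expected to score roughly
# 1 / (1 + 10 ** ((1477 - 1613) / 400)) ~= 0.686.
#
#   >>> round(expected_score(1613, 1477), 3)
#   0.686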
# Change in rating based on expected and actual score.
def rating_delta(score, expected, k=20):
if k <= 0:
raise ValueError("k must be positive.")
return k * (score - expected)
# Update individual ratings after a 1v1 match. The pair of new ratings is
# returned as a tuple (new rating of player A, new rating of B). K factors may
# be individually set for both players.
def update_rating(rating_a, rating_b, score, k_a=20, k_b=20):
if k_a <= 0:
raise ValueError("k_a must be positive.")
if k_b <= 0:
raise ValueError("k_b must be positive.")
expected_a = expected_score(rating_a, rating_b)
expected_b = 1 - expected_a
rating_a += rating_delta(score, expected_a, k_a)
rating_b += rating_delta(1 - score, expected_b, k_b)
return (rating_a, rating_b)
# Expected score of team A against team B. Teams are a list of player ratings.
def expected_team_score(team_a, team_b):
if len(team_a) == 0:
raise ValueError("team_a must have at least one rating.")
if len(team_b) == 0:
raise ValueError("team_b must have at least one rating.")
return expected_score(sum(team_a), sum(team_b))
# Convert Elo ratings to the Bradley-Terry scale.
def elo_to_bt(elo_rating):
return 10.0 ** (elo_rating / 400.0)
# Update team ratings, where a team is a collection of ratings. The pair of new
# ratings is returned of (new ratings of team A, new ratings of team B) in the
# given order. K factors may be individually set for both teams.
def update_team_rating(team_a, team_b, score, k_a=20, k_b=20):
if k_a <= 0:
raise ValueError("k_a must be positive.")
if k_b <= 0:
raise ValueError("k_b must be positive.")
if len(team_a) == 0:
raise ValueError("team_a must have at least one rating.")
if len(team_b) == 0:
raise ValueError("team_b must have at least one rating.")
expected_a = expected_team_score(team_a, team_b)
expected_b = 1 - expected_a
delta_a = rating_delta(score, expected_a, k_a * len(team_a))
delta_b = rating_delta(1 - score, expected_b, k_b * len(team_b))
# Teams' ratings converted to the Bradley-Terry scale.
bt_team_a = [elo_to_bt(rating) for rating in team_a]
bt_team_b = [elo_to_bt(rating) for rating in team_b]
# Calculate normalization quotient.
norm_bt_team_a = sum(bt_team_a)
norm_bt_team_b = sum(bt_team_b)
# Normalize Bradley-Terry team ratings.
bt_team_a = [rating / norm_bt_team_a for rating in bt_team_a]
bt_team_b = [rating / norm_bt_team_b for rating in bt_team_b]
# Apply deltas in terms of normalized ratings.
team_a_delta = [delta_a * rating for rating in bt_team_a]
team_b_delta = [delta_b * rating for rating in bt_team_b]
# Return updated ratings.
return ([rating + delta for rating, delta in zip(team_a, team_a_delta)], [rating + delta for rating, delta in zip(team_b, team_b_delta)])
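# Worked example (illustrative, not part of the original module): two evenly
# rated one-player "teams", team A wins, default k factor of 20. The expected
# score is 0.5, so team A gains 20 * (1 - 0.5) = 10 points and team B loses
# the same amount.
#
#   >>> update_team_rating([1000], [1000], 1)
#   ([1010.0], [990.0])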
# Expected score in a match with multiple ranks.
def expected_rank_score(ranks):
if len(ranks) <= 1:
raise ValueError("The length of ranks must be 2 or greater.")
return [sum(expected_score(ranks[i], opp_rating) for j, opp_rating in enumerate(ranks) if i != j) for i, rating in enumerate(ranks)]
# Expected placing in a match with multiple ranks. Return values are not
# rounded to the nearest integer.
def expected_place(rating, opponent_ratings):
if len(opponent_ratings) == 0:
raise ValueError("opponent_ratings must have at least one rating.")
return 1 + len(opponent_ratings) - sum(expected_score(rating, opp_rating) for opp_rating in opponent_ratings)
# Update the rating of a ranking of players, where ranks is a list of ratings
# sorted by results: the first element of the list is 1st place, the second is
# 2nd place, and so on. Ratings are returned in the same order, and K factors
# may either be set for all players or individually for each player.
def update_rank_rating(ranks, k=20):
if len(ranks) <= 1:
raise ValueError("The length of ranks must have two ratings or greater.")
if type(k) is list:
if len(k) != len(ranks):
raise ValueError("The length of ranks must be the same as the length of k, or a single k factor should be given.")
# Check if all k are positive.
if sum(1 for individual_k in k if individual_k <= 0) > 0:
raise ValueError("All k factors must be positive.")
else:
if k <= 0:
raise ValueError("k must be positive.")
        # Use the same k factor for every player.
k = [k] * len(ranks)
expected = expected_rank_score(ranks)
# Calculate k normalization quotient.
k_norm = len(ranks) - 1
scores = list(range(k_norm, -1, -1))
return [rating + rating_delta(score, individual_expected, individual_k / k_norm) for rating, score, individual_expected, individual_k in zip(ranks, scores, expected, k)]
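# Illustrative example (not part of the original module): three evenly rated
# players finish 1st, 2nd and 3rd. With the default k of 20 the winner gains
# 10 points, the middle player is unchanged and the last player loses 10.
#
#   >>> update_rank_rating([1000, 1000, 1000])
#   [1010.0, 1000.0, 990.0]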
# Get the base-2 entropy of a Bernoulli(p) distribution.
def bernoulli_entropy(p):
if p <= 0 or p >= 1:
raise ValueError("p must be greater than 0 and less than 1.")
return -(p * math.log2(p) + (1 - p) * math.log2(1 - p))
# Get the fairness of a match between player A and player B, with 0 being the
# least fair and 1 being the most fair.
def fairness(rating_a, rating_b):
return bernoulli_entropy(expected_score(rating_a, rating_b))
# Get the fairness of a match between team A and team B.
def fairness_team(team_a, team_b):
if len(team_a) == 0:
raise ValueError("team_a must have at least one rating.")
if len(team_b) == 0:
raise ValueError("team_b must have at least one rating.")
return bernoulli_entropy(expected_team_score(team_a, team_b))
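# Illustrative example (not part of the original module): a match between two
# equally rated players is maximally fair (expected score 0.5, entropy 1).
#
#   >>> fairness(1200, 1200)
#   1.0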
|
python
|
# -*- coding: utf-8 -*-
import unittest
from gilded_rose import Item, GildedRose
class GildedRoseTest(unittest.TestCase):
def test_foo_quality_never_below_zero(self):
items = [Item("foo", 0, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual("foo", items[0].name)
self.assertEqual(0, items[0].quality)
def test_foo_quality_decreases_by_one(self):
items = [Item("foo", 0, 1)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(0, items[0].quality)
def test_foo_quality_decreases_twice_as_fast_after_sell_date(self):
items = [Item("foo", -1, 2)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(0, items[0].quality)
def test_foo_sellin_decreases_by_one(self):
items = [Item("foo", 1, 1)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(0, items[0].sell_in)
def test_aged_brie_increases_in_quality(self):
items = [Item("Aged Brie", 1, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(1, items[0].quality)
def test_aged_brie_increases_in_quality_up_to_50(self):
items = [Item("Aged Brie", 1, 50)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(50, items[0].quality)
def test_sulfuras_does_not_decrease_in_quality(self):
items = [Item("Sulfuras, Hand of Ragnaros", 1, 10)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(10, items[0].quality)
    def test_sulfuras_sellin_does_not_decrease(self):
items = [Item("Sulfuras, Hand of Ragnaros", 1, 1)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(1, items[0].sell_in)
def test_backstage_passes_quality_increases_by_two_ten_days_or_less(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 10, 3)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(5, items[0].quality)
def test_backstage_passes_quality_increases_by_three_five_days_or_less(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 5, 3)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(6, items[0].quality)
def test_backstage_passes_quality_drops_to_zero_after_concert(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 0, 3)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(0, items[0].quality)
if __name__ == '__main__':
unittest.main()
|
python
|
#!/usr/bin/env python
import gpt_2_simple as gpt2
import sys
if len(sys.argv) > 1:
    prompt = sys.argv[1]
else:
    # no prompt given on the command line: show an example prompt and exit
    prompt = "prompt: So, what's new around here?"
    print(prompt)
    sys.exit(1)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess)
single_text = gpt2.generate(
sess,
return_as_list=True,
temperature=0.75,
include_prefix=False,
truncate="<|endoftext|>",
prefix="""ASCII Today - Fun with the Teletype Terminal"""
)[0]
print(single_text)
|
python
|
# Please refrain from specifying a micro version if possible.
# --------------------------------------------------------------------------- #
VERSION = (1, 1)
# --------------------------------------------------------------------------- #
def _get_version(vt): # pragma: nocover # noqa
vt = tuple(map(str, vt)) # pragma: nocover # noqa
m = map(lambda v: v.startswith(('a', 'b', 'rc')), vt) # pragma: nocover # noqa
try: # pragma: nocover # noqa
i = next(i for i, v in enumerate(m) if v) # pragma: nocover # noqa
except StopIteration: # pragma: nocover # noqa
return '.'.join(vt) # pragma: nocover # noqa
return '.'.join(vt[:i]) + '.'.join(vt[i:]) # pragma: nocover # noqa
__version__ = _get_version(VERSION)
del _get_version
from . import common # noqa
from .common import EncodingType # noqa
from . import asymmetric # noqa
from .asymmetric import * # noqa
from . import x509 # noqa
from .x509 import * # noqa
|
python
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run some automations to test things"""
from __future__ import unicode_literals
from __future__ import print_function
import sys
import os.path
import time
try:
from pywinauto import application
except ImportError:
pywinauto_path = os.path.abspath(__file__)
pywinauto_path = os.path.split(os.path.split(pywinauto_path)[0])[0]
sys.path.append(pywinauto_path)
from pywinauto import application
import pywinauto
from pywinauto import tests
#from pywinauto.findbestmatch import MatchError
from pywinauto.timings import Timings
def run_notepad():
"""Run notepad and do some small stuff with it"""
print("Run with option 'language' e.g. notepad_fast.py language to use")
print("application data. This should work on any language Windows/Notepad")
print()
print("Trying fast timing settings - it's possible these won't work")
print("if pywinauto tries to access a window that is not accessible yet")
# use fast timings - but allow to wait for windows a long time
Timings.fast()
Timings.window_find_timeout = 10
start = time.time()
run_with_appdata = False
if len(sys.argv) > 1 and sys.argv[1].lower() == 'language':
run_with_appdata = True
scriptdir = os.path.split(os.path.abspath(__file__))[0]
if run_with_appdata:
print("\nRunning this script so it will load application data and run")
print("against any lanuguage version of Notepad/Windows")
# make sure that the app data gets read from the same folder as
# the script
app = application.Application(
os.path.join(scriptdir, "Notepad_fast.pkl"))
else:
app = application.Application()
    ## for distribution we don't want to connect to anybody's application
## because we may mess up something they are working on!
#try:
# app.connect_(path = r"c:\windows\system32\notepad.exe")
#except application.ProcessNotFoundError:
# app.start_(r"c:\windows\system32\notepad.exe")
app.start(r"notepad.exe")
app.Notepad.menu_select("File->PageSetup")
# ----- Page Setup Dialog ----
# Select the 4th combobox item
app.PageSetupDlg.SizeComboBox.select(4)
# Select the 'Letter' combobox item or the Letter
try:
app.PageSetupDlg.SizeComboBox.select("Letter")
except ValueError:
app.PageSetupDlg.SizeComboBox.select('Letter (8.5" x 11")')
app.PageSetupDlg.SizeComboBox.select(2)
# run some tests on the Dialog. List of available tests:
# "AllControls",
# "AsianHotkey",
# "ComboBoxDroppedHeight",
# "CompareToRefFont",
# "LeadTrailSpaces",
# "MiscValues",
# "Missalignment",
# "MissingExtraString",
# "Overlapping",
# "RepeatedHotkey",
# "Translation",
# "Truncation",
bugs = app.PageSetupDlg.run_tests('RepeatedHotkey Truncation')
# if there are any bugs they will be printed to the console
# and the controls will be highlighted
tests.print_bugs(bugs)
# ----- Next Page Setup Dialog ----
app.PageSetupDlg.Printer.click()
# do some radio button clicks
# Open the Connect to printer dialog so we can
# try out checking/unchecking a checkbox
app.PageSetupDlg.Network.click()
# ----- Connect To Printer Dialog ----
# Select a checkbox
app.ConnectToPrinter.ExpandByDefault.check()
app.ConnectToPrinter.ExpandByDefault.uncheck()
# try doing the same by using click
app.ConnectToPrinter.ExpandByDefault.click()
app.ConnectToPrinter.ExpandByDefault.click()
# close the dialog
app.ConnectToPrinter.Cancel.close_click()
# ----- 2nd Page Setup Dialog again ----
app.PageSetupDlg.Properties.click()
doc_props = app.window(name_re=".*Properties$")
doc_props.wait('exists', timeout=40)
#
# # ----- Document Properties Dialog ----
# # some tab control selections
# # Two ways of selecting tabs with indices...
# doc_props.TabCtrl.select(0)
# doc_props.TabCtrl.select(1)
# try:
# doc_props.TabCtrl.select(2)
# except IndexError:
# # not all users have 3 tabs in this dialog
# pass
#
# # or with text...
# #doc_props.TabCtrl.select("PaperQuality")
# doc_props.TabCtrl.select(1)
#
# try:
# #doc_props.TabCtrl.select("JobRetention")
# doc_props.TabCtrl.select("3")
# except MatchError:
# # some people do not have the "Job Retention" tab
# pass
#
# doc_props.TabCtrl.select("Finishing")
# #doc_props.TabCtrl.select(0)
#
# # do some radio button clicks
# doc_props.RotatedLandscape.click()
# doc_props.BackToFront.click()
# doc_props.FlipOnShortEdge.click()
#
# doc_props.Portrait.click()
# doc_props._None.click()
# #doc_props.FrontToBack.click()
#
# # open the Advanced options dialog in two steps
# advbutton = doc_props.Advanced
# advbutton.click()
#
# # close the 4 windows
#
# # ----- Advanced Options Dialog ----
# app.window(name_re = ".* Advanced Options").Ok.click()
# ----- Document Properties Dialog again ----
doc_props.Cancel.close_click()
# for some reason my current printer driver
# window does not close cleanly :(
if doc_props.Cancel.Exists():
doc_props.OK.close_click()
# ----- 2nd Page Setup Dialog again ----
app.PageSetupDlg.OK.close_click()
# ----- Page Setup Dialog ----
app.PageSetupDlg.Ok.close_click()
# type some text - note that extended characters ARE allowed
app.Notepad.Edit.set_edit_text(u"I am typing s\xe4me text to Notepad\r\n\r\n"
"And then I am going to quit")
app.Notepad.Edit.right_click()
app.Popup.menu_item("Right To Left Reading Order").click()
#app.PopupMenu.menu_select("Paste", app.Notepad.ctrl_())
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select(
# "Right To Left Reading Order", app.Notepad.ctrl_())
#app.PopupMenu.menu_select(
# "Show unicode control characters", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Right To Left Reading Order", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select(
# "Insert Unicode control character -> IAFS", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.type_keys("{ESC}")
# the following shows that Sendtext does not accept
# accented characters - but does allow 'control' characters
app.Notepad.Edit.type_keys(u"{END}{ENTER}SendText d\xf6\xe9s "
u"s\xfcpp\xf4rt \xe0cce\xf1ted characters!!!", with_spaces = True)
# Try and save
app.Notepad.menu_select("File->SaveAs")
app.SaveAs.EncodingComboBox.select("UTF-8")
app.SaveAs.FileNameEdit.set_edit_text("Example-utf8.txt")
app.SaveAs.Save.close_click()
# my machine has a weird problem - when connected to the network
# the SaveAs Dialog appears - but doing anything with it can
# cause a LONG delay - the easiest thing is to just wait
# until the dialog is no longer active
# - Dialog might just be gone - because click worked
# - dialog might be waiting to disappear
# so can't wait for next dialog or for it to be disabled
# - dialog might be waiting to display message box so can't wait
# for it to be gone or for the main dialog to be enabled.
    # while the dialog exists wait up to 30 seconds (and yes it can
# take that long on my computer sometimes :-( )
app.SaveAsDialog2.Cancel.wait_not('enabled')
# If file exists - it asks you if you want to overwrite
try:
app.SaveAs.Yes.wait('exists').close_click()
except pywinauto.MatchError:
print('Skip overwriting...')
# exit notepad
app.Notepad.menu_select("File->Exit")
if not run_with_appdata:
app.WriteAppData(os.path.join(scriptdir, "Notepad_fast.pkl"))
print("That took %.3f to run"% (time.time() - start))
if __name__ == "__main__":
run_notepad()
|
python
|
# Make sure to have CoppeliaSim running, with the following scene loaded:
#
# scenes/messaging/ikMovementViaRemoteApi.ttt
#
# Do not launch simulation, then run this script
from zmqRemoteApi import RemoteAPIClient
print('Program started')
client = RemoteAPIClient()
sim = client.getObject('sim')
tipHandle = sim.getObject('/LBR4p/tip')
targetHandle = sim.getObject('/LBR4p/target')
# Set-up some movement variables:
maxVel = 0.1
maxAccel = 0.01
maxJerk = 80
# Start simulation:
sim.startSimulation()
def cb(pose,vel,accel,handle):
sim.setObjectPose(handle,-1,pose)
# Send movement sequences:
initialPose = sim.getObjectPose(tipHandle,-1)
targetPose = [0, 0, 0.85, 0, 0, 0, 1]
sim.moveToPose(-1,initialPose,[maxVel],[maxAccel],[maxJerk],targetPose,cb,targetHandle,[1,1,1,0.1])
targetPose = [
0, 0, 0.85,
-0.7071068883, -6.252754758e-08, -8.940695295e-08, -0.7071067691
]
sim.moveToPose(-1,sim.getObjectPose(tipHandle,-1),[maxVel],[maxAccel],[maxJerk],targetPose,cb,targetHandle,[1,1,1,0.1])
sim.moveToPose(-1,sim.getObjectPose(tipHandle,-1),[maxVel],[maxAccel],[maxJerk],initialPose,cb,targetHandle,[1,1,1,0.1])
sim.stopSimulation()
print('Program ended')
|
python
|
import attr
from .document import Document
from .has_settings import HasSettings
from .templated import Templated
import exam_gen.util.logging as logging
log = logging.new(__name__, level="DEBUG")
@attr.s
class GradeData():
points = attr.ib(default=None)
children = attr.ib(factory=dict)
comment = attr.ib(default=None, kw_only = True)
ungraded_points = attr.ib(default=None, init=False)
weighted_points = attr.ib(default=None, init=False)
total_weight = attr.ib(default=None, init=False)
@property
def percent_grade(self):
return (self.weighted_points / self.total_weight)
@property
def percent_ungraded(self):
return (self.ungraded_points / self.total_weight)
    @staticmethod
    def normalize(data):
        if isinstance(data, GradeData):
            return data
        elif isinstance(data, dict):
            return GradeData(children=data)
        else:
            return GradeData(points=data)
    def merge(self, other):
        other = GradeData.normalize(other)
        if other.points is not None:
            self.points = other.points
            self.comment = other.comment
        for (name, child) in other.children.items():
            if name in self.children:
                self.children[name] = GradeData.normalize(
                    self.children[name]).merge(child)
            else:
                self.children[name] = GradeData.normalize(child)
        return self
@attr.s
class Gradeable(Templated):
_weight = attr.ib(default=None, kw_only=True)
_points = attr.ib(default=None, init=False)
_comment = attr.ib(default=None, init=False)
settings.new_group(
"grade", doc=
"""
Settings covering how grades are managed for this problem.
""")
settings.grade.new_value(
"max_points", default=1, doc=
"""
The maximum number of points that can be assigned to problem
""")
settings.grade.new_value(
"weight", default=None, doc=
"""
The weight of this problem relative to others in exam. If `None`, this
is assumed to be the same as `settings.grade.max_points`.
""")
def __attrs_post_init__(self):
if hasattr(super(Gradeable,self), '__attrs_post_init__'):
super(Gradeable,self).__attrs_post_init__()
# stupid way of sneaking an init parameter into the settings
if self._weight != None:
self.settings.grade.weight = self._weight
# need this for a semi-responsive default setting
if self.settings.grade.weight == None:
self.settings.grade.weight = self.settings.grade.max_points
def set_points(self, points, comment=None):
if len(self.questions) > 0:
raise RuntimeError("Cannot assign grade to doc with sub-questions")
if points != None:
self._points = points
if self._points > self.settings.grade.max_points:
raise RuntimeError("Assigned grade larger than max_points allowed")
if comment != None:
self._comment = comment
@property
def ungraded(self):
return self._points == None
@property
def percent_grade(self):
"""
returns a grade from between 0 and 1
"""
return (self._points / self.settings.grade.max_points)
@property
def weighted_grade(self):
"""
returns a grade after weighting
"""
return (self.settings.grade.weight * self.percent_grade)
@property
def total_weight(self):
return self.settings.grade.weight
def build_template_spec(self, build_info):
spec = super(Gradeable, self).build_template_spec(
build_info)
grades = dict()
if self._points != None:
grades['points'] = self._points
if self._comment != None:
grades['comment'] = self._comment
if grades != {}:
spec.context['grade'] = grades
return spec
def distribute_scores(obj, grades):
    """
    Takes a document and splits out all the grade information in a
    `GradeData` to its children.
    """
# Check if valid
if not isinstance(obj, Document):
raise RuntimeError("Can't distribute grades to non-document")
    # for convenience allow the user to supply grades or points directly
    grades = GradeData.normalize(grades)
    # Copy out basic grades
    if isinstance(obj, Gradeable):
        obj.set_points(grades.points, comment=grades.comment)
elif grades.points != None:
raise RuntimeError("Trying to set grade on non-gradeable doc.")
# apply to children
for (name, sub_q) in obj.questions.items():
if name in grades.children:
            distribute_scores(sub_q, grades.children[name])
# get extra keys and throw error if any
extra = [k for k in grades.children.keys() if k not in obj.questions]
if len(extra) != 0:
raise RuntimeError(
"Tried to supply grades for non-existent children : ".format(
extra
))
def collect_grades(obj):
"""
Goes through a document and gathers the grade info from all the
sub-elements, keeping track of grade and weight
"""
grade_data = GradeData()
# check if valid
if not isinstance(obj, Document):
raise RuntimeError("Can't gather grades from non-document")
if isinstance(obj, Gradeable):
grade_data.points = obj._points
grade_data.comment = obj._comment
# Either sum up the information from the sub-questions
if len(obj.questions) != 0:
grade_data.ungraded_points = 0
grade_data.weighted_points = 0
grade_data.total_weight = 0
for (name, sub_q) in obj.questions.items():
sub_data = collect_grades(sub_q)
grade_data.children[name] = sub_data
grade_data.total_weight += sub_data.total_weight
grade_data.ungraded_points += sub_data.ungraded_points
grade_data.weighted_points += sub_data.weighted_points
# or just use the leaf question's data
else:
grade_data.total_weight = obj.total_weight
if obj.ungraded:
grade_data.weighted_points = 0
grade_data.ungraded_points = obj.total_weight
else:
grade_data.weighted_points = obj.weighted_grade
grade_data.ungraded_points = 0
return grade_data
|
python
|
import sys
def op(arg1, arg2):
if (len(sys.argv) != 3):
raise Exception("InputError: only numbers\n\n")
if (arg1.isdigit() and arg2.isdigit()):
arg1 = int(arg1)
arg2 = int(arg2)
else:
raise Exception("InputError: only numbers\n\n")
print("Sum: ", arg1 + arg2)
print("Difference: ", arg1 - arg2)
print("Product: ", arg1 * arg2)
try:
print("Quotient: ", arg1 / arg2)
except Exception as e:
print ("Quotient: ERROR (", e, ")")
try:
print("Remainder: ", arg1 % arg2)
except Exception as e:
print ("Remainder: ERROR (", e, ")")
try:
op(sys.argv[1], sys.argv[2])
except IndexError:
print("Usage: python3 operations.py <number1> <number2> Example:\n\tpython3 operations.py 10 3")
except Exception as e:
print(e, "Usage: python3 operations.py <number1> <number2> Example:\n\tpython3 operations.py 10 3")
|
python
|
def xprop(layout, data, prop, enabled=True, **kwargs):
attrs = getattr(data.bl_rna, prop)[1]
name = attrs.get('name', prop)
lay = layout.row().split(percentage=0.33)
lay.label(name + ':')
lay = lay.row(align=True)
lay_l = lay.row(align=True)
lay_r = lay
if not enabled:
lay = lay.split(align=True)
lay.enabled = False
lay.prop(data, prop, text='', **kwargs)
return lay_l, lay_r
|
python
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='pyledsign',
version='1.01',
description='pyledsign - control led signs from python',
author='Kerry Schwab',
author_email='[email protected]',
url='http://www.python.org/tbd/',
packages=['pyledsign'],
)
|
python
|
from django.conf import settings
from django.http import Http404
from django.shortcuts import redirect, render
from .models import Link
def redirect_(request, key):
try:
link = Link.find_by_key(key.lower())
except Link.DoesNotExist:
raise Http404("Link does not exist.")
return redirect(link.url, permanent=settings.PERMANENT_REDIRECT)
def homepage(request):
return render(request, "homepage.html")
|
python
|
# Generated by Django 2.1 on 2018-08-08 04:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Email',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.PositiveSmallIntegerField(choices=[(1, 'Pending'), (2, 'Sent'), (3, 'Failed'), (4, 'Cancelled')], default=1)),
('status_updated', models.DateTimeField()),
('queued_until', models.DateTimeField(blank=True, null=True)),
('email_type', models.CharField(max_length=191)),
('sent_from', models.CharField(max_length=255)),
('subject', models.CharField(max_length=255)),
('recipients', models.TextField()),
('cc_to', models.TextField(blank=True, default='')),
('bcc_to', models.TextField(blank=True, default='')),
('reply_to', models.TextField(blank=True, default='')),
('text', models.TextField()),
('html', models.TextField(blank=True, default='')),
('error_message', models.TextField(blank=True, default='')),
('task_scheduler_id', models.CharField(blank=True, db_index=True, default='', editable=False, max_length=255)),
('related_obj_id', models.PositiveIntegerField(blank=True, editable=False, null=True)),
('related_obj_content_type', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType')),
],
options={
'ordering': ('-status_updated',),
},
),
]
|
python
|
"""Tests the DNC class implementation."""
import sonnet as snt
import tensorflow as tf
import unittest
from numpy.testing import assert_array_equal
from .. dnc import dnc
def suite():
"""Create testing suite for all tests in this module."""
suite = unittest.TestSuite()
suite.addTest(DNCTest('test_construction'))
return suite
class DNCTest(unittest.TestCase):
"""Tests for the DNC class."""
def test_construction(self):
"""Test the construction of a DNC."""
output_size = 10
d = dnc.DNC(output_size)
self.assertIsInstance(d, dnc.DNC)
def test_build(self):
"""Test the build of the DNC."""
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as sess:
output_size = 10
memory_size = 20
word_size = 8
num_read_heads = 3
hidden_size = 1
tests = [{ # batch_size = 1
'input': [[1, 2, 3]],
'batch_size': 1
}, { # batch_size > 1
'input': [[1, 2, 3], [4, 5, 6]],
'batch_size': 2,
}, { # can handle 2D input with batch_size > 1
'input': [[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[9, 8, 7],
[6, 5, 4],
[3, 2, 1]]],
'batch_size': 2,
}, { # 3D input with batch_size > 1
'input': [[[[1], [2]], [[3], [4]]],
[[[5], [6]], [[7], [8]]]],
'batch_size': 2,
}]
for test in tests:
i = tf.constant(test['input'], dtype=tf.float32)
batch_size = test['batch_size']
d = dnc.DNC(
output_size,
memory_size=memory_size,
word_size=word_size,
num_read_heads=num_read_heads,
hidden_size=hidden_size)
prev_state = d.initial_state(batch_size, dtype=tf.float32)
output_vector, dnc_state = d(i, prev_state)
assert_array_equal([batch_size, output_size],
sess.run(tf.shape(output_vector)))
assert_array_equal(
[batch_size, num_read_heads, word_size],
sess.run(tf.shape(dnc_state.read_vectors)))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
python
|
# Source : https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/
# Author : henrytine
# Date : 2020-08-19
#####################################################################################################
#
# Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
#
# According to the definition of LCA on Wikipedia: "The lowest common ancestor is defined between two
# nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node
# to be a descendant of itself)."
#
# Given the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]
#
# Example 1:
#
# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
# Output: 3
# Explanation: The LCA of nodes 5 and 1 is 3.
#
# Example 2:
#
# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4
# Output: 5
# Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant of itself according to
# the LCA definition.
#
# Note:
#
# All of the nodes' values will be unique.
# p and q are different and both values will exist in the binary tree.
#
#####################################################################################################
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if root in (None, p, q): return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left is None:
return right
elif right is None:
return left
else:
return root
# return self.helper(root, p, q)
# def helper(self, node, p, q):
# if node in (None, p, q):
# return node
# left = self.helper(node.left, p, q)
# right = self.helper(node.right, p, q)
# if left is None:
# return right
# elif right is None:
# return left
# else:
# return node
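# Illustrative check (not part of the submission): rebuild the example tree
# [3,5,1,6,2,0,8,null,null,7,4] from the problem statement with a minimal
# TreeNode stand-in and confirm LCA(5, 1) == 3 and LCA(5, 4) == 5.
if __name__ == '__main__':
    class TreeNode(object):
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    nodes = {v: TreeNode(v) for v in [3, 5, 1, 6, 2, 0, 8, 7, 4]}
    nodes[3].left, nodes[3].right = nodes[5], nodes[1]
    nodes[5].left, nodes[5].right = nodes[6], nodes[2]
    nodes[1].left, nodes[1].right = nodes[0], nodes[8]
    nodes[2].left, nodes[2].right = nodes[7], nodes[4]

    solution = Solution()
    print(solution.lowestCommonAncestor(nodes[3], nodes[5], nodes[1]).val)  # 3
    print(solution.lowestCommonAncestor(nodes[3], nodes[5], nodes[4]).val)  # 5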
|
python
|
import pymysql
import urllib.request
from bs4 import BeautifulSoup
import requests
def connectDatabase():
"""Create database connection"""
global db
db = pymysql.connect(host='localhost', user='root', password='',
db='vg_dapi', cursorclass=pymysql.cursors.DictCursor,charset='utf8')
def getappid(appid_games_list, name):
""" Function responsable to get the App ID of a game, given a name"""
for i in appid_games_list:
if i['name'] == name:
print(name + " App ID: " + str(i['appid']))
return i['appid']
def getgameinfo(urlsteam, appid, vgnamesteam):
pageurl = urllib.request.Request(urlsteam + str(appid))
#Query the website and return the html to the variable 'page'
page = urllib.request.urlopen(pageurl)
#Parse the html in the 'page' variable, and store it in Beautiful Soup format
soup = BeautifulSoup(page, "lxml")
reviews = soup.find('span', class_='nonresponsive_hidden responsive_reviewdesc')
if reviews is None:
pass
else:
vgsteamscores_list = [appid, reviews.text, vgnamesteam]
vgsteamscores_sql = "UPDATE `gameplatform` SET `steamID` = %s, `steam_score` = %s WHERE (SELECT `id` FROM `game` WHERE `name` = %s) = `gameID`"
cur.execute(vgsteamscores_sql, vgsteamscores_list)
db.commit()
if __name__ == '__main__':
url = "http://store.steampowered.com/app/"
    # request responsible for returning a JSON object with all the Steam games
r = requests.get('https://api.steampowered.com/ISteamApps/GetAppList/v2/')
#store appID and Names of the games into a List
gameslist = r.json()['applist']['apps']
connectDatabase()
cur = db.cursor()
cur.execute("SELECT name FROM game")
vgnames_list = cur.fetchall()
for vgname in vgnames_list:
if getappid(gameslist, vgname['name']) is None:
pass
else:
appidgame = getappid(gameslist, vgname['name'])
getgameinfo(url, appidgame, vgname['name'])
|
python
|
from mycroft import MycroftSkill, intent_file_handler
class RoomBooking(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('booking.room.intent')
def handle_booking_room(self, message):
amount = message.data.get('amount')
building = message.data.get('building')
time = message.data.get('time')
self.speak_dialog('booking.room', data={
'time': time,
'amount': amount,
'building': building
})
def create_skill():
return RoomBooking()
|
python
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class DialogEvent:
def __init__(self, bubble: bool = False, name: str = "", value: object = None):
self.bubble = bubble
self.name = name
self.value: object = value
|
python
|
import traceback
from twisted.internet import reactor
def stack():
print("The Python Stack.")
traceback.print_stack()
reactor.callWhenRunning(stack)
reactor.run()
|
python
|
import os
import sys
import codecs
import difflib
sys.path.insert(0, os.path.dirname(__file__))
from logger import log
def restore_file_case(text_file, orig_file, debug=False):
text_io = codecs.open(text_file, 'r', encoding='utf8')
orig_io = codecs.open(orig_file, 'r', encoding='utf8')
for line in text_io:
orig_line = orig_io.next()
result = restore_sentence_case(line.strip(), orig_line.strip(), debug)
assert result.lower() == line.strip().lower(), \
"Case restoration changed a sentence!\n{}\n{}" \
.format(line.strip(), result)
yield result.encode('utf8', 'replace')
text_io.close()
orig_io.close()
def restore_sentence_case(sent, orig_sent, debug=False):
if debug and sent != orig_sent:
log.debug(u'toks: {}'.format(sent).encode('utf8', 'replace'))
log.debug(u'orig: {}'.format(orig_sent).encode('utf8', 'replace'))
toks = sent.split()
orig_toks = orig_sent.split()
lc_toks = [tok.lower() for tok in toks]
lc_orig_toks = [tok.lower() for tok in orig_toks]
matcher = difflib.SequenceMatcher(None, lc_toks, lc_orig_toks)
new_toks = []
for tag, i1, i2, j1, j2 in matcher.get_opcodes():
if debug and tag != 'equal' and sent != orig_sent:
log.debug(u" {}: ({},{}) '{}' -> ({},{}) '{}'" \
.format(tag,
i1, i2, ' '.join(toks[i1:i2]),
j1, j2, ' '.join(orig_toks[j1:j2])) \
.encode('utf8', 'replace'))
if tag == 'equal':
new_toks += orig_toks[j1:j2]
elif tag == 'replace':
word = ' '.join(toks[i1:i2])
orig_word = ' '.join(orig_toks[j1:j2])
new_toks += [restore_word_case(word, orig_word)]
elif tag == 'delete':
if i1 == 0:
tmp = toks[i1:i2]
if is_capitalized(orig_toks[0]):
orig_toks[0] = orig_toks[0].lower()
tmp[0] = tmp[0].capitalize()
elif is_uppercased(orig_toks[0]):
tmp[0] = tmp[0].capitalize()
new_toks += tmp
else:
new_toks += toks[i1:i2]
elif tag == 'insert':
if i1 == 0 and is_capitalized(orig_toks[j1]) and \
is_lowercased(orig_toks[j2]):
orig_toks[j2] = orig_toks[j2].capitalize()
new_sent = ' '.join(new_toks)
if debug and sent != orig_sent:
log.debug("sent: {}".format(new_sent))
return new_sent
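# Illustrative note (not part of the original module): get_opcodes() used
# above yields (tag, i1, i2, j1, j2) tuples describing how to turn the
# tokenized sentence into the original one. For example:
#
#   >>> difflib.SequenceMatcher(None, ['i', 'love', 'cats'],
#   ...                         ['i', 'love', 'dogs']).get_opcodes()
#   [('equal', 0, 2, 0, 2), ('replace', 2, 3, 2, 3)]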
def restore_word_case(tok, orig_tok):
if tok.lower() == orig_tok.lower():
return orig_tok
if is_lowercased(orig_tok):
return tok.lower()
elif is_uppercased(orig_tok):
return tok.upper()
elif is_capitalized(orig_tok):
return tok.capitalize()
else:
return tok
def is_lowercased(tok):
return tok == tok.lower()
def is_uppercased(tok):
return tok == tok.upper()
def is_capitalized(tok):
return tok == tok.capitalize()
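# A small usage sketch (illustrative values only): case is copied token-by-token
# from the original sentence wherever the lowercased tokens still line up.
#
#   restore_sentence_case('this is a test', 'This is a TEST')
#   -> 'This is a TEST'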
|
python
|
"""Test for our weighted graph."""
# {'A': {'B': 7, 'C': 9}, 'B': {'D': 2, 'E': 4}, 'C': {'F':6}}
"""Test our graph implementation."""
import pytest
from weighted_graph import Weighted
@pytest.fixture
def new_weighted_graph():
"""Graph for testing."""
from weighted_graph import Weighted
empty_graph = Weighted()
return empty_graph
@pytest.fixture
def graph_no_edges():
"""Test graph with nodes only."""
from weighted_graph import Weighted
example_graph = Weighted()
example_graph.add_node('BB')
example_graph.add_node(82)
example_graph.add_node(99)
example_graph.add_node('AA')
return example_graph
@pytest.fixture
def graph_with_edges():
"""Test graph with nodes only."""
from weighted_graph import Weighted
new_graph = Weighted()
new_graph.add_node('A')
new_graph.add_node('B')
new_graph.add_node('C')
new_graph.add_node('D')
new_graph.add_node('E')
new_graph.add_node('F')
new_graph.add_edge('A', 'B', 7)
new_graph.add_edge('A', 'C', 9)
new_graph.add_edge('B', 'D', 2)
new_graph.add_edge('B', 'E', 4)
new_graph.add_edge('C', 'F', 6)
return new_graph
def test_graph_init_no_values_taken():
"""Ensure we raise an error if we try to init with a value."""
from weighted_graph import Weighted
with pytest.raises(TypeError):
a_graph = Weighted(2)
def test_graph_init_success(new_weighted_graph):
"""Ensure our new graph is in fact a graph."""
assert isinstance(new_weighted_graph, Weighted)
def test_graph_adds_and_lists_nodes(graph_no_edges):
"""Ensure we get list of nodes."""
listy = ['BB', 82, 99, 'AA']
for node in listy:
assert node in graph_no_edges.nodes()
def test_graph_adds_nodes_and_edges(graph_no_edges):
"""Ensure we add edges to the nodes."""
graph_no_edges.add_edge('Louisiana Crawfish', 'WA Invasive Species', 3)
assert graph_no_edges.edges() == [(
'Louisiana Crawfish', 'WA Invasive Species', 3)]
def test_graph_lists_adds_and_lists_edges(graph_no_edges):
"""Ensure we add edges to the nodes."""
graph_no_edges.add_edge(82, 34, 4)
graph_no_edges.add_edge(99, 'AA', 6)
assert (82, 34, 4) in graph_no_edges.edges()
assert (99, 'AA', 6) in graph_no_edges.edges()
def test_graph_deletes_nodes(graph_with_edges):
"""Ensure we can delete a node."""
graph_with_edges.del_nodes('B')
listy = ['A', 'C', 'D', 'E', 'F']
for node in listy:
assert node in graph_with_edges.nodes()
assert 'B' not in graph_with_edges.nodes()
def test_graph_cant_delete_an_unpresent_node(graph_no_edges):
"""Ensure we can't delete that doesn't exist."""
with pytest.raises(ValueError):
graph_no_edges.del_nodes(3.14)
def test_graph_cant_delete_without_argument(graph_no_edges):
"""Ensure we can't delete without an argument."""
with pytest.raises(TypeError):
graph_no_edges.del_nodes()
def test_del_some_edges(graph_with_edges):
"""Ensure we delete edges."""
graph_with_edges.del_edges('A', 'B')
assert graph_with_edges['A'] == {'C': 9}
def test_cant_delete_nonexistent_edge(graph_with_edges):
"""Ensure we can't delete a nonexistent edge."""
with pytest.raises(KeyError):
graph_with_edges.del_edges('BB', 'Badgers')
def test_nodes_exist(graph_no_edges):
"""Ensure we can assert nodes are in a graph."""
for node in graph_no_edges:
assert graph_no_edges.has_node(node)
def test_false_if_no_node(graph_no_edges):
"""Ensure we get false."""
false_nodes = ['land submarine', 'Portland Timbers', 'tug cable scope', 100]
for node in false_nodes:
assert graph_no_edges.has_node(node) is False
def test_node_neighbors(graph_no_edges):
"""Ensure we get the right neighbors for a node."""
graph_no_edges.add_edge('BB', 82, 5)
assert graph_no_edges.neighbors('BB') == {82: 5}
def test_node_without_neighbors(graph_no_edges):
"""Ensure we get None back for neighbors."""
assert graph_no_edges.neighbors(99) == {}
def test_node_error_if_nonpresent(graph_no_edges):
"""Can not get neighbors of nonpresent node."""
with pytest.raises(ValueError):
graph_no_edges.adjacent('Raccoon', 'Rocket')
def test_adjacent_nodes(graph_with_edges):
"""Ensure we get adjacent edges."""
assert graph_with_edges.adjacent('A', 'B')
def test_adjacent_none(graph_with_edges):
"""Ensure we get false."""
assert graph_with_edges.adjacent('B', 'A') is False
def test_adjacent_unpresent(graph_with_edges):
"""Ensure we get an error."""
with pytest.raises(ValueError):
graph_with_edges.adjacent('Captain Picard', 'Star Wars')
def test_add_node_value_error_val_exists(graph_no_edges):
"""Ensure a value is not added twice."""
with pytest.raises(ValueError):
graph_no_edges.add_node('BB')
def test_del_edges_has_no_edges_to_delete(graph_with_edges):
"""Ensure there are no edges to delete."""
with pytest.raises(KeyError):
graph_with_edges.del_edges('F', 'G')
def test_neighbors_value_error_not_in_graph(graph_with_edges):
"""Ensure the value error raises if no neighbors."""
with pytest.raises(ValueError):
graph_with_edges.neighbors('G')
@pytest.fixture
def dijkstra_alg():
"""Test dijkstra method."""
from weighted_graph import Weighted
new_graph = Weighted()
new_graph.add_node('0')
new_graph.add_node('1')
new_graph.add_node('2')
new_graph.add_node('3')
new_graph.add_node('4')
new_graph.add_node('5')
new_graph.add_edge('0', '1', 1)
new_graph.add_edge('0', '2', 7)
new_graph.add_edge('1', '3', 9)
new_graph.add_edge('1', '5', 15)
new_graph.add_edge('2', '4', 4)
new_graph.add_edge('3', '5', 5)
new_graph.add_edge('3', '4', 10)
new_graph.add_edge('4', '5', 3)
return new_graph
def test_new_graph_returns_path_to_nodes(dijkstra_alg):
"""Test that the key value pairs are correct."""
assert dijkstra_alg.dijkstra('0') == {'1': 1, '2': 7, '3': 10, '4': 11, '5': 14}
def test_new_graph_returns_path_to_other_nodes(graph_with_edges):
"""Test that the key value pairs are correct."""
assert graph_with_edges.dijkstra('A') == {'B': 7, 'C': 9, 'D': 9, 'E': 11, 'F': 15}
def test_graph_with_nodes_pointing_at_each_other():
"""."""
from weighted_graph import Weighted
new_weighted = Weighted()
new_weighted.add_node('A')
new_weighted.add_node('B')
new_weighted.add_node('C')
new_weighted.add_node('D')
new_weighted.add_node('E')
new_weighted.add_node('F')
new_weighted.add_edge('A', 'B', 7)
new_weighted.add_edge('B', 'C', 9)
new_weighted.add_edge('B', 'E', 4)
new_weighted.add_edge('E', 'D', 2)
new_weighted.add_edge('D', 'C', 2)
new_weighted.add_edge('C', 'F', 6)
new_weighted.add_edge('C', 'A', 1)
assert new_weighted.dijkstra('A') == {'B': 7, 'E': 11, 'D': 13, 'C': 15, 'F': 21}
def test_dijkstra_index_error_raises(dijkstra_alg):
"""Ensure that index error raises for no node in graph."""
with pytest.raises(IndexError):
dijkstra_alg.dijkstra('7')
def test_bellman_ford_first_test_one():
"""Ensure we get same values as dijkstras."""
from weighted_graph import Weighted
new_weighted = Weighted()
new_weighted.add_node('A')
new_weighted.add_node('B')
new_weighted.add_node('C')
new_weighted.add_node('D')
new_weighted.add_node('E')
new_weighted.add_node('F')
new_weighted.add_edge('A', 'B', 7)
new_weighted.add_edge('B', 'C', 9)
new_weighted.add_edge('B', 'E', 4)
new_weighted.add_edge('E', 'D', 2)
new_weighted.add_edge('D', 'C', 2)
new_weighted.add_edge('C', 'F', 6)
new_weighted.add_edge('C', 'A', 1)
assert new_weighted.bellman_ford('A') == {'A': 0, 'B': 7, 'E': 11, 'D': 13, 'C': 15, 'F': 21}
# {'A': {'B': 7, 'C': 9}, 'B': {'D': 2, 'E': 4}, 'C': {'F': 6}}
def test_bellman_ford_first_test_two(dijkstra_alg):
"""Ensure we get same values as dijkstras."""
assert dijkstra_alg.bellman_ford('0') == {'0': 0, '1': 1, '2': 7, '3': 10, '4': 11, '5': 14}
# {'A': {'B': 7, 'C': 9}, 'B': {'D': 2, 'E': 4}, 'C': {'F': 6}}
def test_bellman_ford_with_negatives_one():
"""Ensure bellman works with negatives."""
from weighted_graph import Weighted
weighted = Weighted()
weighted.add_node('S')
weighted.add_node('E')
weighted.add_node('A')
weighted.add_node('D')
weighted.add_node('B')
weighted.add_node('C')
weighted.add_edge('S', 'E', 8)
weighted.add_edge('S', 'A', 10)
weighted.add_edge('E', 'D', 1)
weighted.add_edge('D', 'A', -4)
weighted.add_edge('D', 'C', -1)
weighted.add_edge('A', 'C', 2)
weighted.add_edge('C', 'B', -2)
weighted.add_edge('B', 'A', 1)
assert weighted.bellman_ford('S') == {'A': 5, 'B': 5, 'C': 7, 'D': 9, 'E': 8, 'S': 0}
def test_bellman_with_negatives_two():
"""Ensure it works with various cases of negatives."""
from weighted_graph import Weighted
weighted = Weighted()
weighted.add_node(0)
weighted.add_node(1)
weighted.add_node(2)
weighted.add_node(3)
weighted.add_node(4)
weighted.add_node(5)
weighted.add_edge(0, 1, 5)
weighted.add_edge(0, 2, 3)
weighted.add_edge(1, 3, 7)
weighted.add_edge(2, 3, -2)
weighted.add_edge(3, 0, 8)
weighted.add_edge(3, 4, 3)
weighted.add_edge(4, 5, 6)
weighted.add_edge(0, 5, 4)
assert weighted.bellman_ford(0) == {0: 0, 1: 5, 2: 3, 3: 1, 4: 4, 5: 4}
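# A minimal sketch of the interface these tests assume from weighted_graph.Weighted.
# It is written here purely for illustration (the class name, storage layout and
# error types are assumptions); the implementation under test lives in weighted_graph.py.
class WeightedSketch(object):
    """Illustrative directed, weighted graph backed by a dict of dicts."""

    def __init__(self):
        self._graph = {}  # {node: {neighbor: weight}}

    def add_node(self, val):
        """Add a node, refusing duplicates as the tests expect."""
        if val in self._graph:
            raise ValueError('node already in graph')
        self._graph[val] = {}

    def add_edge(self, src, dst, weight):
        """Add a directed edge, creating missing endpoints implicitly."""
        self._graph.setdefault(src, {})[dst] = weight
        self._graph.setdefault(dst, {})

    def neighbors(self, val):
        """Return the {neighbor: weight} mapping for a node."""
        if val not in self._graph:
            raise ValueError('node not in graph')
        return self._graph[val]

    def dijkstra(self, start):
        """Classic Dijkstra with a binary heap; assumes non-negative weights."""
        import heapq
        dist, heap = {}, [(0, start)]
        while heap:
            d, node = heapq.heappop(heap)
            if node in dist:
                continue
            dist[node] = d
            for nxt, weight in self._graph[node].items():
                if nxt not in dist:
                    heapq.heappush(heap, (d + weight, nxt))
        dist.pop(start, None)  # the dijkstra tests above exclude the start node
        return dist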
|
python
|
import os.path
from unittest import TestCase
from pkg_resources import require, DistributionNotFound
from subprocess import call
from sys import platform, executable, exit
from src.info import AppInfo
try:
    with open(os.path.join(AppInfo.root_dir, "requirements.txt")) as req_file:
        REQUIRED = req_file.read()
except Exception as e:
raise Exception(
f"Failed to locate requirements file. Maybe it was deleted?\n\n{str(e)}"
)
class Requirements(TestCase):
"""
Instance, solely here to ensure that all necessary
dependencies are installed.
"""
def test_req(self):
missing = []
requirements = self.extract_req(REQUIRED)
for _requirement in requirements:
_requirement = str(_requirement).strip()
with self.subTest(requirement=_requirement):
try:
require(_requirement)
except DistributionNotFound:
missing.append(_requirement)
return missing
def install_reqs(self, missing):
acceptable = {"y", "n", "yes", "no"}
answer = input(
"\n\033[96mDo you wish to install the aforementioned missing packages? [y/n]:\033[0m "
)
if answer.lower() in acceptable:
if "y" in answer.lower():
print("\n\n")
for missed in missing:
self.req(missed, acceptable)
print("\n\033[92mSuccessfully installed required dependencies!\033[0m")
else:
print("Exited successfully.")
exit(0)
def req(self, requirement, acceptable, heading=""):
if not heading:
heading = "\033[4m\033[91mNOTE: This is not an optional package."
ans = input(
f'{heading}\033[0m\033[96m\nAre you sure you want to install "{requirement}"? [y/n]:\033[0m '
)
if ans.lower() in acceptable:
if "y" in ans.lower():
call([executable, "-m", "pip", "install", requirement])
print("\n\n")
else:
print("\n")
extra = (
"\033[1m\033[91mThis package is not optional.\033[0m"
+ "\033[1m\033[91m You must install it.\033[0m"
)
self.req(requirement, acceptable, heading=extra)
else:
invalid = (
"\n\033[1m\033[91mInvalid option. "
+ 'Please use only "yes", "no", "y" or "n" to answer.'
)
self.req(requirement, acceptable, heading=invalid)
def extract_req(self, requirements):
deps = []
for requirement in [
r for r in requirements.split("\n") if r and r != " " and not "#" in r
]:
            # Requirement and optional environment-marker condition; the marker
            # may be absent, in which case `c` is an empty string.
            r, _, c = requirement.partition(";")
sys_platform = ""
if "sys_platform" in c.lower():
sys_platform = c.split("sys_platform == ")[1][:-1].split("'")[1]
if sys_platform and not platform.lower() == sys_platform:
continue
deps.append(r)
return deps
|
python
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import Any, Callable, Dict, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DataLoader, Sampler
from flash.core.data.io.input import DataKeys, Input
from flash.core.model import Task
from flash.core.registry import FlashRegistry
from flash.core.utilities.apply_func import get_callable_dict
from flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE
from flash.pointcloud.detection.backbones import POINTCLOUD_OBJECT_DETECTION_BACKBONES
__FILE_EXAMPLE__ = "pointcloud_detection"
class PointCloudObjectDetector(Task):
"""The ``PointCloudObjectDetector`` is a :class:`~flash.core.classification.ClassificationTask` that classifies
pointcloud data.
Args:
num_classes: The number of classes (outputs) for this :class:`~flash.core.model.Task`.
backbone: The backbone name (or a tuple of ``nn.Module``, output size) to use.
backbone_kwargs: Any additional kwargs to pass to the backbone constructor.
loss_fn: The loss function to use. If ``None``, a default will be selected by the
:class:`~flash.core.classification.ClassificationTask` depending on the ``multi_label`` argument.
optimizer: Optimizer to use for training.
lr_scheduler: The LR scheduler to use during training.
metrics: Any metrics to use with this :class:`~flash.core.model.Task`. If ``None``, a default will be selected
by the :class:`~flash.core.classification.ClassificationTask` depending on the ``multi_label`` argument.
learning_rate: The learning rate for the optimizer.
lambda_loss_cls: The value to scale the loss classification.
lambda_loss_bbox: The value to scale the bounding boxes loss.
lambda_loss_dir: The value to scale the bounding boxes direction loss.
"""
backbones: FlashRegistry = POINTCLOUD_OBJECT_DETECTION_BACKBONES
required_extras: str = "pointcloud"
def __init__(
self,
num_classes: int,
backbone: Union[str, Tuple[nn.Module, int]] = "pointpillars_kitti",
backbone_kwargs: Optional[Dict] = None,
loss_fn: LOSS_FN_TYPE = None,
optimizer: OPTIMIZER_TYPE = "Adam",
lr_scheduler: LR_SCHEDULER_TYPE = None,
metrics: METRICS_TYPE = None,
learning_rate: float = 1e-2,
lambda_loss_cls: float = 1.0,
lambda_loss_bbox: float = 1.0,
lambda_loss_dir: float = 1.0,
):
super().__init__(
model=None,
loss_fn=loss_fn,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
metrics=metrics,
learning_rate=learning_rate,
)
self.save_hyperparameters()
if backbone_kwargs is None:
backbone_kwargs = {}
if isinstance(backbone, tuple):
self.backbone, out_features = backbone
else:
self.model, out_features, self.collate_fn = self.backbones.get(backbone)(**backbone_kwargs)
self.backbone = self.model.backbone
self.neck = self.model.neck
self.loss_fn = get_callable_dict(self.model.loss)
if __FILE_EXAMPLE__ not in sys.argv[0]:
self.model.bbox_head.conv_cls = self.head = nn.Conv2d(
out_features, num_classes, kernel_size=(1, 1), stride=(1, 1)
)
def compute_loss(self, losses: Dict[str, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
losses = losses["loss"]
return (
self.hparams.lambda_loss_cls * losses["loss_cls"]
+ self.hparams.lambda_loss_bbox * losses["loss_bbox"]
+ self.hparams.lambda_loss_dir * losses["loss_dir"]
)
def compute_logs(self, logs: Dict[str, Any], losses: Dict[str, torch.Tensor]):
logs.update({"loss": self.compute_loss(losses)})
return logs
def training_step(self, batch: Any, batch_idx: int) -> Any:
return super().training_step((batch, batch), batch_idx)
def validation_step(self, batch: Any, batch_idx: int) -> Any:
super().validation_step((batch, batch), batch_idx)
def test_step(self, batch: Any, batch_idx: int) -> Any:
super().validation_step((batch, batch), batch_idx)
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
results = self.model(batch)
boxes = self.model.inference_end(results, batch)
return {
DataKeys.INPUT: getattr(batch, "point", None),
DataKeys.PREDS: boxes,
DataKeys.METADATA: [a["name"] for a in batch.attr],
}
def forward(self, x) -> torch.Tensor:
"""First call the backbone, then the model head."""
# hack to enable backbone to work properly.
self.model.device = self.device
return self.model(x)
def _process_dataset(
self,
dataset: Input,
batch_size: int,
num_workers: int,
pin_memory: bool,
collate_fn: Callable,
shuffle: bool = False,
drop_last: bool = True,
sampler: Optional[Sampler] = None,
**kwargs
) -> DataLoader:
dataset.input_transform_fn = self.model.preprocess
dataset.transform_fn = self.model.transform
return DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
shuffle=shuffle,
drop_last=drop_last,
sampler=sampler,
)
|
python
|
#!/usr/bin/env python
import discord
import configparser
from libs import raid_combat
# Setup the config and Discord client
config = configparser.RawConfigParser()
config.read('config.conf')
client = discord.Client()
# create the dict of combat managers for each server
combat_managers = {}
@client.event
async def on_ready():
"""
Fires when the account is logged in.
:return:
"""
print('Logged in as {} with the ID {}\n'.format(client.user.name, client.user.id))
# setup a combat manager for each server connected
for server in client.servers:
combat_managers[server.name] = raid_combat.CombatManager(client, server)
@client.async_event
async def on_message(message):
"""
Fires when a message is received.
:param message: Discord message object
:return:
"""
if message.content == '!test':
await combat_managers[message.server.name].start_combat()
@client.async_event
async def on_reaction_add(reaction, user):
# await client.send_message(reaction.message.channel, "{} reacted with {}".format(user.name, reaction.emoji))
if client.user != user:
await combat_managers[reaction.message.server.name].route_action(reaction, user)
await client.remove_reaction(reaction.message, reaction.emoji, user)
if __name__ == '__main__':
token = config.get('Account', 'token')
client.run(token)
|
python
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
apikey = 'mykey'
secretkey = 'mysecret'
Driver = get_driver(Provider.AURORACOMPUTE)
conn = Driver(key=apikey, secret=secretkey)
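# A short follow-up sketch: once connected, the standard libcloud compute API can
# be used against the driver, e.g. listing the nodes in the account.
nodes = conn.list_nodes()
for node in nodes:
    print(node.name, node.state)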
|
python
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import BinaryIO, Iterable, Sequence, Tuple
from bentoml.adapters.file_input import FileInput
from bentoml.adapters.utils import (
check_file_extension,
get_default_accept_image_formats,
)
from bentoml.types import InferenceTask
from bentoml.utils.lazy_loader import LazyLoader
# BentoML optional dependencies, using lazy load to avoid ImportError
imageio = LazyLoader('imageio', globals(), 'imageio')
numpy = LazyLoader('numpy', globals(), 'numpy')
ApiFuncArgs = Tuple[
Sequence['numpy.ndarray'],
]
class ImageInput(FileInput):
"""Transform incoming image data from http request, cli or lambda event into numpy
array.
Handle incoming image data from different sources, transform them into numpy array
and pass down to user defined API functions
* If you want to operate raw image file stream or PIL.Image objects, use lowlevel
alternative FileInput.
Args:
accept_image_formats (string[]): A list of acceptable image formats.
Default value is loaded from bentoml config
'apiserver/default_image_input_accept_file_extensions', which is
set to ['.jpg', '.png', '.jpeg', '.tiff', '.webp', '.bmp'] by default.
List of all supported format can be found here:
https://imageio.readthedocs.io/en/stable/formats.html
pilmode (string): The pilmode to be used for reading image file into numpy
array. Default value is 'RGB'. Find more information at:
https://imageio.readthedocs.io/en/stable/format_png-pil.html
Raises:
ImportError: imageio package is required to use ImageInput
Example:
>>> from bentoml import BentoService, api, artifacts
>>> from bentoml.frameworks.tensorflow import TensorflowSavedModelArtifact
>>> from bentoml.adapters import ImageInput
>>>
>>> CLASS_NAMES = ['cat', 'dog']
>>>
>>> @artifacts([TensorflowSavedModelArtifact('classifier')])
>>> class PetClassification(BentoService):
>>> @api(input=ImageInput())
>>> def predict(self, image_ndarrays):
        >>> results = self.artifacts.classifier.predict(image_ndarrays)
>>> return [CLASS_NAMES[r] for r in results]
"""
def __init__(
self, accept_image_formats=None, pilmode="RGB", **base_kwargs,
):
        assert imageio, "`imageio` package is required to use ImageInput"
super().__init__(**base_kwargs)
if 'input_names' in base_kwargs:
raise TypeError(
"ImageInput doesn't take input_names as parameters since bentoml 0.8."
"Update your Service definition "
"or use LegacyImageInput instead(not recommended)."
)
self.pilmode = pilmode
self.accept_image_formats = set(
accept_image_formats or get_default_accept_image_formats()
)
@property
def config(self):
return {
# Converting to list, google.protobuf.Struct does not work with tuple type
"accept_image_formats": list(self.accept_image_formats),
"pilmode": self.pilmode,
}
@property
def request_schema(self):
return {
"image/*": {"schema": {"type": "string", "format": "binary"}},
"multipart/form-data": {
"schema": {
"type": "object",
"properties": {
"image_file": {"type": "string", "format": "binary"}
},
}
},
}
@property
def pip_dependencies(self):
return ["imageio"]
def extract_user_func_args(
self, tasks: Iterable[InferenceTask[BinaryIO]]
) -> ApiFuncArgs:
img_list = []
for task in tasks:
if getattr(task.data, "name", None) and not check_file_extension(
task.data.name, self.accept_image_formats
):
task.discard(
http_status=400,
err_msg=f"Current service only accepts "
f"{self.accept_image_formats} formats",
)
continue
try:
img_array = imageio.imread(task.data, pilmode=self.pilmode)
img_list.append(img_array)
except ValueError as e:
task.discard(http_status=400, err_msg=str(e))
return (img_list,)
|
python
|
# app/chats/forms.py
|
python
|
from django.views.generic import UpdateView, ListView
import pyperclip
from django.http import HttpResponse
from django.template.loader import render_to_string
from django.http.response import Http404
from django.shortcuts import render
from .models import Image, Categories, Location
# modal window settings
class ModalListView(ListView):
model = Image
template_name = 'welcome.html'
def get_queryset(self):
return Image.objects.all()
class ModalUpdateView(UpdateView):
model = Image
template_name = 'single_img.html'
def dispatch(self, *args, **kwargs):
self.id = kwargs['pk']
return super(ModalUpdateView, self).dispatch(*args, **kwargs)
# Create your views here.
def index(request):
title = 'sue gallery'
images = Image.objects.all()[3:9]
allimages = Image.objects.all()
image1 = Image.objects.get(id = 1)
image2 = Image.objects.get(id = 2)
image3 = Image.objects.get(id = 3)
return render(request, 'welcome.html', {'title':title, 'images':images, 'allimages':allimages,
'image1':image1, 'image2':image2, 'image3':image3})
def gallery_disp(request):
title = 'Gallery Display'
if 'location' in request.GET and request.GET['location']:
search_word = request.GET.get('location')
message = f'Filtered by Location : {search_word}'
location_images = Image.filter_by_location(search_word)
return render(request, 'gallery_display.html', {'message':message, 'images':location_images})
else:
images = Image.objects.all()
message = 'Not Filtered'
categories = Categories.objects.all()
locations = Location.objects.all()
return render (request, 'gallery_display.html', {'message':message,'title':title, 'images':images, 'categories':categories, 'locations':locations})
def single_image(request, image_id):
try:
single_image = Image.objects.get(id=image_id)
    except Image.DoesNotExist:
raise Http404('Image Not Available')
return render(request, 'single_img.html', {'single_image': single_image})
def navbar_categories_show(request):
all_items = Categories.objects.all()
return render (request,'navbar.html', {'all_items':all_items})
def search_images(request):
title = 'Category search results'
if 'category_image' in request.GET and request.GET['category_image']:
search_term = request.GET.get('category_image')
message = f'{search_term}'
result_images = Image.search_by_category(search_term)
categories = Categories.objects.all()
return render(request, 'search_results.html', {'message':message,'title':title, 'result_images':result_images, 'categories':categories})
else:
message = 'You have not searched for anything'
return render(request, 'search_results.html', {'message':message, 'title':title})
|
python
|
from os import environ
def assert_in(file, files_to_check):
if file not in files_to_check:
raise AssertionError("{} does not exist in the list".format(str(file)))
return True
def assert_in_env(check_list: list):
for item in check_list:
assert_in(item, environ.keys())
return True
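# A minimal usage sketch (the variable names are purely illustrative): fail fast
# at startup when required configuration is missing from the environment.
if __name__ == "__main__":
    assert_in_env(["PATH", "HOME"])  # raises AssertionError if a key is absent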
|
python
|
from django.contrib import messages
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from misago.admin.views import generic
from misago.users.forms.admin import RankForm
from misago.users.models import Rank
class RankAdmin(generic.AdminBaseMixin):
root_link = 'misago:admin:users:ranks:index'
model = Rank
form = RankForm
templates_dir = 'misago/admin/ranks'
message_404 = _("Requested rank does not exist.")
def update_roles(self, target, roles):
target.roles.clear()
if roles:
target.roles.add(*roles)
def handle_form(self, form, request, target):
super(RankAdmin, self).handle_form(form, request, target)
self.update_roles(target, form.cleaned_data['roles'])
class RanksList(RankAdmin, generic.ListView):
ordering = (('order', None), )
class NewRank(RankAdmin, generic.ModelFormView):
message_submit = _('New rank "%(name)s" has been saved.')
class EditRank(RankAdmin, generic.ModelFormView):
message_submit = _('Rank "%(name)s" has been edited.')
class DeleteRank(RankAdmin, generic.ButtonView):
def check_permissions(self, request, target):
message_format = {'name': target.name}
if target.is_default:
message = _('Rank "%(name)s" is default rank and can\'t be deleted.')
return message % message_format
if target.user_set.exists():
message = _('Rank "%(name)s" is assigned to users and can\'t be deleted.')
return message % message_format
def button_action(self, request, target):
target.delete()
message = _('Rank "%(name)s" has been deleted.')
messages.success(request, message % {'name': target.name})
class MoveDownRank(RankAdmin, generic.ButtonView):
def button_action(self, request, target):
try:
other_target = Rank.objects.filter(order__gt=target.order)
other_target = other_target.earliest('order')
except Rank.DoesNotExist:
other_target = None
if other_target:
other_target.order, target.order = target.order, other_target.order
other_target.save(update_fields=['order'])
target.save(update_fields=['order'])
message = _('Rank "%(name)s" has been moved below "%(other)s".')
targets_names = {'name': target.name, 'other': other_target.name}
messages.success(request, message % targets_names)
class MoveUpRank(RankAdmin, generic.ButtonView):
def button_action(self, request, target):
try:
other_target = Rank.objects.filter(order__lt=target.order)
other_target = other_target.latest('order')
except Rank.DoesNotExist:
other_target = None
if other_target:
other_target.order, target.order = target.order, other_target.order
other_target.save(update_fields=['order'])
target.save(update_fields=['order'])
message = _('Rank "%(name)s" has been moved above "%(other)s".')
targets_names = {'name': target.name, 'other': other_target.name}
messages.success(request, message % targets_names)
class RankUsers(RankAdmin, generic.TargetedView):
def real_dispatch(self, request, target):
redirect_url = reverse('misago:admin:users:accounts:index')
return redirect('%s?rank=%s' % (redirect_url, target.pk))
class DefaultRank(RankAdmin, generic.ButtonView):
def check_permissions(self, request, target):
if target.is_default:
message = _('Rank "%(name)s" is already default.')
return message % {'name': target.name}
def button_action(self, request, target):
Rank.objects.make_rank_default(target)
message = _('Rank "%(name)s" has been made default.')
messages.success(request, message % {'name': target.name})
|
python
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
General dataset implementations for TensorFlow
"""
from abc import ABCMeta, abstractmethod
from typing import Any, Callable, Dict, Iterable, List, Tuple
from sparseml.tensorflow_v1.utils import tf_compat
__all__ = [
"create_split_iterators_handle",
"Dataset",
]
def _make_initializable_iterator(dataset: tf_compat.data.Dataset):
"""
Make initializable iterator with different versions of TF
:param dataset: the dataset to create the iterator
:return: an iterator
"""
if hasattr(tf_compat.data, "make_initializable_iterator"):
return tf_compat.data.make_initializable_iterator(dataset)
else:
return dataset.make_initializable_iterator()
def create_split_iterators_handle(split_datasets: Iterable) -> Tuple[Any, Any, List]:
"""
Create an iterators handle for switching between datasets easily while training.
:param split_datasets: the datasets to create the splits and handle for
:return: a tuple containing the handle that should be set with a feed dict,
the iterator used to get the next batch,
and a list of the iterators created from the split_datasets
"""
output_types = None
output_shapes = None
split_iterators = []
for split_dataset in split_datasets:
# get_output_types and shapes are not available in TF 1.13 and prior
# hence the following conditional assignments
output_types = (
tf_compat.data.get_output_types(split_dataset)
if hasattr(tf_compat.data, "get_output_types")
else split_dataset.output_types
)
output_shapes = (
tf_compat.data.get_output_shapes(split_dataset)
if hasattr(tf_compat.data, "get_output_shapes")
else split_dataset.output_shapes
)
split_iterators.append(_make_initializable_iterator(split_dataset))
handle = tf_compat.placeholder(tf_compat.string, shape=[])
iterator = tf_compat.data.Iterator.from_string_handle(
handle, output_types, output_shapes
)
return handle, iterator, split_iterators
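# A usage sketch for the returned handle (assumes a TF1-style Session and two
# datasets already built elsewhere; every name below is illustrative only):
#
#   handle, iterator, (train_iter, val_iter) = create_split_iterators_handle(
#       [train_dataset, val_dataset]
#   )
#   next_batch = iterator.get_next()
#   with tf_compat.Session() as sess:
#       sess.run([train_iter.initializer, val_iter.initializer])
#       train_handle = sess.run(train_iter.string_handle())
#       batch = sess.run(next_batch, feed_dict={handle: train_handle})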
class Dataset(metaclass=ABCMeta):
"""
Generic dataset implementation for TensorFlow.
Expected to work with the tf.data APIs
"""
@abstractmethod
def __len__(self):
raise NotImplementedError()
def build(
self,
batch_size: int,
repeat_count: int = None,
shuffle_buffer_size: int = None,
prefetch_buffer_size: int = None,
num_parallel_calls: int = None,
) -> tf_compat.data.Dataset:
"""
Create the dataset in the current graph using tf.data APIs
:param batch_size: the batch size to create the dataset for
:param repeat_count: the number of times to repeat the dataset,
if unset or None, will repeat indefinitely
:param shuffle_buffer_size: None if not shuffling,
otherwise the size of the buffer to use for shuffling data
:param prefetch_buffer_size: None if not prefetching,
otherwise the size of the buffer to use for buffering
:param num_parallel_calls: the number of parallel calls to run the
processor function with
:return: a tf.data.Dataset instance
"""
with tf_compat.name_scope(self.name_scope()):
dataset = self.creator()
if shuffle_buffer_size and shuffle_buffer_size > 0:
dataset = dataset.shuffle(
shuffle_buffer_size, reshuffle_each_iteration=True
)
dataset = dataset.map(self.processor, num_parallel_calls=num_parallel_calls)
# Together with shuffling above, putting batch after repeat yields
# batches that straddle epoch boundaries
dataset = dataset.repeat(repeat_count)
dataset = dataset.batch(batch_size)
if prefetch_buffer_size and prefetch_buffer_size > 0:
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
def build_input_fn(
self,
batch_size: int,
repeat_count: int = None,
shuffle_buffer_size: int = None,
prefetch_buffer_size: int = None,
num_parallel_calls: int = None,
) -> Callable[[], Tuple[Dict[str, tf_compat.Tensor], Dict[str, tf_compat.Tensor]]]:
"""
Create an input_fn to be used with Estimators.
Invocation of the input_fn will create the dataset in the current graph
as well as return a tuple containing
(a dictionary of feature tensors, a dictionary of label tensors).
:param batch_size: the batch size to create the dataset for
:param repeat_count: the number of times to repeat the dataset,
if unset or None, will repeat indefinitely
:param shuffle_buffer_size: None if not shuffling,
otherwise the size of the buffer to use for shuffling data
:param prefetch_buffer_size: None if not prefetching,
otherwise the size of the buffer to use for buffering
:param num_parallel_calls: the number of parallel calls to run the
processor function with
:return: a callable representing the input_fn for an Estimator
"""
def input_fn() -> Tuple[
Dict[str, tf_compat.Tensor], Dict[str, tf_compat.Tensor]
]:
dataset = self.build(
batch_size,
repeat_count,
shuffle_buffer_size,
prefetch_buffer_size,
num_parallel_calls,
)
dataset_iter = _make_initializable_iterator(dataset)
tf_compat.add_to_collection(
tf_compat.GraphKeys.TABLE_INITIALIZERS, dataset_iter.initializer
)
iter_batch = dataset_iter.get_next()
features, labels = self.format_iterator_batch(iter_batch)
return features, labels
return input_fn
@abstractmethod
def creator(self) -> tf_compat.data.Dataset:
"""
Implemented by sub classes to create a tf.data dataset for the given impl.
:return: a created tf.data dataset
"""
raise NotImplementedError()
@abstractmethod
def processor(self, *args, **kwargs):
"""
Implemented by sub classes to parallelize and map processing functions
for loading the data of the dataset into memory.
:param args: generic inputs for processing
:param kwargs: generic inputs for processing
:return: the processed tensors
"""
raise NotImplementedError()
@abstractmethod
def format_iterator_batch(
self, iter_batch: Tuple[tf_compat.Tensor, ...]
) -> Tuple[Dict[str, tf_compat.Tensor], Dict[str, tf_compat.Tensor]]:
"""
Implemented by sub classes to parse the output from make_one_shot_iterator
into a features and labels dict to be used with Estimators
:param iter_batch: the batch ref returned from the iterator
:return: a tuple containing
(a dictionary of feature tensors, a dictionary of label tensors)
"""
raise NotImplementedError()
@abstractmethod
def name_scope(self) -> str:
"""
Implemented by sub classes to get a name scope for building the dataset
in the graph
:return: the name scope the dataset should be built under in the graph
"""
raise NotImplementedError()
|
python
|
#!/usr/bin/python
"""
This work targets for emulating fog computing infrastructure and fog service and network evaluation.
Original author Tzu-Chiao Yeh (@tz70s), 2017@National Taiwan University, Dependable Distributed System and Network Lab.
Checkout the License for using, modifying and publishing.
"""
import docker
class Env(object):
"""The declaration of some share variables."""
def __init__(self, node_num):
self.docker_client = self.init_docker_client()
self.cidr_list = self.set_cidr(node_num)
self.used_list = [False] * node_num
def init_docker_client(self):
"""Init docker client for docker daemon api """
client = docker.DockerClient(
base_url='unix://var/run/docker.sock', version='auto')
return client
def set_cidr(self, node_num):
"""Set CIDR for private ip pool assignment, return a list of cidrs"""
# TODO: support this, extend to ip_addr class C
if node_num > 200:
print("We don't support nodes exceed 200 currently")
exit(1)
sub = node_num
cidr_list = []
for _ in range(node_num):
sub += 1
substr = str(sub)
cidr_list.append('192.168.' + substr + '.0/24')
return cidr_list
def assign_cidr(self):
"""Assign CIDR for an absraction node, return a string from this method"""
for i in range(len(self.used_list)):
if self.used_list[i] is False:
self.used_list[i] = True
return self.cidr_list[i]
return ""
|
python
|
# Generated by Django 3.2.4 on 2021-09-09 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("accounts", "0005_add_field_last_modified_20210621_1058"),
]
operations = [
migrations.AddField(
model_name="govdepartment",
name="visualisation_url",
field=models.URLField(
blank=True,
default="",
help_text="URL of the visualisation page for this department",
verbose_name="Visualisation URL",
),
),
]
|
python
|
# -*- coding: utf-8 -*-
"""Shared utility functions for interacting with the data model."""
import logging
logger = logging.getLogger(__name__)
import os
from binascii import hexlify
def generate_random_digest(num_bytes=28, urandom=None, to_hex=None):
"""Generates a random hash and returns the hex digest as a unicode string.
Defaults to sha224::
>>> import hashlib
>>> h = hashlib.sha224()
>>> digest = generate_random_digest()
>>> len(h.hexdigest()) == len(digest)
True
Pass in ``num_bytes`` to specify a different length hash::
>>> h = hashlib.sha512()
>>> digest = generate_random_digest(num_bytes=64)
>>> len(h.hexdigest()) == len(digest)
True
Returns unicode::
>>> type(digest) == type(u'')
True
"""
# Compose.
if urandom is None:
urandom = os.urandom
if to_hex is None:
to_hex = hexlify
# Get random bytes.
r = urandom(num_bytes)
# Return as a unicode string.
    return to_hex(r).decode('ascii')
def ensure_unique(self, query, property_, value, max_iter=30, gen_digest=None):
"""Takes a ``candidate`` value for a unique ``property_`` and iterates,
appending an incremented integer until unique.
"""
# Compose.
if gen_digest is None:
gen_digest = generate_random_digest
# Unpack
candidate = value
# Iterate until the slug is unique.
n = 0
n_str = ''
while True:
# Keep trying slug, slug-1, slug-2, etc.
value = u'{0}{1}'.format(candidate, n_str)
existing = None
existing_instances = query.filter(property_==value).all()
for instance in existing_instances:
if instance != self:
existing = instance
break
        if existing and n < max_iter:
n += 1
# If we've tried 1, 2 ... all the way to ``max_iter``, then
# fallback on appending a random digest rather than a sequential
# number.
suffix = str(n) if n < 20 else gen_digest(num_bytes=8)
n_str = u'-{0}'.format(suffix)
continue
break
return value
def get_or_create(cls, **kwargs):
"""Get or create a ``cls`` instance using the ``kwargs`` provided.
>>> from mock import Mock
>>> mock_cls = Mock()
>>> kwargs = dict(foo='bar')
If an instance matches the filter kwargs, return it::
>>> mock_cls.query.filter_by.return_value.first.return_value = 'exist'
>>> get_or_create(mock_cls, **kwargs)
'exist'
>>> mock_cls.query.filter_by.assert_called_with(**kwargs)
Otherwise return a new instance, initialised with the ``kwargs``::
>>> mock_cls = Mock()
>>> mock_cls.return_value = 'new'
>>> mock_cls.query.filter_by.return_value.first.return_value = None
>>> get_or_create(mock_cls, **kwargs)
'new'
>>> mock_cls.assert_called_with(**kwargs)
"""
instance = cls.query.filter_by(**kwargs).first()
if not instance:
instance = cls(**kwargs)
return instance
def get_all_matching(cls, column_name, values):
"""Get all the instances of ``cls`` where the column called ``column_name``
matches one of the ``values`` provided.
Setup::
>>> from mock import Mock
>>> mock_cls = Mock()
>>> mock_cls.query.filter.return_value.all.return_value = ['result']
Queries and returns the results::
>>> get_all_matching(mock_cls, 'a', [1,2,3])
['result']
>>> mock_cls.a.in_.assert_called_with([1,2,3])
>>> mock_cls.query.filter.assert_called_with(mock_cls.a.in_.return_value)
"""
column = getattr(cls, column_name)
query = cls.query.filter(column.in_(values))
return query.all()
def get_object_id(instance):
"""Return an identifier that's unique across database tables, e.g.::
>>> from mock import MagicMock
>>> mock_user = MagicMock()
>>> mock_user.__tablename__ = 'users'
>>> mock_user.id = 1234
>>> get_object_id(mock_user)
u'users#1234'
"""
return u'{0}#{1}'.format(instance.__tablename__, instance.id)
|