def palindrome(number):
# A string is a palindrome if it reads the same forwards and backwards.
return number == number[::-1]
if __name__ == '__main__':
init = int(input())
number = init + 1
# Search upwards for the next palindrome and print its distance from the input.
while not palindrome(str(number)):
number += 1
print(number - init)
|
python
|
import unittest
from bdflib import reader
import charset
class JISTest(unittest.TestCase):
def test_jis(self):
with open('bdf/jiskan24-2003-1.bdf', 'rb') as f:
bdf = reader.read_bdf(f)
cconv = charset.JIS()
single_cp = 0
multi_cp = 0
for cp in bdf.codepoints():
unicode = cconv.unicode(cp)
if unicode is None:
pass
elif len(unicode) == 1:
single_cp += 1
else:
multi_cp += 1
self.assertEqual(single_cp, 8772)
self.assertEqual(multi_cp, 25)
|
python
|
"""
This is the main program file for the DDASM assembler. It will read a DDA program file and convert it to a VHDL \
description of a ROM file. This ROM file serves as the program memory for the LDD mark II processor.
DDASM = Digital Design Assembly
LDD = Lab Digital Design
Digital Design refers to the Digital Design courses of the Faculty of Engineering Technology - KU Leuven, Ghent
"""
import sys
import logging
from asminfo import asminfo
from datetime import datetime
log_file = None
def main(argv):
"""
This function executes the necessary steps for assembling the program ROM.
1) loading (and analysing) a program
2) loading a ROM template file
3) generating the ROM and writing to a VHDL file
:param argv: The list of command line arguments passed to this script.
:return: The script returns exit code 0 on success; -1 otherwise.
"""
global log_file
try:
log_file = open('build.log', 'w')
log("DDASM v0.1", True)
except IOError as ioe:
print('Failed to open log file (build.log). Is it still open?')
print(ioe.args[1])
print('FAILURE')
sys.exit(-1)
# Parse input arguments
try:
file_names = get_file_names(argv)
except ValueError:
print('FAILURE - check build.log')
log('FAILURE', False)
log_file.close()
sys.exit(-1)
except Exception as e:
log('Unknown error in "get_file_names()".', True)
log('FAILURE - check python logs', True)
log_file.close()
logging.exception(e)
sys.exit(-1)
# Read and pre-process program
try:
analysed_program = load_program(file_names['input_file'])
except IOError:
print('FAILURE - check build.log')
log('FAILURE', False)
log_file.close()
sys.exit(-1)
except ValueError:
print('FAILURE - check build.log')
log('FAILURE', False)
log_file.close()
sys.exit(-1)
except Exception as e:
log('Unexpected error in "load_program()".', True)
log('FAILURE - check python logs', True)
log_file.close()
logging.exception(e)
sys.exit(-1)
# Read ROM template
try:
rom = load_template(file_names['template_file'], file_names['output_file'])
except (ValueError, IOError):
print('FAILURE - check build.log')
log('FAILURE', False)
log_file.close()
sys.exit(-1)
except Exception as e:
log('Unexpected error in "load_template()".', True)
log('FAILURE - check python logs', True)
log_file.close()
logging.exception(e)
sys.exit(-1)
# generate VHDL ROM file
try:
generate_rom_file(analysed_program, rom, file_names['output_file'])
except (ValueError, IOError):
print('FAILURE - check build.log')
log('FAILURE', False)
log_file.close()
sys.exit(-1)
except Exception as e:
log('Unexpected error in "generate_rom_file()".', True)
log('FAILURE - check python logs', True)
log_file.close()
logging.exception(e)
sys.exit(-1)
log("SUCCESS", True)
log_file.close()
sys.exit(0)
def log(message, do_print):
"""
Log a message to the build log and print in the console (optionally).
:param message: String containing the log message.
:param do_print: Setting do_print to True will also display the logged message in the console.
:return: Nothing
"""
global log_file
log_file.write(message)
log_file.write('\n')
if do_print:
print(message)
def print_usage():
"""
Print an informational message on how to use the DDASM assembler.
:return: Nothing
"""
print('USAGE: python ddasm.py program_name.dda [vhdl_rom.vhd]')
print(' * program_name.dda : File containing the assembly program')
print(' * vhdl_rom.vhd : (optional) File where VHDL description of program ROM is written to.')
print(' If not specified, the file name will be "program_name.vhd".')
def get_file_names(argv):
"""
Analyse the list of arguments to determine which files should be loaded.
:param argv: This is the list of arguments passed to the "main" script. The first item in the list, argv[0], \
is the name of the script.
:return: a dictionary containing the name of the 'input_file', 'output_file' and the 'template_file'
"""
argc = len(argv)
do_print = True
fns = {'input_file': '', 'output_file': '', 'template_file': 'ROM_template.vhd'}
if argc == 1:
err = 'ERROR: Not enough input arguments (' + str(argc-1) + '). Expecting at least 1.'
log(err, do_print)
print_usage()
raise ValueError
elif argc == 2:
fns['input_file'] = argv[1]
elif argc == 3:
fns['input_file'] = argv[1]
fns['output_file'] = argv[2]
else:
err = 'ERROR: Too many input arguments (' + str(argc - 1) + '). Expecting 2 at most.'
log(err, False)
print_usage()
raise ValueError
if len(fns['output_file']) == 0:
dot_index = fns['input_file'].find('.')
if dot_index < 0:
log('WARNING: Input file name is missing an extension!', True)
input_file_name = fns['input_file']
else:
input_file_name = fns['input_file'][0:dot_index]
fns['output_file'] = input_file_name + '.vhd'
else:
dot_index = fns['output_file'].find('.')
if dot_index < 0:
log('WARNING: Output file name is missing an extension!', True)
msg = ' - input: ' + fns['input_file'] + '\n'
msg += ' - output: ' + fns['output_file'] + '\n'
msg += ' - template: ' + fns['template_file'] + '\n'
log(msg, False)
return fns
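# Illustrative example (not part of the original file): get_file_names(['ddasm.py', 'blink.dda'])
# returns {'input_file': 'blink.dda', 'output_file': 'blink.vhd', 'template_file': 'ROM_template.vhd'};
# the output file name is derived from the input name because no second argument was given.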
def load_program(filename):
"""
Load and analyse the DDASM program.
:param filename: Specifies the name of the file that contains the program.
:return: A dictionary containing information of the analysed program.
"""
do_print = False
# load the program
try:
with open(filename) as f:
raw_text = f.readlines()
except IOError:
err = 'ERROR: Failed to open program (' + filename + ').'
log(err, True)
raise IOError
log('Analysing program...', True)
# analyse text
line_index = 0
pinfo = {'program': {}, 'labels': {}, 'symbols': {}, 'size': 0}
address = 0
for line in raw_text:
is_instruction = False
# split line into categories
sline = line.strip().lower()
scindex = sline.find(';')
# isolate instruction from comment
if scindex >= 0:
asm = sline[0:scindex].strip()
else:
asm = sline
# check for #define
if asm.lower().find('#define') >= 0:
# check formatting of #define-directive
ops = split_instruction(asm.lower())
if len(ops) < 3:
err = 'ERROR: "#define" is missing arguments'
log(err, True)
raise ValueError
if len(ops) > 3:
err = 'ERROR: Too many arguments with "#define"'
log(err, True)
raise ValueError
if ops[0] != '#define':
err = 'ERROR: Found something before #define. Check your code!'
log(err, True)
raise ValueError
symbol = ops[1]
value = ops[2]
if symbol[0].isdigit():
err = 'ERROR: Symbol name cannot start with a number'
log(err, True)
raise ValueError
# check if symbol is already defined
defined_symbols = pinfo['symbols'].keys()
if symbol in defined_symbols:
err = 'ERROR: Symbol name "' + symbol + '" already defined.\n'
err += '\tline ' + str(line_index + 1) + ' -> ' + line.strip()
log(err, True)
raise ValueError
else:
# if not, add it to the list
pinfo['symbols'][symbol] = value
# no need to further analyse this line, go to next
else:
# check for label
scindex = asm.find(':')
if scindex == 0:
err = 'ERROR: Colon (:) at the start of line.\n'
err += '\tline ' + str(line_index+1) + ' -> ' + line
err += 'Expecting a label.'
log(err, True)
raise ValueError
if scindex > 0:
# we have a label, now we do some checks
label = asm[0:scindex].strip()
# check if first character is a number
if label[0].isdigit():
err = 'ERROR: Label can not start with a number.\n'
err += '\tline ' + str(line_index+1) + ' -> ' + line.strip()
log(err, True)
raise ValueError
# check if the label contains spaces
if (label.find(' ') > 0) or (label.find('\t') > 0):
err = 'ERROR: Label can not contain spaces.\n'
err += '\tline ' + str(line_index+1) + ' -> ' + line.strip()
log(err, True)
raise ValueError
# check if the label is already defined
defined_labels = pinfo['labels'].keys()
if label in defined_labels:
err = 'ERROR: Label "' + label + '" already defined.\n'
err += '\tline ' + str(line_index+1) + ' -> ' + line.strip()
log(err, True)
raise ValueError
else:
# add label to list
pinfo['labels'][label] = '%02x' % address
# now we do some further checking
if 'reset' in defined_labels:
if pinfo['labels']['reset'] != '00':
err = 'ERROR: Label "reset" should have address "00".\n'
err += '\tline ' + str(line_index+1) + ' -> ' + line.strip()
log(err, True)
raise ValueError
if 'isr' in defined_labels:
if pinfo['labels']['isr'] != '02':
err = 'ERROR: Label "isr" should have address "02".\n'
err += '\tline ' + str(line_index+1) + ' -> ' + line.strip()
log(err, True)
raise ValueError
# in case that an instruction follows the label
asm = asm[scindex:].replace(':', ' ').strip()
vhdl_comment = ' -- ' + asm + '\n'
ins = None
op_1 = None
op_2 = None
# parse instruction
ops = split_instruction(asm)
if len(ops) > 0:
is_instruction = True
ins = ops[0]
if len(ops) > 1:
op_1 = ops[1]
if len(ops) > 2:
op_2 = ops[2]
if len(ops) > 3:
err = 'ERROR: Wrong instruction format.\n'
err += '\tline ' + str(line_index + 1) + ' -> ' + line.strip()
log(err, True)
raise ValueError
# check for virtual instruction and if so do replacement
if ins in asminfo['virtual_instructions']:
op_2 = asminfo['virtual_instructions'][ins]['operand_2']
ins = asminfo['virtual_instructions'][ins]['replace_with']
# update program info (and set next instruction address)
if is_instruction:
pinfo['program'][line_index] = {'address': address,
'instruction': ins,
'operand_1': op_1,
'operand_2': op_2,
'comment': vhdl_comment}
address = address + 2
# process next line
line_index = line_index + 1
# Log a list of the labels that are defined in the program
log('- Labels defined in ' + filename + ':', do_print)
labels_table = format_symbols_table(pinfo['labels'], 'label', 'address (hex)')
log(labels_table, do_print)
# Log a list of the symbols that are defined in the program
log('- Symbols defined in ' + filename + ':', do_print)
symbols_table = format_symbols_table(pinfo['symbols'], 'symbols', 'value')
log(symbols_table, do_print)
# Update program size
pinfo['size'] = address
msg = ' - Program size: ' + str(pinfo['size']) + ' bytes.\n\nAnalysis complete.\n\n'
log(msg, True)
return pinfo
def load_template(filename, romfilename):
"""
Load the template of the program ROM.
:param filename: The program ROM template file name.
:param romfilename: The file name of the resulting ROM file
:return: A dictionary with program ROM structure and memory size
"""
# To put creation date in ROM file
dt = datetime.now()
datestr = dt.strftime('-- Created: %H:%M:%S %d-%m-%Y\r\n')
# To put filename in ROM file
filestr = '-- File: ' + romfilename + '\r\n'
# print(datestr)
tinfo = {'first_part': list(), 'last_part': list(), 'program_space': None}
log('Loading ROM template...', True)
# load the template
try:
with open(filename) as f:
raw_text = f.readlines()
except IOError as ioe:
err = 'ERROR: Failed to load template file (' + filename + ').'
log(err, True)
log(ioe.args[1], False)
raise IOError
section = ['start', 'program', 'end']
si = 0
for line in raw_text:
if section[si] == 'start':
if '-- Created' in line:
tinfo['first_part'].append(datestr)
elif '-- File' in line:
tinfo['first_part'].append(filestr)
else:
tinfo['first_part'].append(line)
if '-- program start' in line:
si += 1
tinfo['program_space'] = 0
elif section[si] == 'program':
if '-- program end' in line:
si += 1
tinfo['last_part'].append(line)
else:
tinfo['program_space'] += 1
elif section[si] == 'end':
tinfo['last_part'].append(line)
else:
log('ERROR: Error while reading template file.', True)
raise ValueError
if section[si] != 'end':
log('ERROR: ROM template is missing mandatory lines.', True)
raise ValueError
log('ROM template loaded.\n', True)
return tinfo
def generate_rom_file(pinfo, rom, filename):
"""
Generate program ROM file in VHDL containing the instructions of the assembled program
:param pinfo: A dictionary containing the analyzed program (provided by load_program(...) ).
:param rom: A dictionary containing the program ROM structure (provided by load_template(...) )
:param filename: The file name of the VHDL file.
:return: Nothing
"""
do_print = False
log('Generating ROM memory file...', True)
try:
rom_file = open(filename, 'w')
except IOError:
log('ERROR: Failed to open target file.', True)
raise IOError
# check that the program does not exceed the available memory space
if pinfo['size'] > rom['program_space']:
err = 'ERROR: Program size (' + str(pinfo['size']) + ' bytes) exceeds available memory (' \
+ str(rom['program_space']) + ' bytes).'
log(err, True)
raise ValueError
# Write first part of ROM file
for line in rom['first_part']:
rom_file.write(line)
# Write program to ROM file
last_address = 0
for line in sorted(pinfo['program']):
instruction_info = pinfo['program'][line]
log(str(instruction_info), do_print)
# get instruction type
try:
instruction_type = asminfo['instructions'][instruction_info['instruction']]['type']
except KeyError:
err = 'ERROR: Unknown instruction "' + instruction_info['instruction'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# get instruction opcode
instruction_opcode = asminfo['instructions'][instruction_info['instruction']]['opcode']
# print(instruction_type)
rom_line = vhdl_fixed_start(instruction_info['address'])
if instruction_type == 'jump':
# get memory address
if instruction_info['operand_1'] is None:
err = 'ERROR: Jump address not defined for instruction "' + instruction_info['instruction'] \
+ '" (line ' + str(line + 1) + ').'
log(err, True)
raise ValueError
# lookup address in case label is used
address = lookup_name(instruction_info['operand_1'], pinfo)
if address is None:
err = 'ERROR: Name "' + instruction_info['operand_1'] + '" is not defined (line ' + str(line + 1) + ').'
log(err, True)
raise ValueError
# convert hex address to binary representation
memory_address = address_hex_to_binary(address)
# assemble jump instruction
rom_line += instruction_opcode + '000",' + instruction_info['comment']
if instruction_info['address'] == (rom['program_space'] - 2):
rom_line += vhdl_fixed_start(instruction_info['address'] + 1) + memory_address + '"\n'
else:
rom_line += vhdl_fixed_start(instruction_info['address'] + 1) + memory_address + '",\n'
elif instruction_type == 'jump_conditional':
# get memory address
if instruction_info['operand_1'] is None:
err = 'ERROR: Jump address not defined for instruction "' + instruction_info['instruction']\
+ '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# lookup address in case label is used
address = lookup_name(instruction_info['operand_1'], pinfo)
if address is None:
err = 'ERROR: Name "' + instruction_info['operand_1'] + '" is not defined (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# convert hex address to binary representation
memory_address = address_hex_to_binary(address)
# look up conditional flag
conditional_flag = asminfo['instructions'][instruction_info['instruction']]['flag']
# assemble jump instruction
rom_line += instruction_opcode + conditional_flag + '",' + instruction_info['comment']
if instruction_info['address'] == (rom['program_space'] - 2):
rom_line += vhdl_fixed_start(instruction_info['address'] + 1) + memory_address + '"\n'
else:
rom_line += vhdl_fixed_start(instruction_info['address'] + 1) + memory_address + '",\n'
elif instruction_type == 'jump_no_address':
# assemble jump instruction (no address specified)
rom_line += instruction_opcode + '000",' + instruction_info['comment']
if instruction_info['address'] == (rom['program_space'] - 2):
rom_line += vhdl_fixed_start(instruction_info['address']+1) + '00000000"\n'
else:
rom_line += vhdl_fixed_start(instruction_info['address']+1) + '00000000",\n'
elif instruction_type == 'single_register':
# get destination/source register code
if instruction_info['operand_1'] is None:
err = 'ERROR: Source/destination register not defined for instruction "'\
+ instruction_info['instruction'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# look-up symbol
operand_1 = lookup_name(instruction_info['operand_1'], pinfo)
try:
rds_code = asminfo['registers'][operand_1]
except KeyError:
err = 'ERROR: Wrong register name "' + instruction_info['operand_1'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# assemble single register instruction
rom_line += instruction_opcode + rds_code + '",' + instruction_info['comment']
if instruction_info['address'] == (rom['program_space'] - 2):
rom_line += vhdl_fixed_start(instruction_info['address']+1) + rds_code + '00000"\n'
else:
rom_line += vhdl_fixed_start(instruction_info['address']+1) + rds_code + '00000",\n'
elif instruction_type == 'register_to_register' or instruction_type == 'indirect_memory':
# get destination register code
if instruction_info['operand_1'] is None:
err = 'ERROR: Destination register not defined for instruction "'\
+ instruction_info['instruction'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# look-up symbol
operand_1 = lookup_name(instruction_info['operand_1'], pinfo)
try:
rd_code = asminfo['registers'][operand_1]
except KeyError:
err = 'ERROR: Wrong register name "' + instruction_info['operand_1'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# get source register code
if instruction_info['operand_2'] is None:
err = 'ERROR: Source register not defined for instruction "'\
+ instruction_info['instruction'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# look-up symbol
operand_2 = lookup_name(instruction_info['operand_2'], pinfo)
try:
rs_code = asminfo['registers'][operand_2]
except KeyError:
err = 'ERROR: Wrong register name "' + instruction_info['operand_2'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# assemble register-to-register instruction
rom_line += instruction_opcode + rd_code + '",' + instruction_info['comment']
if instruction_info['address'] == (rom['program_space'] - 2):
rom_line += vhdl_fixed_start(instruction_info['address'] + 1) + rs_code + '00000"\n'
else:
rom_line += vhdl_fixed_start(instruction_info['address'] + 1) + rs_code + '00000",\n'
elif instruction_type == 'register_to_memory':
# get memory address
# check if 0 < length <= 2
if instruction_info['operand_1'] is None:
err = 'ERROR: Target address unspecified for instruction "' + instruction_info['instruction']\
+ '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# look-up symbol
operand_1 = lookup_name(instruction_info['operand_1'], pinfo)
if operand_1 is None:
err = 'ERROR: Target address name "' + instruction_info['operand_1'] + '" unspecified for instruction "'\
+ instruction_info['instruction'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
else:
instruction_info['operand_1'] = operand_1
# make sure the address has the correct length
if len(instruction_info['operand_1']) > 2:
err = 'ERROR: Target address "' + instruction_info['operand_1'] + '" is too long (line '\
+ str(line+1) + ').'
log(err, True)
raise ValueError
# convert to binary representation
try:
memory_address = address_hex_to_binary(instruction_info['operand_1'])
except KeyError:
err = 'ERROR: "' + instruction_info['operand_1'] + '" is not a hexadecimal address (line '\
+ str(line+1) + ').'
log(err, True)
raise ValueError
# get source register code
if instruction_info['operand_2'] is None:
err = 'ERROR: Source register not defined for instruction "'\
+ instruction_info['instruction'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# look-up symbol
operand_2 = lookup_name(instruction_info['operand_2'], pinfo)
try:
rs_code = asminfo['registers'][operand_2]
except KeyError:
err = 'ERROR: Wrong register name "' + instruction_info['operand_2'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# assemble register-to-memory instruction
rom_line += instruction_opcode + rs_code + '",' + instruction_info['comment']
if instruction_info['address'] == (rom['program_space'] - 2):
rom_line += vhdl_fixed_start(instruction_info['address'] + 1) + memory_address + '"\n'
else:
rom_line += vhdl_fixed_start(instruction_info['address'] + 1) + memory_address + '",\n'
elif instruction_type == 'x_to_register':
# get destination register code
if instruction_info['operand_1'] is None:
err = 'ERROR: Destination register not defined for instruction "'\
+ instruction_info['instruction'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# look-up symbol
operand_1 = lookup_name(instruction_info['operand_1'], pinfo)
try:
rd_code = asminfo['registers'][operand_1]
except KeyError:
err = 'ERROR: Wrong register name "' + instruction_info['operand_1'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
# get memory address or literal
# check if operand_2 is present
if instruction_info['operand_2'] is None:
err = 'ERROR: Literal or memory location unspecified for instruction "'\
+ instruction_info['instruction'] + '" (line ' + str(line + 1) + ').'
log(err, True)
raise ValueError
# look-up symbol
operand_2 = lookup_name(instruction_info['operand_2'], pinfo)
if operand_2 is None:
err = 'ERROR: Target address name "' + instruction_info['operand_2'] + '" unspecified for instruction "'\
+ instruction_info['instruction'] + '" (line ' + str(line+1) + ').'
log(err, True)
raise ValueError
else:
instruction_info['operand_2'] = operand_2
# check length
if len(instruction_info['operand_2']) > 2:
err = 'ERROR: Literal or memory location "' + instruction_info['operand_2'] + '" is too long (line '\
+ str(line + 1) + ').'
log(err, True)
raise ValueError
# convert to binary representation
try:
address_literal = address_hex_to_binary(instruction_info['operand_2'])
except KeyError:
err = 'ERROR: "' + instruction_info['operand_2'] + '" is not a hexadecimal address or number (line '\
+ str(line + 1) + ').'
log(err, True)
raise ValueError
# assemble memory/literal-to-register instruction
rom_line += instruction_opcode + rd_code + '",' + instruction_info['comment']
if instruction_info['address'] == (rom['program_space'] - 2):
rom_line += vhdl_fixed_start(instruction_info['address'] + 1) + address_literal + '"\n'
else:
rom_line += vhdl_fixed_start(instruction_info['address'] + 1) + address_literal + '",\n'
else:
# unsupported instruction type
err = 'ERROR: Unknown instruction type (' + instruction_type + ').'
log(err, True)
raise ValueError
log(rom_line, do_print)
rom_file.write(rom_line)
last_address = instruction_info['address'] + 2
# fill remaining memory space with zeros
for remaining_address in range(last_address, rom['program_space']):
if remaining_address == (rom['program_space']-1):
rom_line = vhdl_fixed_start(remaining_address) + '00000000"\n'
else:
rom_line = vhdl_fixed_start(remaining_address) + '00000000",\n'
rom_file.write(rom_line)
# write last part of template to ROM file
for line in rom['last_part']:
rom_file.write(line)
rom_file.close()
log('Program ROM complete.', True)
def vhdl_fixed_start(address):
"""
Generate the start of a line in the VHDL ROM.
:param address: address of the ROM line.
:return: a string containing the start of the ROM line.
"""
rom_start = '\t\t%3d => "' % address
return rom_start
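# Illustrative example (not part of the original file): vhdl_fixed_start(3) returns the string
# '\t\t  3 => "', i.e. the indented address prefix to which the instruction bits and the
# closing quote are appended later.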
def split_instruction(ins):
"""
Split an assembly instruction into separate parts.
:param ins: The assembly line.
:return: A list with the parts of the instruction.
"""
newins = ins.replace(',', ' ')
splitins = newins.split()
return splitins
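# Illustrative example (not part of the original file): split_instruction('mov r1, 0x0a')
# returns ['mov', 'r1', '0x0a'], because the comma is replaced by a space before splitting
# on whitespace.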
def lookup_name(name, pinfo):
"""
Look up whether a name refers to a register, a label, a symbol, or a hexadecimal literal.
:param name: Name to look up.
:param pinfo: A dictionary containing the program info.
:return: None if the name is not defined; otherwise the register name, the label address, the symbol value, \
or the hexadecimal literal itself.
"""
if is_defined(name, asminfo['registers']):
return name
if is_defined(name, pinfo['labels']):
return pinfo['labels'][name]
if is_defined(name, pinfo['symbols']):
return pinfo['symbols'][name]
if is_hex(name): # should be last to avoid early return on names that can be interpreted as hex (eg: BCD)
return name
return None
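# Illustrative example (not part of the original file), assuming none of these names is a register:
# with pinfo = {'labels': {'loop': '0a'}, 'symbols': {'count': '05'}}, lookup_name('loop', pinfo)
# returns '0a', lookup_name('count', pinfo) returns '05', lookup_name('3f', pinfo) returns '3f'
# (treated as a hex literal), and lookup_name('foo', pinfo) returns None. Register names are
# checked first, so a name defined in asminfo['registers'] is returned unchanged.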
def address_hex_to_binary(address):
"""
Convert a hexadecimal address (string representation) to binary (string representation).
:param address: The address to convert.
:return: The binary address.
"""
# pad single-digit addresses to two hex digits
if len(address) == 1:
address = '0' + address
binary_lookup = {
'0': '0000',
'1': '0001',
'2': '0010',
'3': '0011',
'4': '0100',
'5': '0101',
'6': '0110',
'7': '0111',
'8': '1000',
'9': '1001',
'a': '1010',
'b': '1011',
'c': '1100',
'd': '1101',
'e': '1110',
'f': '1111'
}
try:
binary_address = binary_lookup[address[0]]
binary_address += binary_lookup[address[1]]
except KeyError as ke:
raise ke
return binary_address
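# Illustrative example (not part of the original file): address_hex_to_binary('1a') returns
# '00011010', and address_hex_to_binary('5') is first padded to '05' and returns '00000101'.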
def is_hex(s):
"""
Test if a string is a hexadecimal in string representation.
:param s: The string to test.
:return: True if hexadecimal, False if not.
"""
try:
int(s, 16)
return True
except ValueError:
return False
def is_defined(s, table):
"""
Test if a symbol or label is defined.
:param s: The symbol to look up.
:param table: A dictionary containing the labels and symbols.
:return: True if defined, False otherwise.
"""
return s in table
def format_symbols_table(symbols_list, symbol_name, value='address'):
"""
Format the table of symbols or labels as a readable string.
:param symbols_list: Dictionary mapping symbol/label names to their values or addresses.
:param symbol_name: Header text for the name column.
:param value: Header text for the value column.
:return: A string containing the formatted table.
"""
if not bool(symbols_list.keys()):
msg = '\n\tNo symbols of type "' + symbol_name + '" have been defined.\n'
return msg
longest_name = 0
longest_value = 0
# determine maximum length of items in each column
for symbol in symbols_list.keys():
if len(symbol) > longest_name:
longest_name = len(symbol)
if len(symbols_list[symbol]) > longest_value:
longest_value = len(symbols_list[symbol])
if longest_name < len(symbol_name):
longest_name = len(symbol_name)
if longest_value < len(value):
longest_value = len(value)
# top rule
table = '\n\t+'
for x in range(0, longest_name):
table += '-'
table += '--+'
for x in range(0, longest_value):
table += '-'
table += '--+\n'
# table header
table += '\t| ' + symbol_name
for x in range(len(symbol_name), longest_name):
table += ' '
table += ' | ' + value
for x in range(len(value), longest_value):
table += ' '
table += ' |\n'
# middle rule
table += '\t+'
for x in range(0, longest_name):
table += '-'
table += '--+'
for x in range(0, longest_value):
table += '-'
table += '--+\n'
# print lines
for symbol in symbols_list.keys():
table += '\t| ' + symbol
for x in range(len(symbol), longest_name):
table += ' '
table += ' | ' + symbols_list[symbol]
for x in range(len(symbols_list[symbol]), longest_value):
table += ' '
table += ' |\n'
# bottom rule
table += '\t+'
for x in range(0, longest_name):
table += '-'
table += '--+'
for x in range(0, longest_value):
table += '-'
table += '--+\n'
return table
if __name__ == "__main__":
main(sys.argv)
|
python
|
# SMS verification code expiration time (seconds)
SMS_CODE_EXPIRETIME = 300
# Expiration time (seconds) of a mobile number in Redis; prevents repeatedly sending SMS
# messages to the same number within a short period
MOBILE_EXPIRETIME = 60
# Tracks how many times a given attribute has been used in Redis; addresses use cases
# such as limiting the number of lottery draws
MOBILE_FREQUENCY = 1
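# The sketch below is not part of the original settings file; it only illustrates how these
# constants are typically combined with redis-py. setex/get are real redis-py calls, but the
# key names and the send_sms_code helper are hypothetical.
#
# import redis
#
# r = redis.Redis()
#
# def send_sms_code(mobile, code):
#     # refuse to resend while the per-number cooldown key still exists
#     if r.get('sms_flag_%s' % mobile):
#         return False
#     # store the code for later verification and start the cooldown window
#     r.setex('sms_code_%s' % mobile, SMS_CODE_EXPIRETIME, code)
#     r.setex('sms_flag_%s' % mobile, MOBILE_EXPIRETIME, 1)
#     return True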
|
python
|
import os
import threading
from cheroot import wsgi
from wsgidav.wsgidav_app import WsgiDAVApp
from powerhub.directories import WEBDAV_RO, WEBDAV_BLACKHOLE, \
UPLOAD_DIR, WEBDAV_DIR
from powerhub.args import args
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import logging
logger = logging.getLogger("wsgidav")
logger.propagate = True
logger.setLevel(logging.DEBUG)
config = {
"host": '127.0.0.1',
"port": args.WEBDAV_PORT,
"dir_browser": {"enable": True},
"http_authenticator": {
# None: dc.simple_dc.SimpleDomainController(user_mapping)
"domain_controller": None,
"accept_basic": True, # Allow basic authentication, True or False
"accept_digest": True, # Allow digest authentication, True or False
"default_to_digest": True, # True or False
# Name of a header field that will be accepted as authorized user
"trusted_auth_header": None,
},
#: Used by SimpleDomainController only
"simple_dc": {"user_mapping": {"*": True}},
"provider_mapping": {
"/webdav_ro": {
"root": WEBDAV_RO,
"readonly": True,
"auth": "anonymous",
},
"/webdav/": {
"root": WEBDAV_DIR,
"readonly": False,
"auth": "anonymous",
},
},
"verbose": 1,
}
app = WsgiDAVApp(config)
server_args = {
"bind_addr": (config["host"], config["port"]),
"wsgi_app": app,
}
server = wsgi.Server(**server_args)
class MyHandler(FileSystemEventHandler):
"""Responsible for copying files from the BLACKHOLE_DIR to the
UPLOAD_DIR"""
def on_created(self, event):
os.rename(
os.path.join(event.src_path),
os.path.join(UPLOAD_DIR, os.path.basename(event.src_path)),
)
def watch_blackhole_folder():
observer = Observer()
observer.schedule(MyHandler(), path=WEBDAV_BLACKHOLE, recursive=False)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def run_webdav():
threading.Thread(
target=watch_blackhole_folder,
daemon=True,
).start()
try:
server.start()
except KeyboardInterrupt:
print("Caught Ctrl-C, shutting down...")
finally:
server.stop()
|
python
|
#!/usr/bin/env python3
import os
import time
import subprocess
import shutil
import datetime
import tempfile
def main():
TMPROOT = r'/Volumes/RAM Disk'
REPO = r"/Volumes/RAM Disk/redis/"
SIT_BIN = r"/Users/abcdabcd987/Developer/sit/bin/sit"
report = open('stress_test_%s.csv' % datetime.datetime.today().strftime('%Y%m%d_%H%M%S'), 'w')
report.write('git_cur, sit_cur, git_tot, sit_tot, git_commit_cur, sit_commit_cur, git_commit_tot, sit_commit_tot, git_add_cur, sit_add_cur, git_add_tot, sit_add_tot\n')
# get commits
commits = ['2.4']
args = ['git', 'log', '--pretty=%P', commits[0]]
proc = subprocess.Popen(args, cwd=REPO, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = proc.communicate(timeout=15)
for line in outs.decode('utf-8').split('\n'):
last_commit = line.split(' ')[0]
if not last_commit:
break
commits.append(last_commit)
commits.reverse()
# init repo
tmp_sit = tempfile.mkdtemp(dir=TMPROOT)
tmp_git = tempfile.mkdtemp(dir=TMPROOT)
print('tmp_sit', tmp_sit)
print('tmp_git', tmp_git)
args = [SIT_BIN, 'init']
proc = subprocess.Popen(args, cwd=tmp_sit)
outs, errs = proc.communicate(timeout=15)
args = ['git', 'init']
proc = subprocess.Popen(args, cwd=tmp_git)
outs, errs = proc.communicate(timeout=15)
# write author config
with open(os.path.join(tmp_sit, '.sit', 'config'), 'w') as f:
f.write("user.name: Lequn Chen\nuser.email: [email protected]\n")
timeused_sit = 0
timeused_git = 0
timeused_sit_commit = 0
timeused_git_commit = 0
timeused_sit_add = 0
timeused_git_add = 0
# loop commits
for i, commit in enumerate(commits):
cur_timeused_sit = 0
cur_timeused_git = 0
# delete old files
for path in os.listdir(tmp_sit):
if path != '.' and path != '..' and path != '.sit':
p1 = os.path.join(tmp_sit, path)
p2 = os.path.join(tmp_git, path)
if os.path.isdir(p1):
shutil.rmtree(p1)
shutil.rmtree(p2)
else:
os.remove(p1)
os.remove(p2)
# checkout commit
args = ['git', 'checkout', commit]
proc = subprocess.Popen(args, cwd=REPO, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = proc.communicate(timeout=15)
# copy new files
for path in os.listdir(REPO):
if path != '.' and path != '..' and path != '.git':
src = os.path.join(REPO, path)
dst1 = os.path.join(tmp_sit, path)
dst2 = os.path.join(tmp_git, path)
if os.path.isdir(src):
shutil.copytree(src, dst1)
shutil.copytree(src, dst2)
else:
shutil.copy(src, dst1)
shutil.copy(src, dst2)
# add
time_st = time.time()
args = [SIT_BIN, 'add', '.']
proc = subprocess.Popen(args, cwd=tmp_sit, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = proc.communicate(timeout=15)
time_ed = time.time()
cur_timeused_sit_add = time_ed - time_st
time_st = time.time()
args = ['git', 'add', '.']
proc = subprocess.Popen(args, cwd=tmp_git, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = proc.communicate(timeout=15)
time_ed = time.time()
cur_timeused_git_add = time_ed - time_st
# get commit log
args = ['git', 'log', '--pretty=%s', '-n', '1', commit]
proc = subprocess.Popen(args, cwd=REPO, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = proc.communicate(timeout=15)
message = outs.decode('utf-8')
# write commit log
with open(os.path.join(tmp_sit, '.sit', 'COMMIT_MSG'), 'w') as f:
f.write(message)
# commit
time_st = time.time()
args = [SIT_BIN, 'commit']
proc = subprocess.Popen(args, cwd=tmp_sit, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = proc.communicate(timeout=15)
time_ed = time.time()
cur_timeused_sit_commit = time_ed - time_st
time_st = time.time()
args = ['git', 'commit', '-m', message]
proc = subprocess.Popen(args, cwd=tmp_git, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = proc.communicate(timeout=15)
time_ed = time.time()
cur_timeused_git_commit = time_ed - time_st
cur_timeused_sit = cur_timeused_sit_commit + cur_timeused_sit_add
cur_timeused_git = cur_timeused_git_commit + cur_timeused_git_add
timeused_sit += cur_timeused_sit
timeused_git += cur_timeused_git
timeused_sit_add += cur_timeused_sit_add
timeused_git_add += cur_timeused_git_add
timeused_sit_commit += cur_timeused_sit_commit
timeused_git_commit += cur_timeused_git_commit
print('[%d/%d] cur(%.0fms, %.0fms), tot(%.0fms, %.0fms) | cmt(%.0fms, %.0fms) | add(%.0fms, %.0fms)' %
(i, len(commits),
cur_timeused_git*1000, cur_timeused_sit*1000,
timeused_git*1000, timeused_sit*1000,
cur_timeused_git_commit*1000, cur_timeused_sit_commit*1000,
cur_timeused_git_add*1000, cur_timeused_sit_add*1000))
report.write('%.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f, %.10f\n' %
(cur_timeused_git, cur_timeused_sit, timeused_git, timeused_sit,
cur_timeused_git_commit, cur_timeused_sit_commit, timeused_git_commit, timeused_sit_commit,
cur_timeused_git_add, cur_timeused_sit_add, timeused_git_add, timeused_sit_add))
report.close()
main()
|
python
|
# #####################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# #####################################################################################################################
import logging
import uuid
from datetime import datetime
from os import getenv
import requests
from crhelper import CfnResource
logger = logging.getLogger(__name__)
helper = CfnResource(log_level=getenv("LOG_LEVEL", "WARNING"))
METRICS_ENDPOINT = "https://metrics.awssolutionsbuilder.com/generic"
def _sanitize_data(event):
resource_properties = event["ResourceProperties"]
# Remove ServiceToken (lambda arn) to avoid sending AccountId
resource_properties.pop("ServiceToken", None)
resource_properties.pop("Resource", None)
# Solution ID and unique ID are sent separately
resource_properties.pop("Solution", None)
resource_properties.pop("UUID", None)
# Add some useful fields related to stack change
resource_properties["CFTemplate"] = (
event["RequestType"] + "d"
) # Created, Updated, or Deleted
return resource_properties
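# Illustrative example (not from the original source): for an event such as
#   {"RequestType": "Create",
#    "ResourceProperties": {"ServiceToken": "arn:...", "Solution": "SO0000", "UUID": "...", "Region": "us-east-1"}}
# _sanitize_data returns {"Region": "us-east-1", "CFTemplate": "Created"}.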
@helper.create
@helper.update
@helper.delete
def send_metrics(event, _):
resource_properties = event["ResourceProperties"]
random_id = event.get("PhysicalResourceId", str(uuid.uuid4()))
helper.Data["UUID"] = random_id
try:
headers = {"Content-Type": "application/json"}
payload = {
"Solution": resource_properties["Solution"],
"UUID": random_id,
"TimeStamp": datetime.utcnow().isoformat(),
"Data": _sanitize_data(event),
}
logger.info(f"Sending payload: {payload}")
response = requests.post(METRICS_ENDPOINT, json=payload, headers=headers)
logger.info(
f"Response from metrics endpoint: {response.status_code} {response.reason}"
)
if "stackTrace" in response.text:
logger.exception("Error submitting usage data: %s" % response.text)
except requests.exceptions.RequestException:
logger.exception("Could not send usage data")
except Exception:
logger.exception("Unknown error when trying to send usage data")
return random_id
def handler(event, context):
helper(event, context) # pragma: no cover
|
python
|
from vstutils.models import BQuerySet, BModel, Manager, models
class HostQuerySet(BQuerySet):
def test_filter(self):
return self.filter(name__startswith='test_')
class Host(BModel):
objects = Manager.from_queryset(HostQuerySet)()
name = models.CharField(max_length=1024)
class HostGroup(BModel):
objects = Manager.from_queryset(HostQuerySet)()
name = models.CharField(max_length=1024)
hosts = models.ManyToManyField(Host)
parent = models.ForeignKey('self', on_delete=models.CASCADE,
related_query_name='subgroups', related_name='subgroups',
null=True, default=None, blank=True)
@property
def file(self):
return "Some value"
@property
def secret_file(self):
return "Some secret value"
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('publication_backbone', '0006_auto_20160224_1345'),
('fluent_contents', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PromoPluginModel',
fields=[
('contentitem_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='fluent_contents.ContentItem')),
('count', models.PositiveIntegerField(default=5, verbose_name='Display count')),
('order', models.CharField(default=b'date_added_asc', max_length=255, verbose_name='Display Order', blank=True, choices=[(b'date_added_asc', 'Date Added: old first'), (b'date_added_desc', 'Date Added: new first'), (b'name_desc', 'Alphabetical: descending'), (b'name_asc', 'Alphabetical')])),
('template', models.CharField(default=(b'publication_backbone/plugins/promo/promo_default.html', 'Default promo'), max_length=255, verbose_name='Template', choices=[(b'publication_backbone/plugins/promo/promo_default.html', 'Default promo'), (b'publication_backbone/plugins/promo/promo_list.html', 'List promo'), (b'publication_backbone/plugins/promo/promo.html', 'Top links promo'), (b'publication_backbone/plugins/promo/promo_bottom.html', 'Bottom links promo')])),
('categories', mptt.fields.TreeManyToManyField(related_name='fluent_promotions', null=True, verbose_name='categories', to='publication_backbone.Category', blank=True)),
],
options={
'db_table': 'contentitem_promo_promopluginmodel',
'verbose_name': 'Promo Plugin',
'verbose_name_plural': 'Promo Plugins',
},
bases=('fluent_contents.contentitem',),
),
]
|
python
|
import time
import pygame
from pygame.locals import *
# The individual event object that is returned
# This serves as a proxy to pygame's event object
# and the key field is one of the strings in the button list listed below
# in the InputManager's constructor
# This comment is actually longer than the class definition itself.
class InputEvent:
def __init__(self, key, down):
self.key = key
self.down = down
self.up = not down
# This is where all the magic happens
class InputManager:
def __init__(self):
self.init_joystick()
# I like SNES button designations. My decision to use them is arbitrary;
# they are only used internally to consistently identify buttons.
# Or you could pretend that these were XBox button layout designations.
# Either way. Up to you. You could change them altogether if you want.
self.buttons = ['up', 'down', 'left', 'right', 'start', 'A', 'B', 'X', 'Y', 'L', 'R']
# If you would like there to be a keyboard fallback configuration, fill those out
# here in this mapping. If you wanted the keyboard keys to be configurable, you could
# probably copy the same sort of system I use for the joystick configuration for the
# keyboard. But that's getting fancy for a simple tutorial.
self.key_map = {
K_UP : 'up',
K_DOWN : 'down',
K_LEFT : 'left',
K_RIGHT : 'right',
K_RETURN : 'start',
K_a : 'A',
K_b : 'B',
K_x : 'X',
K_y : 'Y',
K_l : 'L',
K_r : 'R'
}
# This dictionary will tell you which logical buttons are pressed, whether it's
# via the keyboard or joystick
self.keys_pressed = {}
for button in self.buttons:
self.keys_pressed[button] = False
# This is a list of joystick configurations that will be populated during the
# configuration phase
self.joystick_config = {}
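# For illustration (not in the original): after configuration an entry might look like
#   self.joystick_config['A'] = ('is_button', 1)        # mapped to physical button 1
#   self.joystick_config['up'] = ('is_hat', 0, 'y', 1)  # mapped to hat 0, positive y direction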
# Quitting the window is raised as an input event. And typically you also want
# that event raised when the user presses escape which is not something you
# want to configure on the joystick. That's why it's wired separately from
# everything else. When escape is pressed or the user closes the window via its
# chrome, this flag is set to True.
self.quit_attempt = False
# button is a string of the designation in the list above
def is_pressed(self, button):
return self.keys_pressed[button]
# This will pump the pygame events. If this is not called every frame,
# then the PyGame window will start to lock up.
# This is basically a proxy method for pygame's event pump and will likewise return
# a list of event proxies.
def get_events(self):
events = []
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
self.quit_attempt = True
# This is where the keyboard events are checked
if event.type == KEYDOWN or event.type == KEYUP:
key_pushed_down = event.type == KEYDOWN
button = self.key_map.get(event.key)
if button != None:
events.append(InputEvent(button, key_pushed_down))
self.keys_pressed[button] = key_pushed_down
# And this is where each configured button is checked...
for button in self.buttons:
# determine what something like "Y" actually means in terms of the joystick
config = self.joystick_config.get(button)
if config != None:
# if the button is configured to an actual button...
if config[0] == 'is_button':
pushed = self.joystick.get_button(config[1])
if pushed != self.keys_pressed[button]:
events.append(InputEvent(button, pushed))
self.keys_pressed[button] = pushed
# if the button is configured to a hat direction...
elif config[0] == 'is_hat':
status = self.joystick.get_hat(config[1])
if config[2] == 'x':
amount = status[0]
else:
amount = status[1]
pushed = amount == config[3]
if pushed != self.keys_pressed[button]:
events.append(InputEvent(button, pushed))
self.keys_pressed[button] = pushed
# if the button is configured to a trackball direction...
elif config[0] == 'is_ball':
status = self.joystick.get_ball(config[1])
if config[2] == 'x':
amount = status[0]
else:
amount = status[1]
if config[3] == 1:
pushed = amount > 0.5
else:
pushed = amount < -0.5
if pushed != self.keys_pressed[button]:
events.append(InputEvent(button, pushed))
self.keys_pressed[button] = pushed
# if the button is configured to an axis direction...
elif config[0] == 'is_axis':
status = self.joystick.get_axis(config[1])
if config[2] == 1:
pushed = status > 0.5
else:
pushed = status < -0.5
if pushed != self.keys_pressed[button]:
events.append(InputEvent(button, pushed))
self.keys_pressed[button] = pushed
return events
# Any button that is currently pressed on the game pad will be toggled
# to the button designation passed in as the 'button' parameter.
# (as long as it isn't already in use for a different button)
def configure_button(self, button):
js = self.joystick
# check buttons for activity...
for button_index in range(js.get_numbuttons()):
button_pushed = js.get_button(button_index)
if button_pushed and not self.is_button_used(button_index):
self.joystick_config[button] = ('is_button', button_index)
return True
# check hats for activity...
# (hats are the basic direction pads)
for hat_index in range(js.get_numhats()):
hat_status = js.get_hat(hat_index)
if hat_status[0] < -.5 and not self.is_hat_used(hat_index, 'x', -1):
self.joystick_config[button] = ('is_hat', hat_index, 'x', -1)
return True
elif hat_status[0] > .5 and not self.is_hat_used(hat_index, 'x', 1):
self.joystick_config[button] = ('is_hat', hat_index, 'x', 1)
return True
if hat_status[1] < -.5 and not self.is_hat_used(hat_index, 'y', -1):
self.joystick_config[button] = ('is_hat', hat_index, 'y', -1)
return True
elif hat_status[1] > .5 and not self.is_hat_used(hat_index, 'y', 1):
self.joystick_config[button] = ('is_hat', hat_index, 'y', 1)
return True
# check trackballs for activity...
# (I don't actually have a gamepad with a trackball on it. So this code
# is completely untested! Let me know if it works and is typo-free.)
for ball_index in range(js.get_numballs()):
ball_status = js.get_ball(ball_index)
if ball_status[0] < -.5 and not self.is_ball_used(ball_index, 'x', -1):
self.joystick_config[button] = ('is_ball', ball_index, 'x', -1)
return True
elif ball_status[0] > .5 and not self.is_ball_used(ball_index, 'x', 1):
self.joystick_config[button] = ('is_ball', ball_index, 'x', 1)
return True
if ball_status[1] < -.5 and not self.is_ball_used(ball_index, 'y', -1):
self.joystick_config[button] = ('is_ball', ball_index, 'y', -1)
return True
elif ball_status[1] > .5 and not self.is_ball_used(ball_index, 'y', 1):
self.joystick_config[button] = ('is_ball', ball_index, 'y', 1)
return True
# check axes for activity...
# (that's plural of axis. Not a tree chopping tool. Although a USB Axe would be awesome!)
for axis_index in range(js.get_numaxes()):
axis_status = js.get_axis(axis_index)
if axis_status < -.5 and not self.is_axis_used(axis_index, -1):
self.joystick_config[button] = ('is_axis', axis_index, -1)
return True
elif axis_status > .5 and not self.is_axis_used(axis_index, 1):
self.joystick_config[button] = ('is_axis', axis_index, 1)
return True
return False
# The following 4 methods are helper methods used by the above method
# to determine if a particular button/axis/hat/trackball are already
# configured to a particular button designation
def is_button_used(self, button_index):
for button in self.buttons:
config = self.joystick_config.get(button)
if config != None and config[0] == 'is_button' and config[1] == button_index:
return True
return False
def is_hat_used(self, hat_index, axis, direction):
for button in self.buttons:
config = self.joystick_config.get(button)
if config != None and config[0] == 'is_hat':
if config[1] == hat_index and config[2] == axis and config[3] == direction:
return True
return False
def is_ball_used(self, ball_index, axis, direction):
for button in self.buttons:
config = self.joystick_config.get(button)
if config != None and config[0] == 'is_ball':
if config[1] == ball_index and config[2] == axis and config[3] == direction:
return True
return False
def is_axis_used(self, axis_index, direction):
for button in self.buttons:
config = self.joystick_config.get(button)
if config != None and config[0] == 'is_axis':
if config[1] == axis_index and config[2] == direction:
return True
return False
# Set joystick information.
# The joystick needs to be plugged in before this method is called (see main() method)
def init_joystick(self):
joystick = pygame.joystick.Joystick(0)
joystick.init()
self.joystick = joystick
self.joystick_name = joystick.get_name()
# A simple player object. This only keeps track of position.
class Player:
def __init__(self):
self.x = 320
self.y = 240
self.speed = 4
def move_left(self):
self.x -= self.speed
def move_right(self):
self.x += self.speed
def move_up(self):
self.y -= self.speed
def move_down(self):
self.y += self.speed
# The main method...duh!
def main():
fps = 30
print("Plug in a USB gamepad. Do it! Do it now! Press enter after you have done this.")
wait_for_enter()
pygame.init()
num_joysticks = pygame.joystick.get_count()
if num_joysticks < 1:
print("You didn't plug in a joystick. FORSHAME!")
return
input_manager = InputManager()
screen = pygame.display.set_mode((640, 480))
button_index = 0
player = Player()
# The main game loop
while not input_manager.quit_attempt:
start = time.time()
screen.fill((0,0,0))
# There will be two phases to our "game".
is_configured = button_index >= len(input_manager.buttons)
# In the first phase, the user will be prompted to configure the joystick by pressing
# the key that is indicated on the screen
# You would probably do this in an input menu in your real game.
if not is_configured:
success = configure_phase(screen, input_manager.buttons[button_index], input_manager)
# if the user pressed a button and configured it...
if success:
# move on to the next button that needs to be configured
button_index += 1
# In the second phase, the user will control a "character" on the screen (which will
# be represented by a simple blue ball) that obeys the directional commands, whether
# it's from the joystick or the keyboard.
else:
interaction_phase(screen, player, input_manager)
pygame.display.flip()
# maintain frame rate
difference = time.time() - start
delay = 1.0 / fps - difference
if delay > 0:
time.sleep(delay)
def configure_phase(screen, button, input_manager):
# need to pump windows events otherwise the window will lock up and die
input_manager.get_events()
# configure_button looks at the state of ALL buttons pressed on the joystick
# and will map the first pressed button it sees to the current button you pass
# in here.
success = input_manager.configure_button(button)
# tell user which button to configure
write_text(screen, "Press the " + button + " button", 100, 100)
# If a joystick button was successfully configured, return True
return success
def interaction_phase(screen, player, input_manager):
# I dunno. This doesn't do anything. But this is how
# you would access key hit events and the like.
# Ideal for "shooting a weapon" or "jump" sort of events
for event in input_manager.get_events():
if event.key == 'A' and event.down:
pass # weeeeeeee
if event.key == 'X' and event.up:
input_manager.quit_attempt = True
# ...but for things like "move in this direction", you want
# to know if a button is pressed and held
if input_manager.is_pressed('left'):
player.move_left()
elif input_manager.is_pressed('right'):
player.move_right()
if input_manager.is_pressed('up'):
player.move_up()
elif input_manager.is_pressed('down'):
player.move_down()
# Draw the player
pygame.draw.circle(screen, (0, 0, 255), (player.x, player.y), 20)
# There was probably a more robust way of doing this. But
# command line interaction was not the point of the tutorial.
def wait_for_enter():
try: input()
except: pass
# This renders text on the game screen.
# Also not the point of this tutorial.
cached_text = {}
cached_font = None
def write_text(screen, text, x, y):
global cached_text, cached_font
image = cached_text.get(text)
if image == None:
if cached_font == None:
cached_font = pygame.font.Font(pygame.font.get_default_font(), 12)
image = cached_font.render(text, True, (255, 255, 255))
cached_text[text] = image
screen.blit(image, (x, y - image.get_height()))
# Kick things off.
main()
# fin.
|
python
|
from interface import GameWindow
from PyQt5.QtWidgets import QApplication
import sys
if __name__ == "__main__":
app = QApplication(sys.argv)
demo = GameWindow()
demo.update()
demo.showMaximized()
sys.exit(app.exec_())
|
python
|
# Okta intel module - Group
import json
import logging
from typing import Dict
from typing import List
from typing import Tuple
import neo4j
from okta.framework.ApiClient import ApiClient
from okta.framework.OktaError import OktaError
from okta.framework.PagedResults import PagedResults
from okta.models.usergroup import UserGroup
from cartography.intel.okta.sync_state import OktaSyncState
from cartography.intel.okta.utils import create_api_client
from cartography.intel.okta.utils import is_last_page
from cartography.util import timeit
logger = logging.getLogger(__name__)
@timeit
def _get_okta_groups(api_client: ApiClient) -> List[UserGroup]:
"""
Get groups from Okta server
:param api_client: Okta api client
:return: Array of group information
"""
group_list: List[UserGroup] = []
next_url = None
# SDK Bug
# get_paged_groups returns User object instead of UserGroup
while True:
# https://developer.okta.com/docs/reference/api/groups/#list-groups
if next_url:
paged_response = api_client.get(next_url)
else:
params = {
'limit': 10000,
}
paged_response = api_client.get_path('/', params)
paged_results = PagedResults(paged_response, UserGroup)
group_list.extend(paged_results.result)
if not is_last_page(paged_response):
next_url = paged_response.links.get("next").get("url")
else:
break
return group_list
@timeit
def get_okta_group_members(api_client: ApiClient, group_id: str) -> List[Dict]:
"""
Get group members from Okta server
:param api_client: Okta api client
:param group_id: group to fetch members from
:return: Array of group membership information
"""
member_list: List[Dict] = []
next_url = None
while True:
try:
# https://developer.okta.com/docs/reference/api/groups/#list-group-members
if next_url:
paged_response = api_client.get(next_url)
else:
params = {
'limit': 1000,
}
paged_response = api_client.get_path(f'/{group_id}/users', params)
except OktaError:
logger.error(f"OktaError while listing members of group {group_id}")
raise
member_list.extend(json.loads(paged_response.text))
if not is_last_page(paged_response):
next_url = paged_response.links.get("next").get("url")
else:
break
return member_list
@timeit
def transform_okta_group_list(okta_group_list: List[UserGroup]) -> Tuple[List[Dict], List[str]]:
groups: List[Dict] = []
groups_id: List[str] = []
for current in okta_group_list:
groups.append(transform_okta_group(current))
groups_id.append(current.id)
return groups, groups_id
def transform_okta_group(okta_group: UserGroup) -> Dict:
"""
Transform okta group object to consumable dictionary for graph
:param okta_group: okta group object
:return: Dictionary representing the group properties
"""
# https://github.com/okta/okta-sdk-python/blob/master/okta/models/usergroup/UserGroup.py
group_props = {}
group_props["id"] = okta_group.id
group_props["name"] = okta_group.profile.name
group_props["description"] = okta_group.profile.description
if okta_group.profile.samAccountName:
group_props["sam_account_name"] = okta_group.profile.samAccountName
else:
group_props["sam_account_name"] = None
if okta_group.profile.dn:
group_props["dn"] = okta_group.profile.dn
else:
group_props["dn"] = None
if okta_group.profile.windowsDomainQualifiedName:
group_props["windows_domain_qualified_name"] = okta_group.profile.windowsDomainQualifiedName
else:
group_props["windows_domain_qualified_name"] = None
if okta_group.profile.externalId:
group_props["external_id"] = okta_group.profile.externalId
else:
group_props["external_id"] = None
return group_props
def transform_okta_group_member_list(okta_member_list: List[Dict]) -> List[Dict]:
"""
Only include fields that we care about in the Okta object sent to Neo4j to avoid network issues.
"""
transformed_member_list: List[Dict] = []
for user in okta_member_list:
transformed_member_list.append({
'first_name': user['profile']['firstName'],
'last_name': user['profile']['lastName'],
'login': user['profile']['login'],
'email': user['profile']['email'],
'second_email': user['profile'].get('secondEmail'),
'id': user['id'],
'created': user['created'],
'activated': user.get('activated'),
'status_changed': user.get('status_changed'),
'last_login': user.get('last_login'),
'okta_last_updated': user.get('okta_last_updated'),
'password_changed': user.get('password_changed'),
'transition_to_status': user.get('transitioningToStatus'),
})
return transformed_member_list
@timeit
def _load_okta_groups(
neo4j_session: neo4j.Session, okta_org_id: str, group_list: List[Dict],
okta_update_tag: int,
) -> None:
"""
Add okta groups to the graph
:param neo4j_session: session with the Neo4j server
:param okta_org_id: okta organization id
:param group_list: list of groups
:param okta_update_tag: The timestamp value to set our new Neo4j resources with
:return: Nothing
"""
ingest_statement = """
MATCH (org:OktaOrganization{id: {ORG_ID}})
WITH org
UNWIND {GROUP_LIST} as group_data
MERGE (new_group:OktaGroup{id: group_data.id})
ON CREATE SET new_group.firstseen = timestamp()
SET new_group.name = group_data.name,
new_group.description = group_data.description,
new_group.sam_account_name = group_data.sam_account_name,
new_group.dn = group_data.dn,
new_group.windows_domain_qualified_name = group_data.windows_domain_qualified_name,
new_group.external_id = group_data.external_id,
new_group.lastupdated = {okta_update_tag}
WITH new_group, org
MERGE (org)-[org_r:RESOURCE]->(new_group)
ON CREATE SET org_r.firstseen = timestamp()
SET org_r.lastupdated = {okta_update_tag}
"""
neo4j_session.run(
ingest_statement,
ORG_ID=okta_org_id,
GROUP_LIST=group_list,
okta_update_tag=okta_update_tag,
)
@timeit
def load_okta_group_members(
neo4j_session: neo4j.Session, group_id: str, member_list: List[Dict],
okta_update_tag: int,
) -> None:
"""
Add group membership data into the graph
:param neo4j_session: session with the Neo4j server
:param group_id: group id to map
:param member_list: group members
:param okta_update_tag: The timestamp value to set our new Neo4j resources with
:return: Nothing
"""
ingest = """
MATCH (group:OktaGroup{id: {GROUP_ID}})
WITH group
UNWIND {MEMBER_LIST} as member
MERGE (user:OktaUser{id: member.id})
ON CREATE SET user.firstseen = timestamp(),
user.first_name = member.first_name,
user.last_name = member.last_name,
user.login = member.login,
user.email = member.email,
user.second_email = member.second_email,
user.created = member.created,
user.activated = member.activated,
user.status_changed = member.status_changed,
user.last_login = member.last_login,
user.okta_last_updated = member.okta_last_updated,
user.password_changed = member.password_changed,
user.transition_to_status = member.transition_to_status,
user.lastupdated = {okta_update_tag}
MERGE (user)-[r:MEMBER_OF_OKTA_GROUP]->(group)
ON CREATE SET r.firstseen = timestamp()
SET r.lastupdated = {okta_update_tag}
"""
    logger.info(f'Loading {len(member_list)} members of group {group_id}')
neo4j_session.run(
ingest,
GROUP_ID=group_id,
MEMBER_LIST=member_list,
okta_update_tag=okta_update_tag,
)
@timeit
def sync_okta_group_membership(
neo4j_session: neo4j.Session, api_client: ApiClient, group_list_info: List[Dict],
okta_update_tag: int,
) -> None:
"""
Map group members in the graph
:param neo4j_session: session with the Neo4j server
:param api_client: Okta api client
:param group_list_info: Group information as list
:param okta_update_tag: The timestamp value to set our new Neo4j resources with
:return: Nothing
"""
for group_info in group_list_info:
group_id = group_info["id"]
members_data: List[Dict] = get_okta_group_members(api_client, group_id)
transformed_member_data: List[Dict] = transform_okta_group_member_list(members_data)
load_okta_group_members(neo4j_session, group_id, transformed_member_data, okta_update_tag)
@timeit
def sync_okta_groups(
neo4_session: neo4j.Session, okta_org_id: str, okta_update_tag: int, okta_api_key: str,
sync_state: OktaSyncState,
) -> None:
"""
Synchronize okta groups
:param neo4_session: session with the Neo4j server
:param okta_org_id: okta organization id
:param okta_update_tag: The timestamp value to set our new Neo4j resources with
:param okta_api_key: Okta API key
:param sync_state: Okta sync state
:return: Nothing
"""
logger.info("Syncing Okta groups")
api_client = create_api_client(okta_org_id, "/api/v1/groups", okta_api_key)
okta_group_data = _get_okta_groups(api_client)
group_list_info, group_ids = transform_okta_group_list(okta_group_data)
# store result for later use
sync_state.groups = group_ids
_load_okta_groups(neo4_session, okta_org_id, group_list_info, okta_update_tag)
sync_okta_group_membership(neo4_session, api_client, group_list_info, okta_update_tag)
|
python
|
import time
import requests
from models.states import OrderSide
class DeribitExchangeInterface:
def __init__(self, key, secret, base_url, api_url, instrument):
self.key = key
self.secret = secret
self.base_url = base_url
self.api_url = api_url
self.url = base_url + api_url
self.access_token = None
self.refresh_token = None
self.expires_in = 0
self.instrument = instrument
def _auth(self):
method = 'public/auth'
params = {
'grant_type': 'client_credentials',
'client_secret': self.secret,
'client_id': self.key,
'scope': 'session:micropython'
}
response = self._post(method, params)
if response:
self.access_token = response['access_token']
self.refresh_token = response['refresh_token']
self.expires_in = time.time() + response['expires_in']
def _post(self, method, params):
url = self.url + method
headers = {'Content-Type': 'application/json'}
data = {
'method': method,
'params': params
}
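        # Re-authenticate when the access token has expired; the public auth
        # endpoint itself never needs a token.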
if method != 'public/auth' and self.expires_in < time.time():
self._auth()
if method.startswith('private'):
headers['Authorization'] = 'Bearer {}'.format(self.access_token)
        try:
            response = requests.post(url=url, headers=headers, json=data)
        except requests.RequestException as err:
            # surface transport-level failures instead of referencing an undefined logger
            raise Exception(f"Request to {method} failed: {err}")
if response.status_code != 200:
raise Exception(f"Wrong response code: {response.status_code}",
f"{response.text}")
return response.json()['result']
def get_positions(self):
method = 'private/get_position'
params = {'instrument_name': self.instrument}
result = self._post(method, params)
return {'average_price': result.get('average_price'),
'size': result.get('size', 0)}
    def get_mark_price(self):
        # for options: use the mark price from the book summary
method = 'public/get_book_summary_by_instrument'
params = {'instrument_name': self.instrument}
result = self._post(method, params)
return result[0]['mark_price'] if result else None
def get_last_trade_price(self):
if self.instrument.endswith("C") or self.instrument.endswith("P"):
            return self.get_mark_price()
method = 'public/get_last_trades_by_instrument'
params = {'instrument_name': self.instrument, 'count': 1}
result = self._post(method, params)
return result['trades'][0]['price'] if result else None
def get_last_order_price(self, side):
method = 'private/get_order_history_by_instrument'
params = {'instrument_name': self.instrument, 'count': 1}
last_order_price = [order['price'] for order
in self._post(method, params)
if order['direction'] == side]
return last_order_price[0] if len(last_order_price) > 0 else self.get_last_trade_price()
    def get_order_params_from_response(self, response):
        return {'price': response.get('price'),
                'size': response.get('amount'),
                'side': response.get('direction'),
                'order_id': response.get('order_id'),
                'status': response.get('order_state'),
                'timestamp': response.get('last_update_timestamp')}
def get_open_orders(self):
method = 'private/get_open_orders_by_instrument'
params = {'instrument_name': self.instrument}
open_orders = self._post(method, params)
        return [self.get_order_params_from_response(order) for order in open_orders]
def get_order_state(self, order_id):
method = 'private/get_order_state'
params = {'order_id': order_id}
try:
order = self._post(method, params)
except Exception as err:
order = {'order_id': order_id, 'order_state': 'cancelled'}
        return self.get_order_params_from_response(order)
def get_orders_state(self, orders_state):
open_orders = self.get_open_orders()
open_order_ids = [open_order.get('order_id') for open_order in open_orders]
order_state_ids = [order_id for order_id in orders_state if order_id not in open_order_ids]
return open_orders + [self.get_order_state(order_id) for order_id in order_state_ids]
def create_order(self, order):
method = 'private/buy' if order['side'] == OrderSide.buy else 'private/sell'
params = {
'instrument_name': self.instrument,
'amount': order['size'],
'price': order['price'],
'post_only': 'true',
'time_in_force': 'good_til_cancelled',
'type': 'limit',
}
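        # post_only + good_til_cancelled places a passive limit order that rests on
        # the book as a maker order instead of crossing the spread.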
order = self._post(method, params)
        return self.get_order_params_from_response(order.get('order'))
def cancel_all_orders(self):
method = 'private/cancel_all_by_instrument'
params = {'instrument_name': self.instrument, 'type': 'all'}
result = self._post(method, params)
return result
def cancel_order(self, order_id):
method = 'private/cancel'
params = {'order_id': order_id}
result = self._post(method, params)
return result
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
DT = 0.01
FRAMERATE = 60
N_ROWS = 64
SECONDS = 10
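# Field files are expected to hold one frame per blank-line-separated block;
# rows are space-separated values, and vector entries are formatted as "(u,v)".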
def read_field_file(file_path, field_type):
    if field_type != 'scalar' and field_type != 'vector':
        raise ValueError('field_type must be scalar or vector')
    with open(file_path, 'r') as f:
        file_str = f.read()
frame_arr = file_str.split('\n\n')
frame_arr = [frame for frame in frame_arr if frame]
frame_arr = [frame.split('\n') for frame in frame_arr]
frame_arr = [[row.split(' ') for row in frame] for frame in frame_arr]
    if field_type == 'scalar':
frame_arr = [[[float(item) for item in row] for row in frame] for frame in frame_arr]
    elif field_type == 'vector':
def string_to_vector(string):
string = string.replace('(', '')
string = string.replace(')', '')
pair = tuple(string.split(','))
pair = (float(pair[0]), float(pair[1]))
return pair
frame_arr = [[[string_to_vector(item) for item in row] for row in frame] for frame in frame_arr]
frame_arr = np.array(frame_arr)
return frame_arr
def read_velocity():
return read_field_file('velocity.txt', 'vector')
def read_pressure():
return read_field_file('pressure.txt', 'scalar')
def read_divergence(absolute = True):
divergence = read_field_file('divergence.txt', 'scalar')
if(absolute):
divergence = np.abs(divergence)
return divergence
def read_color():
return read_field_file('color.txt', 'scalar')
velocity_frames = read_velocity()
pressure_frames = read_pressure()
color_frames = read_color()
divergence_frames = read_divergence()
frame_interval = 1000//FRAMERATE
frame_count = velocity_frames.shape[0]
fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.set_title('Pressure and Velocity')
ax2.set_title('Color')
ax3.set_title('Absolute Divergence (Bad!)')
artists = []
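# Initialise each artist with random placeholder data; update() swaps in the real
# per-frame velocity, pressure, color and divergence fields.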
foo = np.random.random(size=(64, 64))
artists.append(ax1.quiver(foo, foo, scale=100, scale_units='inches', color='blue'))
artists.append(ax1.imshow(foo, cmap='hot', interpolation='nearest', vmin=-2, vmax=2, animated=True))
artists.append(ax2.imshow(foo, interpolation='nearest', vmin=0, vmax=1, animated=True))
artists.append(ax3.imshow(foo, cmap='hot', interpolation='nearest', vmin=0, vmax=1, animated=True))
def update(i):
u = velocity_frames[i, :, :, 0]
v = velocity_frames[i, :, :, 1]
pressure_frame = pressure_frames[i, :, :]
color_frame = color_frames[i, :, :]
divergence_frame = divergence_frames[i, :, :]
artists[0].set_UVC(u, v)
artists[1].set_array(pressure_frame)
artists[2].set_array(color_frame)
artists[3].set_array(divergence_frame)
return artists
ani = animation.FuncAnimation(fig, update, frames=frame_count, interval=frame_interval, blit=True)
plt.show()
|
python
|
import requests
from abc import ABC, abstractmethod
from collections import OrderedDict
from coronacli.config import OWID_DATA_URL
class BaseScraper(ABC):
""" Abstract scraper class defining common functionality among all scrapers and abstract methods to implement """
def __init__(self):
super().__init__()
@staticmethod
def get_data(url):
""" Submits HTTP GET request to extract data from given url
:param url - an HTTP URL to submit the request to
:returns the result of the GET request
"""
# TODO add exception handling/retries
return requests.get(url)
@staticmethod
def get_text(request_data):
""" Given a result from some request to a URL, this method extracts the relevant site text from it
:param request_data - the data returned from a request such as request.get
:returns the text contained within that result
"""
return request_data.text
@abstractmethod
def _extract_data(self, data_dict):
pass
@abstractmethod
def scrape(self, url):
pass
class OurWorldInDataScraper(BaseScraper):
""" Extracts country information and COVID-19 cases by country from
https://github.com/owid/covid-19-data/tree/master/public/data
Parses these data by splitting the information into two distinct collections to pass downstream
"""
def __init__(self):
self.owid_covid_data = {}
self.owid_country_data = {}
super().__init__()
def _extract_countries_object(self, country_code, country_obj):
country_obj.pop("data")
country_obj["country_code"] = country_code
self.owid_country_data[country_code] = country_obj
def _extract_covid_object(self, country_code, country_obj):
covid_data = country_obj["data"]
self.owid_covid_data[country_code] = covid_data
def _extract_data(self, data_dict):
for country_code, country_obj in data_dict.items():
self._extract_covid_object(country_code, country_obj)
self._extract_countries_object(country_code, country_obj)
def scrape(self, url=OWID_DATA_URL):
""" Performs the necessary calls to request data from given URL, parse it, and extract the relevant items
:param url - the URL which contains the Our World In Data COVID-19 dataset (defaults to config)
:returns data on COVID-19 cases and country information from OWID dataset
"""
import json
# Get the JSON string from the given URL and parse into Python dictionary
data = self.get_data(url)
data_text = self.get_text(data)
data_dict = json.loads(data_text, object_pairs_hook=OrderedDict)
# Parse the resulting dictionary obtained from the JSON at given URL
self._extract_data(data_dict)
return self.owid_covid_data, self.owid_country_data
def get_scraper(name):
""" Returns the relevant class by the given name ala factory pattern
:param name - the name of the scraper to return
:returns the relevant scraper class for the given scraper name
"""
supported_scrapers = {"OurWorldInData": OurWorldInDataScraper}
try:
scraper_object = supported_scrapers[name]
except KeyError:
raise KeyError("{0} is not a supported scraper".format(name))
return scraper_object
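

# Minimal usage sketch (names taken from this module):
#
#   scraper_cls = get_scraper("OurWorldInData")
#   covid_data, country_data = scraper_cls().scrape()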
|
python
|
from __future__ import print_function
import collections
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# The GPU id to use, usually either "0" or "1"
os.environ["CUDA_VISIBLE_DEVICES"]="3"
import tensorflow as tf
from keras.models import Sequential, load_model
from keras.utils import multi_gpu_model
from keras.layers import Dense, Activation, Embedding, Dropout, TimeDistributed
from keras.layers import LSTM, Multiply, Merge
from keras.optimizers import Adam, Adagrad
from keras.utils import to_categorical, plot_model
from keras.callbacks import ModelCheckpoint
from keras.layers import GlobalAveragePooling1D
import numpy as np
import argparse
import gensim
import json
from keras.callbacks import Callback
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
import pickle
class Metrics(Callback):
def on_train_begin(self, logs={}):
self.val_f1s = []
self.val_recalls = []
self.val_precisions = []
def on_epoch_end(self, epoch, logs={}):
val_predict = (np.asarray(parallel_model.predict([val_sent_e, val_claim_e]))).round()
val_targ = val_y
_val_f1 = f1_score(val_targ, val_predict)
_val_recall = recall_score(val_targ, val_predict)
_val_precision = precision_score(val_targ, val_predict)
self.val_f1s.append(_val_f1)
self.val_recalls.append(_val_recall)
self.val_precisions.append(_val_precision)
        print(' - val_f1: %f - val_precision: %f - val_recall: %f' % (_val_f1, _val_precision, _val_recall))
return
metrics = Metrics()
#model = gensim.models.KeyedVectors.load_word2vec_format('~/GoogleNews-vectors-negative300.bin', binary=True)
model = dict()
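# NOTE: the word2vec load above is commented out, so the lookup into `model` below
# yields zero vectors; the precomputed embedding matrix is loaded from bplace.pkl later.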
with open('../../data/google/files/birth-place_label', 'r') as f:
labels_org = f.read()
with open('../../data/google/files/birth-place_sentence', 'r') as f:
sentences = f.read()
with open('../../data/google/files/birth-place_claim', 'r') as f:
claims = f.read()
from string import punctuation
all_text = ''.join([c for c in sentences if c not in punctuation])
sentences = all_text.split('\n')
all_text = ''.join([c for c in claims if c not in punctuation])
claims = all_text.split('\n')
all_text = ' '.join(claims)
all_text += ' '.join(sentences)
words = all_text.lower().split()
# changing here
words = list(set(words))
vocab_to_int = dict()
for i in range(len(words)):
vocab_to_int.update({words[i]: i})
# from collections import Counter
# counts = Counter(words)
# vocab = sorted(counts, key=counts.get, reverse=True)
# vocab_to_int = {word: ii for ii, word in enumerate(vocab, 1)}
sent_ints = []
for each in sentences:
each = each.lower()
sent_ints.append([vocab_to_int[word] for word in each.split()])
claim_ints = []
for each in claims:
each = each.lower()
claim_ints.append([vocab_to_int[word] for word in each.split()])
labels = np.array([1 if l == "Positive" else 0 for l in labels_org.split()])
from collections import Counter
claim_lens = Counter([len(x) for x in claim_ints])
sent_lens = Counter([len(x) for x in sent_ints])
print("Zero-length sentences: {}".format(sent_lens[0]))
print("Maximum sentence length: {}".format(max(sent_lens)))
print("Zero-length claims: {}".format(claim_lens[0]))
print("Maximum claim length: {}".format(max(claim_lens)))
# Filter out that review with 0 length
#claim_ints = [r for r in claim_ints if len(r) > 0]
#sent_ints = [r[0:500] for r in sent_ints if len(r) > 0]
tc = []
ts = []
tl = []
for i in range(len(labels)):
if len(claim_ints[i])*len(sent_ints[i]) > 0:
tc.append(claim_ints[i])
ts.append(sent_ints[i])
tl.append(labels[i])
claim_ints = np.array(tc)
sent_ints = np.array(ts)
labels = np.array(tl)
from collections import Counter
claim_lens = Counter([len(x) for x in claim_ints])
print("Zero-length claims: {}".format(claim_lens[0]))
print("Maximum claim length: {}".format(max(claim_lens)))
sent_lens = Counter([len(x) for x in sent_ints])
print("Zero-length sents: {}".format(sent_lens[0]))
print("Maximum sent length: {}".format(max(sent_lens)))
mx_sent = max(sent_lens)
mx_claim = max(claim_lens)
#mx_sent = 400
claim_seq_len = mx_claim
sent_seq_len = mx_sent
claim_features = np.zeros((len(claim_ints), claim_seq_len), dtype=int)
sent_features = np.zeros((len(sent_ints), sent_seq_len), dtype=int)
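# Left-pad each claim/sentence with zeros so that every row has the maximum length;
# tokens are written into the rightmost positions of each row.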
for i, row in enumerate(claim_ints):
claim_features[i, -len(row):] = np.array(row)[:claim_seq_len]
for i, row in enumerate(sent_ints):
sent_features[i, -len(row):] = np.array(row)[:sent_seq_len]
split_frac = 0.9
split_index = int(split_frac * len(claim_features))
train_claim, val_claim = claim_features[:split_index], claim_features[split_index:]
train_sent, val_sent = sent_features[:split_index], sent_features[split_index:]
train_y, val_y = labels[:split_index], labels[split_index:]
#split_frac = 1
#split_index = int(split_frac * len(val_claim))
#val_claim, test_claim = val_claim[:split_index], val_claim[split_index:]
#val_sent, test_sent = val_sent[:split_index], val_sent[split_index:]
#val_y, test_y = val_y[:split_index], val_y[split_index:]
n_words = len(vocab_to_int) + 1 # Add 1 for 0 added to vocab
embed_size = 300
w2v_embed = np.ndarray([n_words, embed_size])
for i in range(n_words - 1):
if words[i] not in model:
w2v_embed[vocab_to_int[words[i]]] = np.array([0] * embed_size)
else:
w2v_embed[vocab_to_int[words[i]]] = model[words[i]]
#with open('bplace.pkl','wb') as f:
# pickle.dump(w2v_embed,f)
with open('bplace.pkl','rb') as f:
w2v_embed = pickle.load(f)
import random
idx = random.sample(range(len(train_claim)), len(train_claim))
train_claim_s = []
train_sent_s = []
train_y_s = []
for i in idx:
train_claim_s.append(train_claim[i])
train_sent_s.append(train_sent[i])
train_y_s.append(train_y[i])
train_claim = np.array(train_claim_s)
train_sent = np.array(train_sent_s)
train_y = np.array(train_y_s)
#test_claim = np.array(test_claim)
#test_sent = np.array(test_sent)
#test_y = np.array(test_y)
train_claim_e = np.ndarray((len(train_claim), mx_claim, embed_size))
train_sent_e = np.ndarray((len(train_sent), mx_sent, embed_size))
for i in range(len(train_claim)):
for j in range(mx_claim):
train_claim_e[i][j][:] = w2v_embed[train_claim[i][j]]
for i in range(len(train_sent)):
for j in range(mx_sent):
train_sent_e[i][j][:] = w2v_embed[train_sent[i][j]]
val_claim_e = np.ndarray((len(val_claim), mx_claim, embed_size))
val_sent_e = np.ndarray((len(val_sent), mx_sent, embed_size))
for i in range(len(val_claim)):
for j in range(mx_claim):
val_claim_e[i][j][:] = w2v_embed[val_claim[i][j]]
for i in range(len(val_sent)):
for j in range(mx_sent):
val_sent_e[i][j][:] = w2v_embed[val_sent[i][j]]
hidden_size = 256
use_dropout = True
vocabulary = n_words
embedding_layer = Embedding(input_dim=vocabulary, output_dim=300)
lstm_out = 150
drop = 0.5
model1 = Sequential()
#model1.add(embedding_layer)
#model1.add(Embedding(vocabulary, embed_size, input_length=mx_sent))
model1.add(LSTM(lstm_out, return_sequences=False, input_shape=(mx_sent, embed_size)))
#model1.add(LSTM(embed_size, return_sequences=True))
#model1.add(GlobalAveragePooling1D())
#model1.add(TimeDistributed(Dense(1)))
#model1.add(LSTM(embed_size, return_sequences=False))
if use_dropout:
model1.add(Dropout(drop))
model1.add(Dense(lstm_out, activation='sigmoid', name='out1'))
model2 = Sequential()
#model2.add(embedding_layer)
#model2.add(Embedding(vocabulary, embed_size, input_length=mx_claim))
#model2.add(LSTM(embed_size, return_sequences=True))
model2.add(LSTM(lstm_out, return_sequences=False, input_shape=(mx_claim, embed_size)))
#model2.add(LSTM(embed_size, return_sequences=True))
#model2.add(LSTM(embed_size, return_sequences = False))
#model2.add(GlobalAveragePooling1D())
#model2.add(TimeDistributed(Dense(1)))
if use_dropout:
model2.add(Dropout(drop))
model2.add(Dense(lstm_out, activation='sigmoid', name='out2'))
model = Sequential()
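# Combine the sentence and claim encoders by element-wise multiplication
# (legacy Keras 1.x Merge layer) before the final sigmoid classifier.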
model.add(Merge([model1, model2], mode='mul'))
#model.add(Dense(600))
#model.add(Dense(600))
#model.add(Dense(300))
#model.add(Dense(100))
#model.add(Merge([model1, model2], mode='cos', dot_axes=1))
model.add(Dense(1, activation = 'sigmoid'))
# model = Multiply()([model1.get_layer('out1').output,model2.get_layer('out2').output])
# model.add(TimeDistributed(Dense(vocabulary)))
# model.add(Activation('softmax'))
#optimizer = Adam()
# model1.compile(loss='mean_squared_error', optimizer='adam')
# parallel_model = multi_gpu_model(model, gpus=2)
parallel_model = model
parallel_model.compile(loss='binary_crossentropy', optimizer=Adagrad(lr=0.001), metrics=['acc'])
#parallel_model.compile(loss='mean_squared_error', optimizer='adam' )
print(model.summary())
print(model1.summary())
print(model2.summary())
# checkpointer = ModelCheckpoint(filepath=data_path + '/model-{epoch:02d}.hdf5', verbose=1)
num_epochs = 100
plot_model(parallel_model, to_file='model.png')
parallel_model.fit(x=[train_sent_e, train_claim_e], y=train_y, batch_size=128, epochs=num_epochs,
validation_data=([val_sent_e,val_claim_e],val_y), callbacks = [metrics])
#print(parallel_model.predict([val_sent_e[:100], val_claim_e[:100]]), val_y[:100])
#print(parallel_model.evaluate([val_sent_e,val_claim_e],val_y))
#parallel_model.save("final_model.hdf5")
|
python
|
from gym_trading.envs.trading_env import TradingEnv
from gym_trading.envs.Testing_Env import TestingEnv
|
python
|
import unittest
import numpy as np
from materialscoord.einstein_crystal_perturbation import perturb_einstein_crystal
from pymatgen.core.structure import Structure
class EinsteinTest(unittest.TestCase):
"""Test einstein perturbation functionality."""
def test_perturb(self):
# basic test to check structures are not equal after perturbation
structure = Structure.from_spacegroup(
225,
[[5.7, 0, 0], [0, 5.7, 0], [0, 0, 5.7]],
["Na1+", "Cl1-"],
[[0, 0, 0], [0.5, 0, 0]],
)
perturb_structure = perturb_einstein_crystal(structure)
orig_sites = [s.frac_coords for s in structure]
perturb_sites = [s.frac_coords for s in perturb_structure]
# check coord arrays are not almost equal
self.assertRaises(
AssertionError,
np.testing.assert_array_almost_equal,
orig_sites,
perturb_sites,
)
|
python
|
class Config(object):
DEBUG = False
TESTING = False
class DevConfig(Config):
DEBUG = False
|
python
|
class MyPolynomialDecay:
"""
Class to define a Polynomial Decay for the learning rate
"""
def __init__(self, max_epochs=100, init_lr=1e-3, power=1.0):
"""
Class constructor.
:param max_epochs (int): Max number of epochs
:param init_lr (float): Initial Learning Rate
:param power (float): Power
"""
# store the maximum number of epochs, base learning rate,
# and power of the polynomial
self.max_epochs = max_epochs
self.init_lr = init_lr
self.power = power
def __call__(self, epoch):
"""
Called by the callback.
:param epoch (int): Current epoch
:return updated_lr (float): Updated LR
"""
# compute the new learning rate based on polynomial decay
decay = (1 - (epoch / float(self.max_epochs))) ** self.power
alpha = self.init_lr * decay
# return the new learning rate
return float(alpha)
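

# A minimal usage sketch (assumes a compiled tf.keras model named `model`; the standard
# LearningRateScheduler callback passes the epoch index to __call__):
#
#   from tensorflow.keras.callbacks import LearningRateScheduler
#   schedule = MyPolynomialDecay(max_epochs=50, init_lr=1e-3, power=2.0)
#   model.fit(x_train, y_train, epochs=50, callbacks=[LearningRateScheduler(schedule)])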
|
python
|
import tensorflow as tf
from niftynet.application.base_application import BaseApplication
from niftynet.engine.application_factory import \
ApplicationNetFactory, InitializerFactory, OptimiserFactory
from niftynet.engine.application_variables import \
CONSOLE, NETWORK_OUTPUT, TF_SUMMARIES
from niftynet.engine.sampler_grid_v2 import GridSampler
from niftynet.engine.sampler_resize_v2 import ResizeSampler
from niftynet.engine.sampler_uniform_v2 import UniformSampler
from niftynet.engine.sampler_weighted_v2 import WeightedSampler
from niftynet.engine.sampler_balanced_v2 import BalancedSampler
from niftynet.engine.windows_aggregator_grid import GridSamplesAggregator
from niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator
from niftynet.io.image_reader import ImageReader
from niftynet.layer.binary_masking import BinaryMaskingLayer
from niftynet.layer.discrete_label_normalisation import \
DiscreteLabelNormalisationLayer
from niftynet.layer.histogram_normalisation import \
HistogramNormalisationLayer
from niftynet.layer.loss_segmentation import LossFunction
from niftynet.layer.mean_variance_normalisation import \
MeanVarNormalisationLayer
from niftynet.layer.pad import PadLayer
from niftynet.layer.post_processing import PostProcessingLayer
from niftynet.layer.rand_flip import RandomFlipLayer
from niftynet.layer.rand_rotation import RandomRotationLayer
from niftynet.layer.rand_spatial_scaling import RandomSpatialScalingLayer
from niftynet.evaluation.segmentation_evaluator import SegmentationEvaluator
from niftynet.layer.rand_elastic_deform import RandomElasticDeformationLayer
import pandas as pd
from itertools import chain, combinations
import random
import numpy as np
from multi_modal_application import MultiModalApplication
SUPPORTED_INPUT = set(['image', 'label', 'weight', 'sampler', 'inferred', 'choices', 'output_mod'])
MODALITIES_img = ['T1', 'T1c', 'T2', 'Flair']
np.random.seed(0)
tf.set_random_seed(1)
def str2bool(v):
    return v.lower() == "true"
class U_HeMISApplication(MultiModalApplication):
REQUIRED_CONFIG_SECTION = "MULTIMODAL"
def __init__(self, net_param, action_param, action):
super(MultiModalApplication, self).__init__()
tf.logging.info('starting segmentation application')
self.action = action
self.net_param = net_param
self.action_param = action_param
self.data_param = None
self.segmentation_param = None
self.SUPPORTED_SAMPLING = {
'uniform': (self.initialise_uniform_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
'weighted': (self.initialise_weighted_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
'resize': (self.initialise_resize_sampler,
self.initialise_resize_sampler,
self.initialise_resize_aggregator),
'balanced': (self.initialise_balanced_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
}
def set_iteration_update(self, iteration_message):
if self.is_training:
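            # Hetero-modal training: randomly keep between 1 and 4 of the input MRI
            # modalities for this iteration and feed the boolean mask to the network.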
choices = []
nb_choices = np.random.randint(4)
choices = np.random.choice(4, nb_choices+1, replace=False, p=[1/4,1/4,1/4,1/4])
choices = [True if k in choices else False for k in range(4)]
print(choices)
iteration_message.data_feed_dict[self.choices] = choices
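            # Step-decay the learning rate by a factor of 4 every 10000 iterations.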
n_iter = iteration_message.current_iter
decay = 4
leng = 10000
iteration_message.data_feed_dict[self.lr] = self.action_param.lr /(decay**int( n_iter/leng ))
def connect_data_and_network(self,
outputs_collector=None,
gradients_collector=None):
# def data_net(for_training):
# with tf.name_scope('train' if for_training else 'validation'):
# sampler = self.get_sampler()[0][0 if for_training else -1]
# data_dict = sampler.pop_batch_op()
# image = tf.cast(data_dict['image'], tf.float32)
# return data_dict, self.net(image, is_training=for_training)
def switch_sampler(for_training):
with tf.name_scope('train' if for_training else 'validation'):
sampler = self.get_sampler()[0][0 if for_training else -1]
return sampler.pop_batch_op()
self.var = tf.placeholder_with_default(0, [], 'var')
self.choices = tf.placeholder_with_default([True, True, True, True], [4], 'choices')
if self.is_training:
self.lr = tf.placeholder_with_default(self.action_param.lr, [], 'learning_rate')
# if self.action_param.validation_every_n > 0:
# data_dict, net_out = tf.cond(tf.logical_not(self.is_validation),
# lambda: data_net(True),
# lambda: data_net(False))
# else:
# data_dict, net_out = data_net(True)
if self.action_param.validation_every_n > 0:
data_dict = tf.cond(tf.logical_not(self.is_validation),
lambda: switch_sampler(for_training=True),
lambda: switch_sampler(for_training=False))
else:
data_dict = switch_sampler(for_training=True)
with tf.name_scope('Optimiser'):
optimiser_class = OptimiserFactory.create(
name=self.action_param.optimiser)
self.optimiser = optimiser_class.get_instance(
learning_rate=self.lr)
image = tf.cast(data_dict['image'], tf.float32)
            image_unstack = tf.unstack(image, axis=-1)
net_seg= self.net({MODALITIES_img[k]: tf.expand_dims(image_unstack[k],-1) for k in range(4)}, self.choices, is_training=self.is_training)
cross = LossFunction(
n_class=4,
loss_type='CrossEntropy')
dice = LossFunction(
n_class=4,
loss_type='Dice',
softmax=True)
gt = data_dict['label']
            loss_cross = cross(prediction=net_seg, ground_truth=gt, weight_map=None)
            loss_dice = dice(prediction=net_seg, ground_truth=gt)
data_loss = loss_cross + loss_dice
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
reg_loss = tf.reduce_mean(
[tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
loss = data_loss + reg_loss
grads = self.optimiser.compute_gradients(loss, colocate_gradients_with_ops=False)
# collecting gradients variables
gradients_collector.add_to_collection([grads])
# collecting output variables
outputs_collector.add_to_collection(
var=data_loss, name='loss',
average_over_devices=False, collection=CONSOLE)
outputs_collector.add_to_collection(
var=loss, name='loss',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
outputs_collector.add_to_collection(
var=self.choices, name='choices',
average_over_devices=False, collection=CONSOLE)
elif self.is_inference:
# converting logits into final output for
# classification probabilities or argmax classification labels
data_dict = switch_sampler(for_training=False)
image = tf.cast(data_dict['image'], tf.float32)
            image = tf.unstack(image, axis=-1)
choices = self.segmentation_param.choices
choices = [str2bool(k) for k in choices]
print(choices)
net_seg = self.net({MODALITIES_img[k]: tf.expand_dims(image[k],-1) for k in range(4)}, choices, is_training=True, is_inference=True)
print('output')
post_process_layer = PostProcessingLayer(
'ARGMAX', num_classes=4)
net_seg = post_process_layer(net_seg)
outputs_collector.add_to_collection(
var=net_seg, name='window',
average_over_devices=False, collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=data_dict['image_location'], name='location',
average_over_devices=False, collection=NETWORK_OUTPUT)
self.initialise_aggregator()
def interpret_output(self, batch_output):
if self.is_inference:
return self.output_decoder.decode_batch(
batch_output['window'], batch_output['location'])
return True
def initialise_evaluator(self, eval_param):
self.eval_param = eval_param
self.evaluator = SegmentationEvaluator(self.readers[0],
self.segmentation_param,
eval_param)
def add_inferred_output(self, data_param, task_param):
return self.add_inferred_output_like(data_param, task_param, 'label')
|
python
|
import numpy as np
import os
import tensorflow as tf
from tensorflow.contrib.framework import nest
def loop_tf(loop_fn, inputs, persistent_initializer, transient_initializer, n=None, time_major=False):
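    """Run `loop_fn` once per time step of `inputs` inside a tf.while_loop.

    Values in `persistent_initializer` are written to TensorArrays at every step and
    returned stacked over time (transposed back to batch-major unless `time_major`);
    values in `transient_initializer` are carried between steps but not recorded.
    `n` is the static number of steps used to size the TensorArrays.
    """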
def create_tensor_array(initial_tensor: tf.Tensor):
return tf.TensorArray(initial_tensor.dtype, size=n, element_shape=initial_tensor.get_shape())
tensor_arrays = nest.map_structure(create_tensor_array, persistent_initializer)
def while_fn(*args):
current_iteration = args[0]
persistent_values = args[1]
transient_values = args[2]
current_tensor_arrays = args[3]
if time_major:
input_values = inputs[current_iteration]
else:
input_values = inputs[:, current_iteration]
new_persistent, new_transient = loop_fn(input_values, persistent_values, transient_values)
flat_new_persistent = nest.flatten(new_persistent)
flat_tensor_arrays = nest.flatten(current_tensor_arrays)
flat_written_tensor_arrays = [
ta.write(current_iteration, a) for ta, a in zip(flat_tensor_arrays, flat_new_persistent)
]
new_tensor_arrays = nest.pack_sequence_as(current_tensor_arrays, flat_written_tensor_arrays)
return current_iteration + 1, new_persistent, new_transient, new_tensor_arrays
def while_cond(*args):
seq_len = tf.shape(inputs)[0] if time_major else tf.shape(inputs)[1]
return tf.less(args[0], seq_len)
_, final_persistent, final_transient, final_tensor_arrays = \
tf.while_loop(while_cond, while_fn, (0, persistent_initializer, transient_initializer, tensor_arrays))
final_sequence_tensors = nest.map_structure(lambda x: x.stack(), final_tensor_arrays)
def make_batch_major(tensor):
permutation = np.arange(len(tensor.get_shape()))
permutation[:2] = permutation[:2][::-1]
return tf.transpose(tensor, permutation)
if not time_major:
final_sequence_tensors = nest.map_structure(make_batch_major, final_sequence_tensors)
return final_sequence_tensors
def combine_flat_list(_structure, _flat_list, axis=1):
_combined = []
for i in range(len(_flat_list[0])):
t = []
for v in _flat_list:
t.append(v[i])
cc = tf.concat(t, axis)
_combined.append(cc)
return nest.pack_sequence_as(_structure, _combined)
def tf_structure_equal(_s1, _s2):
return tf.reduce_all([tf.reduce_all(tf.equal(a, b)) for a, b in zip(_s1, _s2)])
def read_summary(path):
events = [os.path.join(path, a) for a in os.listdir(path) if a.count('events') > 0]
d = dict()
for e_path in events:
try:
for e in tf.train.summary_iterator(e_path):
for v in e.summary.value:
if v.tag not in d.keys():
d[v.tag] = []
d[v.tag].append((e.step, v.simple_value))
if v.tag == 'loss':
print(v.simple_value)
        except Exception:
            # skip corrupt or truncated event files
            pass
for k, v in d.items():
a = np.array(d[k])
sorted_inds = np.argsort(a[:, 0])
d[k] = a[sorted_inds]
return d
if __name__ == '__main__':
def loop_fn(inputs, persistent, transient):
new_transient = persistent**2
result = tf.where(tf.greater(new_transient, 25.), inputs, inputs + persistent)
return result, new_transient
a = tf.ones((1, 10))
b = loop_tf(loop_fn, a, tf.zeros((1,)), tf.ones((1,)), 10)
with tf.Session() as session:
print(session.run(b))
|
python
|
# import to namespace
from .base import IRRBase # noqa
from .native import IRRClient # noqa
|
python
|
# Generated by Django 3.1.1 on 2020-09-19 21:11
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='')),
('description', models.CharField(max_length=20)),
('text', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='MainInformation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language', models.CharField(choices=[('pl', 'pl'), ('en', 'en')], max_length=2)),
('title', models.CharField(max_length=30)),
('text', models.TextField()),
],
),
migrations.CreateModel(
name='Technology',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='')),
('title', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField()),
('language', models.CharField(choices=[('pl', 'pl'), ('en', 'en')], max_length=2)),
('title', models.CharField(max_length=50)),
('link', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='')),
('description', models.TextField()),
('created', models.DateField()),
('technologies', models.ManyToManyField(to='projects.Technology')),
],
options={
'ordering': ['order'],
'get_latest_by': 'order',
},
),
migrations.CreateModel(
name='LearnProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField()),
('language', models.CharField(choices=[('pl', 'pl'), ('en', 'en')], max_length=2)),
('title', models.CharField(max_length=50)),
('link', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='')),
('description', models.TextField()),
('created', models.DateField()),
('technologies', models.ManyToManyField(to='projects.Technology')),
],
options={
'ordering': ['order'],
'get_latest_by': 'order',
},
),
]
|
python
|
import numpy as np
import tensorflow as tf
from .utils import *
class Graph2Gauss:
"""
Implementation of the method proposed in the paper:
'Deep Gaussian Embedding of Graphs: Unsupervised Inductive Learning via Ranking'
by Aleksandar Bojchevski and Stephan Günnemann,
published at the 6th International Conference on Learning Representations (ICLR), 2018.
Copyright (C) 2018
Aleksandar Bojchevski
Technical University of Munich
"""
def __init__(self, A, X, L, K=1, p_val=0.10, p_test=0.05, p_nodes=0.0, n_hidden=None,
max_iter=2000, tolerance=100, scale=False, seed=0, verbose=True):
"""
Parameters
----------
A : scipy.sparse.spmatrix
Sparse unweighted adjacency matrix
X : scipy.sparse.spmatrix
            Sparse attribute matrix
L : int
Dimensionality of the node embeddings
K : int
Maximum distance to consider
p_val : float
Percent of edges in the validation set, 0 <= p_val < 1
p_test : float
Percent of edges in the test set, 0 <= p_test < 1
p_nodes : float
Percent of nodes to hide (inductive learning), 0 <= p_nodes < 1
n_hidden : list(int)
A list specifying the size of each hidden layer, default n_hidden=[512]
max_iter : int
Maximum number of epoch for which to run gradient descent
tolerance : int
Used for early stopping. Number of epoch to wait for the score to improve on the validation set
scale : bool
Whether to apply the up-scaling terms.
seed : int
Random seed used to split the edges into train-val-test set
verbose : bool
Verbosity.
"""
tf.reset_default_graph()
tf.set_random_seed(seed)
np.random.seed(seed)
X = X.astype(np.float32)
# completely hide some nodes from the network for inductive evaluation
if p_nodes > 0:
A = self.__setup_inductive(A, X, p_nodes)
else:
self.X = tf.SparseTensor(*sparse_feeder(X))
self.feed_dict = None
self.N, self.D = X.shape
self.L = L
self.max_iter = max_iter
self.tolerance = tolerance
self.scale = scale
self.verbose = verbose
if n_hidden is None:
n_hidden = [512]
self.n_hidden = n_hidden
# hold out some validation and/or test edges
# pre-compute the hops for each node for more efficient sampling
if p_val + p_test > 0:
train_ones, val_ones, val_zeros, test_ones, test_zeros = train_val_test_split_adjacency(
A=A, p_val=p_val, p_test=p_test, seed=seed, neg_mul=1, every_node=True, connected=False,
undirected=(A != A.T).nnz == 0)
A_train = edges_to_sparse(train_ones, self.N)
hops = get_hops(A_train, K)
else:
hops = get_hops(A, K)
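        # Up-scaling terms per neighbourhood: the neighbour count at each hop, plus a
        # complement count (N minus the hops[-1] row sums) for nodes beyond K hops.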
scale_terms = {h if h != -1 else max(hops.keys()) + 1:
hops[h].sum(1).A1 if h != -1 else hops[1].shape[0] - hops[h].sum(1).A1
for h in hops}
self.__build()
self.__dataset_generator(hops, scale_terms)
self.__build_loss()
# setup the validation set for easy evaluation
if p_val > 0:
val_edges = np.row_stack((val_ones, val_zeros))
self.neg_val_energy = -self.energy_kl(val_edges)
self.val_ground_truth = A[val_edges[:, 0], val_edges[:, 1]].A1
self.val_early_stopping = True
else:
self.val_early_stopping = False
# setup the test set for easy evaluation
if p_test > 0:
test_edges = np.row_stack((test_ones, test_zeros))
self.neg_test_energy = -self.energy_kl(test_edges)
self.test_ground_truth = A[test_edges[:, 0], test_edges[:, 1]].A1
# setup the inductive test set for easy evaluation
if p_nodes > 0:
self.neg_ind_energy = -self.energy_kl(self.ind_pairs)
def __build(self):
w_init = tf.contrib.layers.xavier_initializer
sizes = [self.D] + self.n_hidden
for i in range(1, len(sizes)):
W = tf.get_variable(name='W{}'.format(i), shape=[sizes[i - 1], sizes[i]], dtype=tf.float32,
initializer=w_init())
b = tf.get_variable(name='b{}'.format(i), shape=[sizes[i]], dtype=tf.float32, initializer=w_init())
if i == 1:
encoded = tf.sparse_tensor_dense_matmul(self.X, W) + b
else:
encoded = tf.matmul(encoded, W) + b
encoded = tf.nn.relu(encoded)
W_mu = tf.get_variable(name='W_mu', shape=[sizes[-1], self.L], dtype=tf.float32, initializer=w_init())
b_mu = tf.get_variable(name='b_mu', shape=[self.L], dtype=tf.float32, initializer=w_init())
self.mu = tf.matmul(encoded, W_mu) + b_mu
W_sigma = tf.get_variable(name='W_sigma', shape=[sizes[-1], self.L], dtype=tf.float32, initializer=w_init())
b_sigma = tf.get_variable(name='b_sigma', shape=[self.L], dtype=tf.float32, initializer=w_init())
log_sigma = tf.matmul(encoded, W_sigma) + b_sigma
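        # elu(x) + 1 maps the raw outputs to (0, inf); the epsilon keeps the variances strictly positive.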
self.sigma = tf.nn.elu(log_sigma) + 1 + 1e-14
def __build_loss(self):
hop_pos = tf.stack([self.triplets[:, 0], self.triplets[:, 1]], 1)
hop_neg = tf.stack([self.triplets[:, 0], self.triplets[:, 2]], 1)
eng_pos = self.energy_kl(hop_pos)
eng_neg = self.energy_kl(hop_neg)
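        # Square-exponential ranking loss: drives the energy of positive (closer-hop)
        # pairs towards zero and pushes the energy of negative pairs up.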
energy = tf.square(eng_pos) + tf.exp(-eng_neg)
if self.scale:
self.loss = tf.reduce_mean(energy * self.scale_terms)
else:
self.loss = tf.reduce_mean(energy)
def __setup_inductive(self, A, X, p_nodes):
N = A.shape[0]
nodes_rnd = np.random.permutation(N)
n_hide = int(N * p_nodes)
nodes_hide = nodes_rnd[:n_hide]
A_hidden = A.copy().tolil()
A_hidden[nodes_hide] = 0
A_hidden[:, nodes_hide] = 0
# additionally add any dangling nodes to the hidden ones since we can't learn from them
nodes_dangling = np.where(A_hidden.sum(0).A1 + A_hidden.sum(1).A1 == 0)[0]
if len(nodes_dangling) > 0:
nodes_hide = np.concatenate((nodes_hide, nodes_dangling))
nodes_keep = np.setdiff1d(np.arange(N), nodes_hide)
self.X = tf.sparse_placeholder(tf.float32)
self.feed_dict = {self.X: sparse_feeder(X[nodes_keep])}
self.ind_pairs = batch_pairs_sample(A, nodes_hide)
self.ind_ground_truth = A[self.ind_pairs[:, 0], self.ind_pairs[:, 1]].A1
self.ind_feed_dict = {self.X: sparse_feeder(X)}
A = A[nodes_keep][:, nodes_keep]
return A
def energy_kl(self, pairs):
"""
Computes the energy of a set of node pairs as the KL divergence between their respective Gaussian embeddings.
Parameters
----------
pairs : array-like, shape [?, 2]
The edges/non-edges for which the energy is calculated
Returns
-------
energy : array-like, shape [?]
The energy of each pair given the currently learned model
"""
ij_mu = tf.gather(self.mu, pairs)
ij_sigma = tf.gather(self.sigma, pairs)
sigma_ratio = ij_sigma[:, 1] / ij_sigma[:, 0]
trace_fac = tf.reduce_sum(sigma_ratio, 1)
log_det = tf.reduce_sum(tf.log(sigma_ratio + 1e-14), 1)
mu_diff_sq = tf.reduce_sum(tf.square(ij_mu[:, 0] - ij_mu[:, 1]) / ij_sigma[:, 0], 1)
return 0.5 * (trace_fac + mu_diff_sq - self.L - log_det)
def __dataset_generator(self, hops, scale_terms):
"""
Generates a set of triplets and associated scaling terms by:
1. Sampling for each node a set of nodes from each of its neighborhoods
2. Forming all implied pairwise constraints
Uses tf.Dataset API to perform the sampling in a separate thread for increased speed.
Parameters
----------
hops : dict
A dictionary where each 1, 2, ... K, neighborhoods are saved as sparse matrices
scale_terms : dict
The appropriate up-scaling terms to ensure unbiased estimates for each neighbourhood
Returns
-------
"""
def gen():
while True:
yield to_triplets(sample_all_hops(hops), scale_terms)
dataset = tf.data.Dataset.from_generator(gen, (tf.int32, tf.float32), ([None, 3], [None]))
self.triplets, self.scale_terms = dataset.prefetch(1).make_one_shot_iterator().get_next()
def __save_vars(self, sess):
"""
Saves all the trainable variables in memory. Used for early stopping.
Parameters
----------
sess : tf.Session
Tensorflow session used for training
"""
self.saved_vars = {var.name: (var, sess.run(var)) for var in tf.trainable_variables()}
def __restore_vars(self, sess):
"""
Restores all the trainable variables from memory. Used for early stopping.
Parameters
----------
sess : tf.Session
Tensorflow session used for training
"""
for name in self.saved_vars:
sess.run(tf.assign(self.saved_vars[name][0], self.saved_vars[name][1]))
def train(self, gpu_list='0'):
"""
Trains the model.
Parameters
----------
gpu_list : string
A list of available GPU devices.
Returns
-------
sess : tf.Session
Tensorflow session that can be used to obtain the trained embeddings
"""
early_stopping_score_max = -float('inf')
tolerance = self.tolerance
train_op = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(self.loss)
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(visible_device_list=gpu_list,
allow_growth=True)))
sess.run(tf.global_variables_initializer())
for epoch in range(self.max_iter):
loss, _ = sess.run([self.loss, train_op], self.feed_dict)
if self.val_early_stopping:
val_auc, val_ap = score_link_prediction(self.val_ground_truth, sess.run(self.neg_val_energy, self.feed_dict))
early_stopping_score = val_auc + val_ap
if self.verbose and epoch % 50 == 0:
print('epoch: {:3d}, loss: {:.4f}, val_auc: {:.4f}, val_ap: {:.4f}'.format(epoch, loss, val_auc, val_ap))
else:
early_stopping_score = -loss
if self.verbose and epoch % 50 == 0:
print('epoch: {:3d}, loss: {:.4f}'.format(epoch, loss))
if early_stopping_score > early_stopping_score_max:
early_stopping_score_max = early_stopping_score
tolerance = self.tolerance
self.__save_vars(sess)
else:
tolerance -= 1
if tolerance == 0:
break
if tolerance > 0:
print('WARNING: Training might not have converged. Try increasing max_iter')
self.__restore_vars(sess)
return sess
|
python
|
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition to the permissions in the GNU General Public License, the
# authors give you unlimited permission to link or embed the compiled
# version of this file into combinations with other programs, and to
# distribute those combinations without any restriction coming from the
# use of this file. (The General Public License restrictions do apply in
# other respects; for example, they cover modification of the file, and
# distribution when not linked into a combine executable.)
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import os, sys
supportdir = os.path.join(sys.path[0], 'support')
genpydir = os.path.join(supportdir, 'gen_py')
initmod = os.path.join(genpydir, '__init__.py')
if not os.path.exists(genpydir):
os.makedirs(genpydir)
if not os.path.exists(initmod):
    open(initmod, 'w').close()
import win32com
win32com.__gen_path__ = genpydir
win32com.__path__.insert(0, supportdir)
# for older Pythons
import copy_reg
|
python
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from core.models import User
from .utils import create_user
CREATE_USER_URL = reverse('api:user_create')
ME_URL = reverse('api:user_me')
class TestPublicUserAPI(TestCase):
"""Tests for the public API for the User model"""
def setUp(self) -> None:
"""Sets APIClient for the tests"""
self.client = APIClient()
def test_create_valid_user_success(self) -> None:
"""Tests if a User is properly created"""
payload = {
'email': '[email protected]',
'password': 'test_password',
'username': 'test_username',
}
response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
user = get_user_model().objects.get(**response.data)
self.assertGreaterEqual(user.crypto_key, 100000000)
self.assertLessEqual(user.crypto_key, 999999999)
self.assertTrue(user.check_password(payload['password']))
self.assertNotIn('password', response.data)
def test_user_exists(self) -> None:
"""Tests what happens when a User is created twice"""
payload = {
'email': '[email protected]',
'password': 'test_password',
'username': 'test_username',
}
create_user(**payload)
response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_password_too_short(self) -> None:
"""Tests what happens when a password is too short"""
payload = {
'email': '[email protected]',
'password': 'pass',
'username': 'test_username',
}
response = self.client.post(CREATE_USER_URL, payload)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
user_exists = (
get_user_model().objects.filter(email=payload['email']).exists()
)
self.assertFalse(user_exists)
def test_retrieve_user_unauthorized(self) -> None:
"""
Tests what happens when not authenticated User requests 'user_me' view
"""
response = self.client.get(ME_URL)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class TestPrivateUserAPI(TestCase):
"""Tests for the private API for the User model"""
def setUp(self) -> None:
"""Creates and authenticates User for the tests"""
self.user = create_user(
email='[email protected]',
password='test_password',
username='test_username',
)
self.client = APIClient()
self.client.force_authenticate(user=self.user)
def test_retrieve_user_success(self) -> None:
"""Tests if User's info is retrieved successfully"""
response = self.client.get(ME_URL)
expected_result = {
'username': self.user.username,
'email': self.user.email,
'crypto_key': self.user.crypto_key,
}
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, expected_result)
def test_post_me_not_allowed(self) -> None:
"""Tests if POST is not allowed for the 'user_me' view"""
response = self.client.post(ME_URL, {})
self.assertEqual(
response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED
)
def test_update_user_profile(self) -> None:
"""Tests if User's profile is updated successfully"""
payload = {'password': 'new_password', 'username': 'new_username'}
response = self.client.patch(ME_URL, payload)
self.user.refresh_from_db()
self.assertEqual(self.user.username, payload['username'])
self.assertTrue(self.user.check_password(payload['password']))
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_user(self) -> None:
"""Tests if User is deleted successfully"""
response = self.client.delete(ME_URL)
user_exists = User.objects.filter(username=self.user.username).exists()
self.assertFalse(user_exists)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
|
python
|
import os
import shutil
import sys
import argparse
import time
import itertools
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import torch.optim as optim
import torch.nn.functional as F
from sklearn.metrics import confusion_matrix
import scikitplot as skplt
from torch.backends import cudnn
from torch.nn import DataParallel
import torchvision.transforms as transforms
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
sys.path.append('./')
from networks.resnet import resnet18
from contrast.models import vgg
from utils.util import set_prefix, write, add_prefix, remove_prefix
from utils.read_data import EasyDR
plt.switch_backend('agg')
parser = argparse.ArgumentParser(description='Training on Diabetic Retinopathy Dataset')
parser.add_argument('--batch_size', '-b', default=5, type=int, help='batch size')
parser.add_argument('--epochs', '-e', default=90, type=int, help='training epochs')
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--cuda', default=torch.cuda.is_available(), type=bool, help='use gpu or not')
parser.add_argument('--step_size', default=50, type=int, help='learning rate decay interval')
parser.add_argument('--gamma', default=0.1, type=float, help='learning rate decay scope')
parser.add_argument('--interval_freq', '-i', default=12, type=int, help='printing log frequence')
parser.add_argument('--data', '-d', default='./data/target_128', help='path to dataset')
parser.add_argument('--prefix', '-p', default='classifier', type=str, help='folder prefix')
parser.add_argument('--best_model_path', default='model_best.pth.tar', help='best model saved path')
parser.add_argument('--model_type', '-m', default='vgg', type=str, help='classifier type', choices=['vgg', 'resnet18'])
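# Track the best (lowest) validation loss seen so far and the accuracy achieved at that point.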
min_loss = 100000.0
best_acc = 0.0
def main():
global args, min_loss, best_acc
args = parser.parse_args()
device_counts = torch.cuda.device_count()
    print('there are %d GPUs available' % device_counts)
# save source script
set_prefix(args.prefix, __file__)
model = model_selector(args.model_type)
print(model)
if args.cuda:
model = DataParallel(model).cuda()
else:
        raise RuntimeError('no GPU is available')
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# accelerate the speed of training
cudnn.benchmark = True
train_loader, val_loader = load_dataset()
# class_names=['LESION', 'NORMAL']
class_names = train_loader.dataset.class_names
print(class_names)
criterion = nn.BCELoss().cuda()
# learning rate decay per epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=args.step_size, gamma=args.gamma)
since = time.time()
print('-' * 10)
for epoch in range(args.epochs):
exp_lr_scheduler.step()
train(train_loader, model, optimizer, criterion, epoch)
cur_loss, cur_acc = validate(model, val_loader, criterion)
is_best = cur_loss < min_loss
        min_loss = min(cur_loss, min_loss)
if is_best:
best_acc = cur_acc
save_checkpoint({
'epoch': epoch + 1,
'arch': args.model_type,
'state_dict': model.state_dict(),
            'min_loss': min_loss,
'acc': best_acc,
'optimizer': optimizer.state_dict(),
}, is_best)
time_elapsed = time.time() - since
print('Training complete in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
check_point = torch.load(add_prefix(args.prefix, args.best_model_path))
print('min_loss=%.4f, best_acc=%.4f' %(check_point['min_loss'], check_point['acc']))
write(vars(args), add_prefix(args.prefix, 'paras.txt'))
def model_selector(model_type):
if model_type == 'vgg':
model = vgg()
elif model_type == 'resnet18':
model = resnet18(is_ptrtrained=False)
else:
        raise ValueError('unsupported model type: %s' % model_type)
return model
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
# save training state after each epoch
torch.save(state, add_prefix(args.prefix, filename))
if is_best:
shutil.copyfile(add_prefix(args.prefix, filename),
add_prefix(args.prefix, args.best_model_path))
def load_dataset():
if args.data == './data/flip':
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
normalize = transforms.Normalize(mean, std)
# pre_transforms = transforms.Compose([
# transforms.RandomHorizontalFlip(),
# transforms.RandomVerticalFlip(),
# transforms.RandomRotation(10),
# transforms.ColorJitter(0.05, 0.05, 0.05, 0.05)
# ])
pre_transforms = transforms.Compose([
transforms.RandomRotation(5),
transforms.ColorJitter(0.05, 0.05, 0.05, 0.05)
])
post_transforms = transforms.Compose([
transforms.ToTensor(),
normalize
])
val_transforms = transforms.Compose([
transforms.ToTensor(),
normalize,
])
train_dataset = EasyDR(traindir, pre_transforms, post_transforms, alpha=0)
val_dataset = EasyDR(valdir, None, val_transforms, alpha=0)
print('load flipped DR successfully!!!')
else:
raise ValueError("parameter 'data' that means path to dataset must be in "
"['./data/target_128', './data/flip']")
train_loader = DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True,
num_workers=4,
pin_memory=True if args.cuda else False)
val_loader = DataLoader(val_dataset,
batch_size=args.batch_size,
shuffle=False,
num_workers=1,
pin_memory=True if args.cuda else False)
return train_loader, val_loader
def train(train_loader, model, optimizer, criterion, epoch, threshold=0.5):
model.train(True)
print('Epoch {}/{}'.format(epoch + 1, args.epochs))
print('-' * 10)
running_loss = 0.0
running_corrects = 0
# Iterate over data.
for idx, (inputs, labels, _, _) in enumerate(train_loader):
# wrap them in Variable
if args.cuda:
inputs, labels = inputs.cuda(), labels.float().cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward
outputs = F.sigmoid(model(inputs).squeeze(1))
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if idx % args.interval_freq == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch + 1, idx * len(inputs), len(train_loader.dataset),
100. * idx / len(train_loader), loss.item()))
pred = torch.where(outputs.data > threshold, torch.ones_like(outputs.data), torch.zeros_like(outputs.data)).long()
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(pred == labels.data.long()).item()
epoch_loss = running_loss / len(train_loader.dataset)
epoch_acc = running_corrects / len(train_loader.dataset)
print('Training Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))
def validate(model, val_loader, criterion, threshold=0.5):
model.eval()
test_loss = 0
correct = 0
for data, target, _, _ in val_loader:
if args.cuda:
data, target = data.cuda(), target.float().cuda()
outputs = F.sigmoid(model(data).squeeze(1))
test_loss += criterion(outputs, target).item()
# get the index of the max log-probability
pred = torch.where(outputs.data > threshold, torch.ones_like(outputs.data), torch.zeros_like(outputs.data)).long()
correct += torch.sum(pred == target.data.long()).item()
test_loss /= len(val_loader.dataset)
test_acc = 100. * correct / len(val_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
test_loss, correct, len(val_loader.dataset), test_acc))
return test_loss, test_acc
if __name__ == '__main__':
main()
|
python
|
from sh import gmkproject
from sh import gchproject
from sh import gchuser
from sh import glusers
from sh import gstatement
from cloudmesh.accounting.AccountingBaseClass import AccountingBaseClass
class GoldAccounting(AccountingBaseClass):
"""The gold accounting implementation class"""
def project_usage(self, project):
statement = gstatement("-p", project)
return statement
# what does summarize do
# needs to return dict
def user_usage(self, userid):
statement = gstatement("-u", userid)
return statement
# needs to return dict
    def projects(self, userid):
        # what does --raw do?
        output = glusers("--show", "Projects", userid, "-l", "--quiet")
        # return one project name per line instead of a single newline-separated string
        return [line.strip() for line in str(output).splitlines() if line.strip()]
    def users(self):
        output = glusers("--show", "Name", "--quiet")
        # return one user name per line instead of a single newline-separated string
        return [line.strip() for line in str(output).splitlines() if line.strip()]
def default_project(self, userid, project):
# can be done with gchsuser
pass
def modify_user(self, userid, email=None, phone=None,
firstname=None, lastname=None):
        # needs to check if the common name is unique; if it's not we may
        # want to add numbers
        # assumption: gchuser takes the target user name as its trailing argument
        if firstname is not None or lastname is not None:
            gchuser("-n", "%s %s" % (firstname, lastname), userid)
        if email is not None:
            gchuser("-E", email, userid)
        if phone is not None:
            gchuser("-F", phone, userid)
def add_project(self, name, description):
gmkproject("-d", description, "-p", name)
def add_user_to_projects(self, project, userid):
username = None # transfer user id to username
        gchproject("--addUsers", username, project)
def deactivate_project(self, name):
gchproject("-I", name)
    def deactivate_user_from_project(self, project, userid):
        gchproject("--deactUsers", userid, project)
    def activate_user_from_project(self, project, userid):
        gchproject("--actUsers", userid, project)
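# Minimal usage sketch (hand-written addition, not part of the original module). It
# assumes the Gold command line tools are installed and that AccountingBaseClass can
# be instantiated without constructor arguments -- adjust if it defines its own __init__.
if __name__ == '__main__':
    accounting = GoldAccounting()
    print(accounting.users())               # list of known user names
    print(accounting.user_usage("alice"))   # "alice" is a hypothetical user id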
|
python
|
from django.db import models
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
# Create your models here.
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
class ExtraInfo(models.Model):
    # choices must be an ordered sequence of (value, label) pairs, not a set
    RODZAJE = (
        (0, 'Nieznany'),
        (1, 'Horror'),
        (2, 'Sci-Fi'),
        (3, 'Dramat'),
        (5, 'Komedia'),
    )
czas_trwania = models.IntegerField()
rodzaj = models.IntegerField(choices=RODZAJE, default=0)
class Film(models.Model):
tytul = models.CharField(max_length=50)
opis = models.TextField()
po_premierze = models.BooleanField(default=False)
premiera = models.DateField(null=True, blank=True)
rok = models.IntegerField()
    imdb_rating = models.DecimalField(decimal_places=2, max_digits=4, blank=True, null=True)
extra_info = models.OneToOneField(ExtraInfo, on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return self.tytul
class Recenzja(models.Model):
opis = models.TextField(default='')
gwiazdki = models.IntegerField(default=5)
film = models.ForeignKey(Film, on_delete=models.CASCADE, related_name='recenzje')
class Aktor(models.Model):
imie = models.CharField(max_length=32)
nazwisko = models.CharField(max_length=32)
filmy = models.ManyToManyField(Film)
def __str__(self):
        return self.imie
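# Illustrative ORM usage (e.g. from `python manage.py shell`), not executed as part of
# this module; field values are made-up examples:
#   extra = ExtraInfo.objects.create(czas_trwania=120, rodzaj=2)
#   film = Film.objects.create(tytul='Przyklad', opis='...', rok=2020, extra_info=extra)
#   Recenzja.objects.create(opis='Dobry film', gwiazdki=4, film=film)
#   film.recenzje.count()  # -> 1, via the related_name on Recenzja.film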
|
python
|
# This file is part of the GhostDriver project from Neustar inc.
#
# Copyright (c) 2012, Ivan De Marino <[email protected] / [email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.insert(0, "utils")
import properties
import unittest
from selenium import webdriver
class BaseTest(unittest.TestCase):
def __init__(self, arguments):
super(BaseTest, self).__init__(arguments)
# Reading configuration
self.config = properties.Properties()
self.config.load(open("../config.ini"))
# Preparing Capabilities
self.caps = {
'takeScreenshot': False,
'javascriptEnabled': True
}
def setUp(self):
driver = self.config.getProperty("driver")
# TODO Use/Make a PhantomJSDriver for Python
# TODO Handle the case where "driver" is a URL to a RemoteWebDriver instance
# Decide the Driver to use
if driver == "firefox":
self.driver = webdriver.Firefox()
else:
self.driver = webdriver.Remote(
command_executor="http://localhost:8080/wd/hub",
desired_capabilities=self.caps)
def tearDown(self):
self.driver.close()
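# Hypothetical example subclass (not part of the GhostDriver suite): it shows how a
# concrete test builds on BaseTest; the target URL is purely illustrative.
class ExampleTitleTest(BaseTest):
    def test_title(self):
        self.driver.get("http://www.example.com")  # illustrative page
        self.assertIn("Example", self.driver.title)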
|
python
|
# doit automatically picks up tasks as long as their unqualified name is prefixed with task_.
# Read the guide: https://pydoit.org/tasks.html
from dodos.action import *
from dodos.behavior import *
from dodos.benchbase import *
from dodos.ci import *
from dodos.forecast import *
from dodos.noisepage import *
from dodos.pilot import *
from dodos.project1 import *
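# Illustrative sketch of the convention described above (not a real project task):
# any module-level callable named task_* that returns a dict of actions is picked up,
# e.g.
#   def task_hello():
#       """Exposed as `doit hello` because of the task_ prefix."""
#       return {"actions": ['echo "hello from doit"'], "verbosity": 2}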
|
python
|
C_CONST = 0.375
MC_ITERATION_COUNT = 100
D_MAX = 100
|
python
|
from typing import Tuple
from NewDeclarationInQueue.processfiles.customprocess.search_lines_in_pages import SearchLinesInPage
from NewDeclarationInQueue.processfiles.customprocess.search_text_line_parameter import SearchTextLineParameter
from NewDeclarationInQueue.processfiles.process_messages import ProcessMessages
class SearchTableInPages:
"""
    Class to search for tables in a document
"""
page_no = 0
search_text_limit: SearchLinesInPage
cell_search: SearchTextLineParameter
def __init__(self, page_no: int, upper_text_search: SearchTextLineParameter, \
lower_text_search: SearchTextLineParameter, cell_search: SearchTextLineParameter):
"""
Initialize the class with the necessary info for the processing
Args:
page_no (int): page number where the table is located
upper_text_search (SearchTextLineParameter): parameters to search the upper line that defines the table position
            lower_text_search (SearchTextLineParameter): parameters to search the lower line that defines the table position
cell_search (SearchTextLineParameter): parameters to search the first cell in the header of the table
"""
self.page_no = page_no
self.search_text_limit = SearchLinesInPage(page_no, upper_text_search, lower_text_search)
self.cell_search = cell_search
def change_page(self, new_page_no: int):
"""
Change the page where the table is searched
Args:
new_page_no (int): new page number where the table should be searched
"""
self.page_no = new_page_no
self.search_text_limit.page_no = new_page_no
def find_table_by_position(self, table_pages, page_no, bounding_box) -> list:
"""
        Find the table by its bounding box
Args:
table_pages ([list]): list of pages in the document
page_no ([int]): page number where the table should be searched
bounding_box ([list]): list of points that defines the bounding box of the table
Returns:
[list]: JSON information corresponding to the table
"""
if page_no >= len(table_pages) or bounding_box is None:
return None
page = table_pages[page_no]
found_table = None
for table in page['form']['tables']:
table_bounding_box = table['bounding_box']
if (abs(table_bounding_box[1] - bounding_box[1]) <= 0.5 and
table_bounding_box[5] >= bounding_box[5]):
found_table = table
break
return found_table
def search_table(self, data: dict, message: ProcessMessages) -> Tuple[list, ProcessMessages, int]:
"""
Search a table in a document
Args:
            data (dict): input JSON data of the document
            message (ProcessMessages): collects the messages generated in the processing workflow
Returns:
Tuple[list, ProcessMessages, int]: JSON info corresponding to the searched information,
the processing messages and the page number where
the table was found
"""
table_headers = []
bOk = self.search_text_limit.get_limit_lines(data)
        if not bOk:
message.add_message('Text not found', self.search_text_limit.to_string(), 'search_table')
return None, message, self.search_text_limit.end_page_no
if self.search_text_limit.end_page_no < 0:
txt = self.search_text_limit.find_line_between_lines(data, self.page_no, self.cell_search, \
self.search_text_limit.n_min, self.search_text_limit.n_max)
if txt is not None:
tab = self.find_table_by_position(data, self.page_no, txt['bounding_box'])
if tab is not None:
table_headers.append(tab)
else:
txt = self.search_text_limit.find_line_between_lines(data, self.page_no, self.cell_search, \
self.search_text_limit.n_min, None)
if txt is not None:
tab = self.find_table_by_position(data, self.page_no, txt['bounding_box'])
if tab is not None:
table_headers.append(tab)
n_count = self.page_no + 1
while n_count <= self.search_text_limit.end_page_no:
txt = self.search_text_limit.find_line_between_lines(data, n_count, \
self.cell_search, None, \
(self.search_text_limit.n_max if n_count == self.search_text_limit.end_page_no else None))
if txt is not None:
tab = self.find_table_by_position(data, n_count, txt['bounding_box'])
if tab is not None:
table_headers.append(tab)
n_count += 1
return table_headers, message, (self.search_text_limit.end_page_no if self.search_text_limit.end_page_no > 0 else self.page_no)
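# Hypothetical usage sketch (not part of the module): `upper`, `lower` and `cell` are
# assumed to be SearchTextLineParameter instances built elsewhere, `ocr_data` the
# document JSON dict, and ProcessMessages() is assumed to need no constructor arguments.
#   searcher = SearchTableInPages(2, upper, lower, cell)
#   tables, messages, last_page = searcher.search_table(ocr_data, ProcessMessages())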
|
python
|
# Generated from Mu.g4 by ANTLR 4.7.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3&")
buf.write("\u0082\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\3\2")
buf.write("\3\2\3\2\3\3\7\3\37\n\3\f\3\16\3\"\13\3\3\4\3\4\3\4\3")
buf.write("\4\3\4\3\4\3\4\5\4+\n\4\3\5\3\5\3\5\3\6\3\6\3\6\3\6\3")
buf.write("\6\3\7\3\7\3\7\3\7\3\7\7\7:\n\7\f\7\16\7=\13\7\3\7\3\7")
buf.write("\5\7A\n\7\3\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t\5\tK\n\t\3\n")
buf.write("\3\n\3\n\3\n\3\13\3\13\3\13\3\13\3\f\3\f\3\f\3\f\3\f\3")
buf.write("\f\5\f[\n\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3")
buf.write("\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\7\fr\n\f\f")
buf.write("\f\16\fu\13\f\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\3\r\5\r")
buf.write("\u0080\n\r\3\r\2\3\26\16\2\4\6\b\n\f\16\20\22\24\26\30")
buf.write("\2\b\3\2\16\20\3\2\f\r\3\2\b\13\3\2\6\7\3\2!\"\3\2\31")
buf.write("\32\2\u008c\2\32\3\2\2\2\4 \3\2\2\2\6*\3\2\2\2\b,\3\2")
buf.write("\2\2\n/\3\2\2\2\f\64\3\2\2\2\16B\3\2\2\2\20J\3\2\2\2\22")
buf.write("L\3\2\2\2\24P\3\2\2\2\26Z\3\2\2\2\30\177\3\2\2\2\32\33")
buf.write("\5\4\3\2\33\34\7\2\2\3\34\3\3\2\2\2\35\37\5\6\4\2\36\35")
buf.write("\3\2\2\2\37\"\3\2\2\2 \36\3\2\2\2 !\3\2\2\2!\5\3\2\2\2")
buf.write("\" \3\2\2\2#+\5\n\6\2$+\5\f\7\2%+\5\22\n\2&+\5\24\13\2")
buf.write("\'+\5\b\5\2()\7&\2\2)+\b\4\1\2*#\3\2\2\2*$\3\2\2\2*%\3")
buf.write("\2\2\2*&\3\2\2\2*\'\3\2\2\2*(\3\2\2\2+\7\3\2\2\2,-\7\3")
buf.write("\2\2-.\5\26\f\2.\t\3\2\2\2/\60\7 \2\2\60\61\7\24\2\2\61")
buf.write("\62\5\26\f\2\62\63\7\23\2\2\63\13\3\2\2\2\64\65\7\34\2")
buf.write("\2\65;\5\16\b\2\66\67\7\35\2\2\678\7\34\2\28:\5\16\b\2")
buf.write("9\66\3\2\2\2:=\3\2\2\2;9\3\2\2\2;<\3\2\2\2<@\3\2\2\2=")
buf.write(";\3\2\2\2>?\7\35\2\2?A\5\20\t\2@>\3\2\2\2@A\3\2\2\2A\r")
buf.write("\3\2\2\2BC\5\26\f\2CD\5\20\t\2D\17\3\2\2\2EF\7\27\2\2")
buf.write("FG\5\4\3\2GH\7\30\2\2HK\3\2\2\2IK\5\6\4\2JE\3\2\2\2JI")
buf.write("\3\2\2\2K\21\3\2\2\2LM\7\36\2\2MN\5\26\f\2NO\5\20\t\2")
buf.write("O\23\3\2\2\2PQ\7\37\2\2QR\5\26\f\2RS\7\23\2\2S\25\3\2")
buf.write("\2\2TU\b\f\1\2UV\7\r\2\2V[\5\26\f\13WX\7\22\2\2X[\5\26")
buf.write("\f\nY[\5\30\r\2ZT\3\2\2\2ZW\3\2\2\2ZY\3\2\2\2[s\3\2\2")
buf.write("\2\\]\f\f\2\2]^\7\21\2\2^r\5\26\f\f_`\f\t\2\2`a\t\2\2")
buf.write("\2ar\5\26\f\nbc\f\b\2\2cd\t\3\2\2dr\5\26\f\tef\f\7\2\2")
buf.write("fg\t\4\2\2gr\5\26\f\bhi\f\6\2\2ij\t\5\2\2jr\5\26\f\7k")
buf.write("l\f\5\2\2lm\7\5\2\2mr\5\26\f\6no\f\4\2\2op\7\4\2\2pr\5")
buf.write("\26\f\5q\\\3\2\2\2q_\3\2\2\2qb\3\2\2\2qe\3\2\2\2qh\3\2")
buf.write("\2\2qk\3\2\2\2qn\3\2\2\2ru\3\2\2\2sq\3\2\2\2st\3\2\2\2")
buf.write("t\27\3\2\2\2us\3\2\2\2vw\7\25\2\2wx\5\26\f\2xy\7\26\2")
buf.write("\2y\u0080\3\2\2\2z\u0080\t\6\2\2{\u0080\t\7\2\2|\u0080")
buf.write("\7 \2\2}\u0080\7#\2\2~\u0080\7\33\2\2\177v\3\2\2\2\177")
buf.write("z\3\2\2\2\177{\3\2\2\2\177|\3\2\2\2\177}\3\2\2\2\177~")
buf.write("\3\2\2\2\u0080\31\3\2\2\2\13 *;@JZqs\177")
return buf.getvalue()
class MuParser ( Parser ):
grammarFileName = "Mu.g4"
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
sharedContextCache = PredictionContextCache()
literalNames = [ "<INVALID>", "'return'", "'||'", "'&&'", "'=='", "'!='",
"'>'", "'<'", "'>='", "'<='", "'+'", "'-'", "'*'",
"'/'", "'%'", "'^'", "'!'", "';'", "'='", "'('", "')'",
"'{'", "'}'", "'true'", "'false'", "'nil'", "'if'",
"'else'", "'while'", "'log'" ]
symbolicNames = [ "<INVALID>", "<INVALID>", "OR", "AND", "EQ", "NEQ",
"GT", "LT", "GTEQ", "LTEQ", "PLUS", "MINUS", "MULT",
"DIV", "MOD", "POW", "NOT", "SCOL", "ASSIGN", "OPAR",
"CPAR", "OBRACE", "CBRACE", "TRUE", "FALSE", "NIL",
"IF", "ELSE", "WHILE", "LOG", "ID", "INT", "FLOAT",
"STRING", "COMMENT", "SPACE", "OTHER" ]
RULE_parse = 0
RULE_block = 1
RULE_stat = 2
RULE_return_expr = 3
RULE_assignment = 4
RULE_if_stat = 5
RULE_condition_block = 6
RULE_stat_block = 7
RULE_while_stat = 8
RULE_log = 9
RULE_expr = 10
RULE_atom = 11
ruleNames = [ "parse", "block", "stat", "return_expr", "assignment",
"if_stat", "condition_block", "stat_block", "while_stat",
"log", "expr", "atom" ]
EOF = Token.EOF
T__0=1
OR=2
AND=3
EQ=4
NEQ=5
GT=6
LT=7
GTEQ=8
LTEQ=9
PLUS=10
MINUS=11
MULT=12
DIV=13
MOD=14
POW=15
NOT=16
SCOL=17
ASSIGN=18
OPAR=19
CPAR=20
OBRACE=21
CBRACE=22
TRUE=23
FALSE=24
NIL=25
IF=26
ELSE=27
WHILE=28
LOG=29
ID=30
INT=31
FLOAT=32
STRING=33
COMMENT=34
SPACE=35
OTHER=36
def __init__(self, input:TokenStream, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.1")
self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
self._predicates = None
class ParseContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def block(self):
return self.getTypedRuleContext(MuParser.BlockContext,0)
def EOF(self):
return self.getToken(MuParser.EOF, 0)
def getRuleIndex(self):
return MuParser.RULE_parse
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParse" ):
listener.enterParse(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParse" ):
listener.exitParse(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParse" ):
return visitor.visitParse(self)
else:
return visitor.visitChildren(self)
def parse(self):
localctx = MuParser.ParseContext(self, self._ctx, self.state)
self.enterRule(localctx, 0, self.RULE_parse)
try:
self.enterOuterAlt(localctx, 1)
self.state = 24
self.block()
self.state = 25
self.match(MuParser.EOF)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class BlockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def stat(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(MuParser.StatContext)
else:
return self.getTypedRuleContext(MuParser.StatContext,i)
def getRuleIndex(self):
return MuParser.RULE_block
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBlock" ):
listener.enterBlock(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBlock" ):
listener.exitBlock(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBlock" ):
return visitor.visitBlock(self)
else:
return visitor.visitChildren(self)
def block(self):
localctx = MuParser.BlockContext(self, self._ctx, self.state)
self.enterRule(localctx, 2, self.RULE_block)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 30
self._errHandler.sync(self)
_la = self._input.LA(1)
while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << MuParser.T__0) | (1 << MuParser.IF) | (1 << MuParser.WHILE) | (1 << MuParser.LOG) | (1 << MuParser.ID) | (1 << MuParser.OTHER))) != 0):
self.state = 27
self.stat()
self.state = 32
self._errHandler.sync(self)
_la = self._input.LA(1)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class StatContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
self._OTHER = None # Token
def assignment(self):
return self.getTypedRuleContext(MuParser.AssignmentContext,0)
def if_stat(self):
return self.getTypedRuleContext(MuParser.If_statContext,0)
def while_stat(self):
return self.getTypedRuleContext(MuParser.While_statContext,0)
def log(self):
return self.getTypedRuleContext(MuParser.LogContext,0)
def return_expr(self):
return self.getTypedRuleContext(MuParser.Return_exprContext,0)
def OTHER(self):
return self.getToken(MuParser.OTHER, 0)
def getRuleIndex(self):
return MuParser.RULE_stat
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStat" ):
listener.enterStat(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStat" ):
listener.exitStat(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStat" ):
return visitor.visitStat(self)
else:
return visitor.visitChildren(self)
def stat(self):
localctx = MuParser.StatContext(self, self._ctx, self.state)
self.enterRule(localctx, 4, self.RULE_stat)
try:
self.state = 40
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [MuParser.ID]:
self.enterOuterAlt(localctx, 1)
self.state = 33
self.assignment()
pass
elif token in [MuParser.IF]:
self.enterOuterAlt(localctx, 2)
self.state = 34
self.if_stat()
pass
elif token in [MuParser.WHILE]:
self.enterOuterAlt(localctx, 3)
self.state = 35
self.while_stat()
pass
elif token in [MuParser.LOG]:
self.enterOuterAlt(localctx, 4)
self.state = 36
self.log()
pass
elif token in [MuParser.T__0]:
self.enterOuterAlt(localctx, 5)
self.state = 37
self.return_expr()
pass
elif token in [MuParser.OTHER]:
self.enterOuterAlt(localctx, 6)
self.state = 38
localctx._OTHER = self.match(MuParser.OTHER)
print("unknown char: " + (None if localctx._OTHER is None else localctx._OTHER.text));
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Return_exprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expr(self):
return self.getTypedRuleContext(MuParser.ExprContext,0)
def getRuleIndex(self):
return MuParser.RULE_return_expr
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterReturn_expr" ):
listener.enterReturn_expr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitReturn_expr" ):
listener.exitReturn_expr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitReturn_expr" ):
return visitor.visitReturn_expr(self)
else:
return visitor.visitChildren(self)
def return_expr(self):
localctx = MuParser.Return_exprContext(self, self._ctx, self.state)
self.enterRule(localctx, 6, self.RULE_return_expr)
try:
self.enterOuterAlt(localctx, 1)
self.state = 42
self.match(MuParser.T__0)
self.state = 43
self.expr(0)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class AssignmentContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def ID(self):
return self.getToken(MuParser.ID, 0)
def ASSIGN(self):
return self.getToken(MuParser.ASSIGN, 0)
def expr(self):
return self.getTypedRuleContext(MuParser.ExprContext,0)
def SCOL(self):
return self.getToken(MuParser.SCOL, 0)
def getRuleIndex(self):
return MuParser.RULE_assignment
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAssignment" ):
listener.enterAssignment(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAssignment" ):
listener.exitAssignment(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAssignment" ):
return visitor.visitAssignment(self)
else:
return visitor.visitChildren(self)
def assignment(self):
localctx = MuParser.AssignmentContext(self, self._ctx, self.state)
self.enterRule(localctx, 8, self.RULE_assignment)
try:
self.enterOuterAlt(localctx, 1)
self.state = 45
self.match(MuParser.ID)
self.state = 46
self.match(MuParser.ASSIGN)
self.state = 47
self.expr(0)
self.state = 48
self.match(MuParser.SCOL)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class If_statContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def IF(self, i:int=None):
if i is None:
return self.getTokens(MuParser.IF)
else:
return self.getToken(MuParser.IF, i)
def condition_block(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(MuParser.Condition_blockContext)
else:
return self.getTypedRuleContext(MuParser.Condition_blockContext,i)
def ELSE(self, i:int=None):
if i is None:
return self.getTokens(MuParser.ELSE)
else:
return self.getToken(MuParser.ELSE, i)
def stat_block(self):
return self.getTypedRuleContext(MuParser.Stat_blockContext,0)
def getRuleIndex(self):
return MuParser.RULE_if_stat
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIf_stat" ):
listener.enterIf_stat(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIf_stat" ):
listener.exitIf_stat(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIf_stat" ):
return visitor.visitIf_stat(self)
else:
return visitor.visitChildren(self)
def if_stat(self):
localctx = MuParser.If_statContext(self, self._ctx, self.state)
self.enterRule(localctx, 10, self.RULE_if_stat)
try:
self.enterOuterAlt(localctx, 1)
self.state = 50
self.match(MuParser.IF)
self.state = 51
self.condition_block()
self.state = 57
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,2,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
self.state = 52
self.match(MuParser.ELSE)
self.state = 53
self.match(MuParser.IF)
self.state = 54
self.condition_block()
self.state = 59
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,2,self._ctx)
self.state = 62
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,3,self._ctx)
if la_ == 1:
self.state = 60
self.match(MuParser.ELSE)
self.state = 61
self.stat_block()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Condition_blockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def expr(self):
return self.getTypedRuleContext(MuParser.ExprContext,0)
def stat_block(self):
return self.getTypedRuleContext(MuParser.Stat_blockContext,0)
def getRuleIndex(self):
return MuParser.RULE_condition_block
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterCondition_block" ):
listener.enterCondition_block(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitCondition_block" ):
listener.exitCondition_block(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitCondition_block" ):
return visitor.visitCondition_block(self)
else:
return visitor.visitChildren(self)
def condition_block(self):
localctx = MuParser.Condition_blockContext(self, self._ctx, self.state)
self.enterRule(localctx, 12, self.RULE_condition_block)
try:
self.enterOuterAlt(localctx, 1)
self.state = 64
self.expr(0)
self.state = 65
self.stat_block()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class Stat_blockContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def OBRACE(self):
return self.getToken(MuParser.OBRACE, 0)
def block(self):
return self.getTypedRuleContext(MuParser.BlockContext,0)
def CBRACE(self):
return self.getToken(MuParser.CBRACE, 0)
def stat(self):
return self.getTypedRuleContext(MuParser.StatContext,0)
def getRuleIndex(self):
return MuParser.RULE_stat_block
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStat_block" ):
listener.enterStat_block(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStat_block" ):
listener.exitStat_block(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStat_block" ):
return visitor.visitStat_block(self)
else:
return visitor.visitChildren(self)
def stat_block(self):
localctx = MuParser.Stat_blockContext(self, self._ctx, self.state)
self.enterRule(localctx, 14, self.RULE_stat_block)
try:
self.state = 72
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [MuParser.OBRACE]:
self.enterOuterAlt(localctx, 1)
self.state = 67
self.match(MuParser.OBRACE)
self.state = 68
self.block()
self.state = 69
self.match(MuParser.CBRACE)
pass
elif token in [MuParser.T__0, MuParser.IF, MuParser.WHILE, MuParser.LOG, MuParser.ID, MuParser.OTHER]:
self.enterOuterAlt(localctx, 2)
self.state = 71
self.stat()
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class While_statContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def WHILE(self):
return self.getToken(MuParser.WHILE, 0)
def expr(self):
return self.getTypedRuleContext(MuParser.ExprContext,0)
def stat_block(self):
return self.getTypedRuleContext(MuParser.Stat_blockContext,0)
def getRuleIndex(self):
return MuParser.RULE_while_stat
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterWhile_stat" ):
listener.enterWhile_stat(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitWhile_stat" ):
listener.exitWhile_stat(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitWhile_stat" ):
return visitor.visitWhile_stat(self)
else:
return visitor.visitChildren(self)
def while_stat(self):
localctx = MuParser.While_statContext(self, self._ctx, self.state)
self.enterRule(localctx, 16, self.RULE_while_stat)
try:
self.enterOuterAlt(localctx, 1)
self.state = 74
self.match(MuParser.WHILE)
self.state = 75
self.expr(0)
self.state = 76
self.stat_block()
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class LogContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def LOG(self):
return self.getToken(MuParser.LOG, 0)
def expr(self):
return self.getTypedRuleContext(MuParser.ExprContext,0)
def SCOL(self):
return self.getToken(MuParser.SCOL, 0)
def getRuleIndex(self):
return MuParser.RULE_log
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterLog" ):
listener.enterLog(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitLog" ):
listener.exitLog(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitLog" ):
return visitor.visitLog(self)
else:
return visitor.visitChildren(self)
def log(self):
localctx = MuParser.LogContext(self, self._ctx, self.state)
self.enterRule(localctx, 18, self.RULE_log)
try:
self.enterOuterAlt(localctx, 1)
self.state = 78
self.match(MuParser.LOG)
self.state = 79
self.expr(0)
self.state = 80
self.match(MuParser.SCOL)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
class ExprContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return MuParser.RULE_expr
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class NotExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def NOT(self):
return self.getToken(MuParser.NOT, 0)
def expr(self):
return self.getTypedRuleContext(MuParser.ExprContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNotExpr" ):
listener.enterNotExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNotExpr" ):
listener.exitNotExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNotExpr" ):
return visitor.visitNotExpr(self)
else:
return visitor.visitChildren(self)
class UnaryMinusExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def MINUS(self):
return self.getToken(MuParser.MINUS, 0)
def expr(self):
return self.getTypedRuleContext(MuParser.ExprContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterUnaryMinusExpr" ):
listener.enterUnaryMinusExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitUnaryMinusExpr" ):
listener.exitUnaryMinusExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitUnaryMinusExpr" ):
return visitor.visitUnaryMinusExpr(self)
else:
return visitor.visitChildren(self)
class MultiplicationExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.ExprContext
super().__init__(parser)
self.op = None # Token
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(MuParser.ExprContext)
else:
return self.getTypedRuleContext(MuParser.ExprContext,i)
def MULT(self):
return self.getToken(MuParser.MULT, 0)
def DIV(self):
return self.getToken(MuParser.DIV, 0)
def MOD(self):
return self.getToken(MuParser.MOD, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterMultiplicationExpr" ):
listener.enterMultiplicationExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitMultiplicationExpr" ):
listener.exitMultiplicationExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitMultiplicationExpr" ):
return visitor.visitMultiplicationExpr(self)
else:
return visitor.visitChildren(self)
class AtomExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def atom(self):
return self.getTypedRuleContext(MuParser.AtomContext,0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAtomExpr" ):
listener.enterAtomExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAtomExpr" ):
listener.exitAtomExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAtomExpr" ):
return visitor.visitAtomExpr(self)
else:
return visitor.visitChildren(self)
class OrExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(MuParser.ExprContext)
else:
return self.getTypedRuleContext(MuParser.ExprContext,i)
def OR(self):
return self.getToken(MuParser.OR, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterOrExpr" ):
listener.enterOrExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitOrExpr" ):
listener.exitOrExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitOrExpr" ):
return visitor.visitOrExpr(self)
else:
return visitor.visitChildren(self)
class AdditiveExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.ExprContext
super().__init__(parser)
self.op = None # Token
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(MuParser.ExprContext)
else:
return self.getTypedRuleContext(MuParser.ExprContext,i)
def PLUS(self):
return self.getToken(MuParser.PLUS, 0)
def MINUS(self):
return self.getToken(MuParser.MINUS, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAdditiveExpr" ):
listener.enterAdditiveExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAdditiveExpr" ):
listener.exitAdditiveExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAdditiveExpr" ):
return visitor.visitAdditiveExpr(self)
else:
return visitor.visitChildren(self)
class PowExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(MuParser.ExprContext)
else:
return self.getTypedRuleContext(MuParser.ExprContext,i)
def POW(self):
return self.getToken(MuParser.POW, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterPowExpr" ):
listener.enterPowExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitPowExpr" ):
listener.exitPowExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitPowExpr" ):
return visitor.visitPowExpr(self)
else:
return visitor.visitChildren(self)
class RelationalExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.ExprContext
super().__init__(parser)
self.op = None # Token
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(MuParser.ExprContext)
else:
return self.getTypedRuleContext(MuParser.ExprContext,i)
def LTEQ(self):
return self.getToken(MuParser.LTEQ, 0)
def GTEQ(self):
return self.getToken(MuParser.GTEQ, 0)
def LT(self):
return self.getToken(MuParser.LT, 0)
def GT(self):
return self.getToken(MuParser.GT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterRelationalExpr" ):
listener.enterRelationalExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitRelationalExpr" ):
listener.exitRelationalExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitRelationalExpr" ):
return visitor.visitRelationalExpr(self)
else:
return visitor.visitChildren(self)
class EqualityExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.ExprContext
super().__init__(parser)
self.op = None # Token
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(MuParser.ExprContext)
else:
return self.getTypedRuleContext(MuParser.ExprContext,i)
def EQ(self):
return self.getToken(MuParser.EQ, 0)
def NEQ(self):
return self.getToken(MuParser.NEQ, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterEqualityExpr" ):
listener.enterEqualityExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitEqualityExpr" ):
listener.exitEqualityExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitEqualityExpr" ):
return visitor.visitEqualityExpr(self)
else:
return visitor.visitChildren(self)
class AndExprContext(ExprContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.ExprContext
super().__init__(parser)
self.copyFrom(ctx)
def expr(self, i:int=None):
if i is None:
return self.getTypedRuleContexts(MuParser.ExprContext)
else:
return self.getTypedRuleContext(MuParser.ExprContext,i)
def AND(self):
return self.getToken(MuParser.AND, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterAndExpr" ):
listener.enterAndExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitAndExpr" ):
listener.exitAndExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitAndExpr" ):
return visitor.visitAndExpr(self)
else:
return visitor.visitChildren(self)
def expr(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = MuParser.ExprContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 20
self.enterRecursionRule(localctx, 20, self.RULE_expr, _p)
self._la = 0 # Token type
try:
self.enterOuterAlt(localctx, 1)
self.state = 88
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [MuParser.MINUS]:
localctx = MuParser.UnaryMinusExprContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 83
self.match(MuParser.MINUS)
self.state = 84
self.expr(9)
pass
elif token in [MuParser.NOT]:
localctx = MuParser.NotExprContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 85
self.match(MuParser.NOT)
self.state = 86
self.expr(8)
pass
elif token in [MuParser.OPAR, MuParser.TRUE, MuParser.FALSE, MuParser.NIL, MuParser.ID, MuParser.INT, MuParser.FLOAT, MuParser.STRING]:
localctx = MuParser.AtomExprContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 87
self.atom()
pass
else:
raise NoViableAltException(self)
self._ctx.stop = self._input.LT(-1)
self.state = 113
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,7,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 111
self._errHandler.sync(self)
la_ = self._interp.adaptivePredict(self._input,6,self._ctx)
if la_ == 1:
localctx = MuParser.PowExprContext(self, MuParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 90
if not self.precpred(self._ctx, 10):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 10)")
self.state = 91
self.match(MuParser.POW)
self.state = 92
self.expr(10)
pass
elif la_ == 2:
localctx = MuParser.MultiplicationExprContext(self, MuParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 93
if not self.precpred(self._ctx, 7):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 7)")
self.state = 94
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << MuParser.MULT) | (1 << MuParser.DIV) | (1 << MuParser.MOD))) != 0)):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 95
self.expr(8)
pass
elif la_ == 3:
localctx = MuParser.AdditiveExprContext(self, MuParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 96
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 6)")
self.state = 97
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not(_la==MuParser.PLUS or _la==MuParser.MINUS):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 98
self.expr(7)
pass
elif la_ == 4:
localctx = MuParser.RelationalExprContext(self, MuParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 99
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 100
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << MuParser.GT) | (1 << MuParser.LT) | (1 << MuParser.GTEQ) | (1 << MuParser.LTEQ))) != 0)):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 101
self.expr(6)
pass
elif la_ == 5:
localctx = MuParser.EqualityExprContext(self, MuParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 102
if not self.precpred(self._ctx, 4):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 4)")
self.state = 103
localctx.op = self._input.LT(1)
_la = self._input.LA(1)
if not(_la==MuParser.EQ or _la==MuParser.NEQ):
localctx.op = self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
self.state = 104
self.expr(5)
pass
elif la_ == 6:
localctx = MuParser.AndExprContext(self, MuParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 105
if not self.precpred(self._ctx, 3):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 3)")
self.state = 106
self.match(MuParser.AND)
self.state = 107
self.expr(4)
pass
elif la_ == 7:
localctx = MuParser.OrExprContext(self, MuParser.ExprContext(self, _parentctx, _parentState))
self.pushNewRecursionContext(localctx, _startState, self.RULE_expr)
self.state = 108
if not self.precpred(self._ctx, 2):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 2)")
self.state = 109
self.match(MuParser.OR)
self.state = 110
self.expr(3)
pass
self.state = 115
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,7,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class AtomContext(ParserRuleContext):
def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
super().__init__(parent, invokingState)
self.parser = parser
def getRuleIndex(self):
return MuParser.RULE_atom
def copyFrom(self, ctx:ParserRuleContext):
super().copyFrom(ctx)
class ParExprContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def OPAR(self):
return self.getToken(MuParser.OPAR, 0)
def expr(self):
return self.getTypedRuleContext(MuParser.ExprContext,0)
def CPAR(self):
return self.getToken(MuParser.CPAR, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterParExpr" ):
listener.enterParExpr(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitParExpr" ):
listener.exitParExpr(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitParExpr" ):
return visitor.visitParExpr(self)
else:
return visitor.visitChildren(self)
class BooleanAtomContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def TRUE(self):
return self.getToken(MuParser.TRUE, 0)
def FALSE(self):
return self.getToken(MuParser.FALSE, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterBooleanAtom" ):
listener.enterBooleanAtom(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitBooleanAtom" ):
listener.exitBooleanAtom(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitBooleanAtom" ):
return visitor.visitBooleanAtom(self)
else:
return visitor.visitChildren(self)
class IdAtomContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def ID(self):
return self.getToken(MuParser.ID, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterIdAtom" ):
listener.enterIdAtom(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitIdAtom" ):
listener.exitIdAtom(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitIdAtom" ):
return visitor.visitIdAtom(self)
else:
return visitor.visitChildren(self)
class StringAtomContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def STRING(self):
return self.getToken(MuParser.STRING, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterStringAtom" ):
listener.enterStringAtom(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitStringAtom" ):
listener.exitStringAtom(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitStringAtom" ):
return visitor.visitStringAtom(self)
else:
return visitor.visitChildren(self)
class NilAtomContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def NIL(self):
return self.getToken(MuParser.NIL, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNilAtom" ):
listener.enterNilAtom(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNilAtom" ):
listener.exitNilAtom(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNilAtom" ):
return visitor.visitNilAtom(self)
else:
return visitor.visitChildren(self)
class NumberAtomContext(AtomContext):
def __init__(self, parser, ctx:ParserRuleContext): # actually a MuParser.AtomContext
super().__init__(parser)
self.copyFrom(ctx)
def INT(self):
return self.getToken(MuParser.INT, 0)
def FLOAT(self):
return self.getToken(MuParser.FLOAT, 0)
def enterRule(self, listener:ParseTreeListener):
if hasattr( listener, "enterNumberAtom" ):
listener.enterNumberAtom(self)
def exitRule(self, listener:ParseTreeListener):
if hasattr( listener, "exitNumberAtom" ):
listener.exitNumberAtom(self)
def accept(self, visitor:ParseTreeVisitor):
if hasattr( visitor, "visitNumberAtom" ):
return visitor.visitNumberAtom(self)
else:
return visitor.visitChildren(self)
def atom(self):
localctx = MuParser.AtomContext(self, self._ctx, self.state)
self.enterRule(localctx, 22, self.RULE_atom)
self._la = 0 # Token type
try:
self.state = 125
self._errHandler.sync(self)
token = self._input.LA(1)
if token in [MuParser.OPAR]:
localctx = MuParser.ParExprContext(self, localctx)
self.enterOuterAlt(localctx, 1)
self.state = 116
self.match(MuParser.OPAR)
self.state = 117
self.expr(0)
self.state = 118
self.match(MuParser.CPAR)
pass
elif token in [MuParser.INT, MuParser.FLOAT]:
localctx = MuParser.NumberAtomContext(self, localctx)
self.enterOuterAlt(localctx, 2)
self.state = 120
_la = self._input.LA(1)
if not(_la==MuParser.INT or _la==MuParser.FLOAT):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
pass
elif token in [MuParser.TRUE, MuParser.FALSE]:
localctx = MuParser.BooleanAtomContext(self, localctx)
self.enterOuterAlt(localctx, 3)
self.state = 121
_la = self._input.LA(1)
if not(_la==MuParser.TRUE or _la==MuParser.FALSE):
self._errHandler.recoverInline(self)
else:
self._errHandler.reportMatch(self)
self.consume()
pass
elif token in [MuParser.ID]:
localctx = MuParser.IdAtomContext(self, localctx)
self.enterOuterAlt(localctx, 4)
self.state = 122
self.match(MuParser.ID)
pass
elif token in [MuParser.STRING]:
localctx = MuParser.StringAtomContext(self, localctx)
self.enterOuterAlt(localctx, 5)
self.state = 123
self.match(MuParser.STRING)
pass
elif token in [MuParser.NIL]:
localctx = MuParser.NilAtomContext(self, localctx)
self.enterOuterAlt(localctx, 6)
self.state = 124
self.match(MuParser.NIL)
pass
else:
raise NoViableAltException(self)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.exitRule()
return localctx
def sempred(self, localctx:RuleContext, ruleIndex:int, predIndex:int):
if self._predicates == None:
self._predicates = dict()
self._predicates[10] = self.expr_sempred
pred = self._predicates.get(ruleIndex, None)
if pred is None:
raise Exception("No predicate with index:" + str(ruleIndex))
else:
return pred(localctx, predIndex)
def expr_sempred(self, localctx:ExprContext, predIndex:int):
if predIndex == 0:
return self.precpred(self._ctx, 10)
if predIndex == 1:
return self.precpred(self._ctx, 7)
if predIndex == 2:
return self.precpred(self._ctx, 6)
if predIndex == 3:
return self.precpred(self._ctx, 5)
if predIndex == 4:
return self.precpred(self._ctx, 4)
if predIndex == 5:
return self.precpred(self._ctx, 3)
if predIndex == 6:
return self.precpred(self._ctx, 2)
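# Hand-written usage sketch (not generated by ANTLR): it assumes the companion MuLexer
# produced from Mu.g4 is importable next to this parser.
#   from antlr4 import InputStream, CommonTokenStream
#   from MuLexer import MuLexer
#   tokens = CommonTokenStream(MuLexer(InputStream('x = 1 + 2; log x;')))
#   parser = MuParser(tokens)
#   tree = parser.parse()
#   print(tree.toStringTree(recog=parser))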
|
python
|
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import json
with open('config.json') as config_file:
config = json.load(config_file)
k = config['k']
codes_path = config['codes_path']
codes = []
for i in range(k):
codes.append([float(i)/(k-1)*255])
codes = np.array(codes)
print('# codes: %d'%codes.shape[0])
np.save(codes_path,codes)
|
python
|
# Check for a module in a set of extension directories.
# An extension directory should contain a Setup file
# and one or more .o files or a lib.a file.
import os
import parsesetup
def checkextensions(unknown, extensions):
    files = []
    modules = []
    edict = {}
    for e in extensions:
        setup = os.path.join(e, 'Setup')
        liba = os.path.join(e, 'lib.a')
        if not os.path.isfile(liba):
            liba = None
        edict[e] = parsesetup.getsetupinfo(setup), liba
    for mod in unknown:
        for e in extensions:
            (mods, vars), liba = edict[e]
            if mod not in mods:
                continue
            modules.append(mod)
            if liba:
                # If we find a lib.a, use it, ignore the
                # .o files, and use *all* libraries for
                # *all* modules in the Setup file
                if liba in files:
                    break
                files.append(liba)
                for m in list(mods.keys()):
                    files = files + select(e, mods, vars,
                                           m, 1)
                break
            files = files + select(e, mods, vars, mod, 0)
            break
    return files, modules
def select(e, mods, vars, mod, skipofiles):
    files = []
    for w in mods[mod]:
        w = treatword(w)
        if not w:
            continue
        w = expandvars(w, vars)
        for w in w.split():
            if skipofiles and w[-2:] == '.o':
                continue
            # Assume $var expands to absolute pathname
            if w[0] not in ('-', '$') and w[-2:] in ('.o', '.a'):
                w = os.path.join(e, w)
            if w[:2] in ('-L', '-R') and w[2:3] != '$':
                w = w[:2] + os.path.join(e, w[2:])
            files.append(w)
    return files
cc_flags = ['-I', '-D', '-U']
cc_exts = ['.c', '.C', '.cc', '.c++']
def treatword(w):
    if w[:2] in cc_flags:
        return None
    if w[:1] == '-':
        return w  # Assume loader flag
    head, tail = os.path.split(w)
    base, ext = os.path.splitext(tail)
    if ext in cc_exts:
        tail = base + '.o'
        w = os.path.join(head, tail)
    return w
def expandvars(str, vars):
    i = 0
    while i < len(str):
        i = k = str.find('$', i)
        if i < 0:
            break
        i = i + 1
        var = str[i:i+1]
        i = i + 1
        if var == '(':
            j = str.find(')', i)
            if j < 0:
                break
            var = str[i:j]
            i = j + 1
        if var in vars:
            str = str[:k] + vars[var] + str[i:]
            i = k
    return str
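# Illustrative call (hypothetical paths): given unresolved module names and extension
# directories that each contain a Setup file, e.g.
#   files, modules = checkextensions(['spam', 'eggs'], ['Extensions/foo', 'Extensions/bar'])
# `files` collects the .o/.a files and linker flags to add, `modules` the names resolved.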
|
python
|
"""A basic focus script for slitviewers
(changes will be required for gcam and instruments).
Subclass for more functionality.
Take a series of exposures at different focus positions to estimate best focus.
Note:
- The script runs in two phases:
1) If a slitviewer:
Move the boresight and take an exposure. Then pause.
The user is expected to acquire a suitable star before resuming.
Once this phase begins (i.e. once you start the script)
changes to boresight offset are ignored.
Other imagers:
Take an exposure and look for the best centroidable star. Then pause.
The user is expected to acquire a suitable star before resuming.
2) Take the focus sweep.
Once this phase begins all inputs are ignored.
History:
2006-11-07 ROwen From DIS:Focus, which was from NICFPS:Focus.
2006-11-09 ROwen Removed use of plotAxis.autoscale_view(scalex=False, scaley=True)
since it was not compatible with older versions of matplotlib.
Stopped using float("nan") since it doesn't work on all pythons.
Modified to always pause before the focus sweep.
Modified to window the exposure.
2006-11-13 ROwen Modified to have user set center focus and range.
Added Expose and Sweep buttons.
2006-12-01 ROwen Refactored to make it easier to use for non-slitviewers:
- Added waitFocusSweep method.
- Modified to use focPosFWHMList instead of two lists.
Improved sanity-checking the best focus fit.
Created SlitviewerFocusScript and OffsetGuiderFocusScript classes;
the latter is not yet fully written.
2006-12-08 ROwen More refactoring. Created ImagerFocusScript class.
Needs extensive testing.
2006-12-13 ROwen Added Find button and changed Centroid to Measure.
Data is always nulled at start of sweep. This is much easier than
                    trying to figure out when I can safely keep existing data.
Fit error is logged.
Fit is logged and graphed even if fit is rejected (unless fit is a maximum).
Changed from Numeric to numarray to avoid a bug in matplotlib 0.87.7
Changed test for max fit focus error to a multiple of the focus range.
2006-12-28 ROwen Bug fix: tried to send <inst>Expose time=<time> bin=<binfac>
command for imaging instruments. The correct command is:
<inst>Expose object time=<time>.
Noted that bin factor and window must be configured via special
instrument-specific commands.
ImagerFocusScript no longer makes use of windowing (while centroiding),
though a subclass could do so.
2006-12-28 ROwen ImagerFocusScript.waitExpose now aborts the exposure if the script is aborted.
This change did not get into TUI 1.3a11. Note that this fix only applies to imaging
instruments; there is not yet any documented way to abort a guider exposure.
2007-01-02 ROwen Fixed a bug in waitExpose: <inst> <inst>Expose -> <instExpose>.
Fixed a bug in waitFindStar: centroidRad used but not supplied.
Improved help text for Star Pos entry widgets.
2007-01-03 ROwen Bug fixes:
- Used sr instead of self.sr in two places.
- ImagerFocusScript.getCentroidArgs returned bad
starpos due to wanting to window.
- ImagerFocusScript.waitCentroid failed if no star found
rather than returning sr.value = None.
2007-01-12 ROwen Added a threshold for star finding (maxFindAmpl).
Added logging of sky and star amplitude.
2007-01-26 ROwen Tweak various formats:
- All reported and command floats use %0.xf (some used %.xf).
- Focus is rounded to nearest integer for logging and setting.
If new focus found, set Center Focus to the new value.
Increased minimum # of focus positions from 2 to 3.
Bug fix: if only 3 measurements, divided by zero while computing std. dev.
Bug fix: could not restore initial focus (missing = in set focus command).
Minor bug fix: focus interval was computed as int, not float.
2007-01-29 ROwen Improved OffsetGuiderFocusScript to get guider info based on instPos
instead of insisting that the guider be the current instrument.
Modified to take advantage of RO.Wdg.Entry's new label attribute.
2007-01-29 ROwen Fixed ImagerFocusScript (it was giving an illegal arg to OffsetGuiderFocusScript).
Refactored so run is in BaseFocusScript and ImagerFocusScript inherits from that.
Renamed extraSetup method to waitExtraSetup.
2007-02-13 ROwen Added a Clear button.
Never auto-clears the log.
Waits to auto-clear the graph until new data is about to be graphed.
Simplified graph range handling.
2007-04-24 ROwen Modified to use numpy instead of numarray.
2007-06-01 ROwen Hacked in support for sfocus for SPIcam.
2007-06-04 ROwen Added doWindow argument to BaseFocusScript.
2007-07-25 ROwen ImagerFocusScript modified to sending windowing info as part of the expose command
if windowing is being used (due to improvements in spicamExpose).
Pings the gcam actor when it starts. This eliminates the situation where the actor
is dead and the script should halt, but keeps exposing and reporting fwhm=NaN instead.
2007-07-26 ROwen Added user-settable bin factor.
Modified to take a final exposure (after restoring boresight) if boresight moved.
2007-07-27 ROwen Increased the fidelity of debug mode and fixed some bugs.
2007-07-30 ROwen Added windowOrigin and windowIsInclusive arguments.
Bug fix: if the user changed the bin factor during script execution,
it would change the bin factor used in the script (and not necessarily properly).
2007-09-12 ROwen SlitviewerFocusScript bug fix: Cancel would fail if no image ever taken.
2007-12-20 ROwen Moved matplotlib configuration statements to TUI's startup because
in matplotlib 0.91.1 one may not call "use" after importing matplotlib.backends.
2008-01-24 ROwen BaseFocusScript bug fixes:
- PR 686: Find button broken (waitFindStar ran "expose" instead of "findstars"
and so never found anything.).
- recordUserParams didn't round window so relStarPos could be off by a fraction of a pixel.
2008-01-25 ROwen Added a digit after the decimal point for reporting fwhm in arcsec.
Implemented a lower limit on focus increment.
2008-02-01 ROwen Changed configuration constants from globals to class variables of BaseFocusScript
so subclasses can more easily override them.
Fixed debug mode to use proper defaults for number of steps and focus range.
Setting current focus successfully clears the status bar.
2008-03-28 ROwen PR 775: used exposeModel in classes where it did not exist.
Fixed by adding tccInstPrefix argument.
2008-04-02 ROwen PR 781: Many focus scripts fail to start with TypeError...:
BaseFocusScript.getInstInfo was missing () on a string method lower()
2008-04-22 ROwen Modified to use new Log.addMsg method.
2008-04-23 ROwen Added some diagnostic output for PR 777 and its kin.
2008-04-29 ROwen Open guide image window *after* checking for correct instrument.
2008-08-14 ROwen CR 818: take a final full-frame exposure if script windows
(or, as before, if boresight was restored).
2009-03-02 ROwen Added a brief header for PR 777 diagnostic output.
2010-03-12 ROwen Changed to use Models.getModel.
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
"""
import inspect
import math
import random # for debug
import numpy
import Tkinter
import RO.Wdg
import RO.CnvUtil
import RO.Constants
import RO.StringUtil
import TUI.Models
import TUI.Inst.ExposeModel
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
MicronStr = RO.StringUtil.MuStr + "m"
def formatNum(val, fmt="%0.1f"):
"""Convert a number into a string
None is returned as NaN
"""
if val is None:
return "NaN"
try:
return fmt % (val,)
except TypeError:
raise TypeError("formatNum failed on fmt=%r, val=%r" % (fmt, val))
class Extremes(object):
"""Class to keep track of minimum and maximum value.
"""
def __init__(self, val=None):
self.minVal = None
self.maxVal = None
if val is not None:
self.addVal(val)
def addVal(self, val):
if val is None:
return
if self.isOK():
self.minVal = min(self.minVal, val)
self.maxVal = max(self.maxVal, val)
else:
self.minVal = val
self.maxVal = val
def isOK(self):
return self.minVal is not None
def __eq__(self, other):
return (self.minVal == other.minVal) and (self.maxVal == other.maxVal)
def __str__(self):
return "[%s, %s]" % (self.minVal, self.maxVal)
def __repr__(self):
return "Extremes(%s, %s)" % (self.minVal, self.maxVal)
class StarMeas(object):
def __init__(self,
xyPos = None,
sky = None,
ampl = None,
fwhm = None,
):
self.xyPos = xyPos
self.sky = sky
self.ampl = ampl
self.fwhm = fwhm
def fromStarKey(cls, starKeyData):
"""Create an instance from star keyword data.
"""
return cls(
fwhm = starKeyData[8],
sky = starKeyData[13],
ampl = starKeyData[14],
xyPos = starKeyData[2:4],
)
fromStarKey = classmethod(fromStarKey)
def makeStarData(
typeChar = "f",
xyPos = (10.0, 10.0),
sky = 200,
ampl = 1500,
fwhm = 2.5,
):
"""Make a list containing one star data list for debug mode"""
xyPos = [float(xyPos[ii]) for ii in range(2)]
fwhm = float(fwhm)
return [[typeChar, 1, xyPos[0], xyPos[1], 1.0, 1.0, fwhm * 5, 1, fwhm, fwhm, 0, 0, ampl, sky, ampl]]
class BaseFocusScript(object):
"""Basic focus script object.
This is a virtual base class. The inheritor must:
- Provide widgets
- Provide a "run" method
Inputs:
- gcamActor: name of guide camera actor (e.g. "dcam")
- instName: name of instrument (e.g. "DIS"); must be a name known to TUI.Inst.ExposeModel.
- tccInstPrefix: instrument name as known by the TCC; defaults to instName;
if the instrument has multiple names in the TCC then supply the common prefix
- imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
- defRadius: default centroid radius, in arcsec
- defBinFactor: default bin factor; if None then bin factor cannot be set
- canSetStarPos: if True the user can set the star position;
if False then the Star Pos entries and Find button are not shown.
- maxFindAmpl: maximum star amplitude for finding stars (peak - sky in ADUs);
if None then star finding is disabled.
- doWindow: if True, subframe images during focus sequence
- windowOrigin: index of left or lower pixel for window (0 or 1 unless very weird);
this is not used for star positions, which all have the same convention
- windowIsInclusive: is the upper-right window coord included in the image?
- helpURL: URL of help file
- debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
"""
cmd_Find = "find"
cmd_Measure = "measure"
cmd_Sweep = "sweep"
# constants
#DefRadius = 5.0 # centroid radius, in arcsec
#NewStarRad = 2.0 # amount of star position change to be considered a new star
DefFocusNPos = 5 # number of focus positions
DefFocusRange = 200 # default focus range around current focus
FocusWaitMS = 1000 # time to wait after every focus adjustment (ms)
BacklashComp = 0 # amount of backlash compensation, in microns (0 for none)
WinSizeMult = 2.5 # window radius = centroid radius * WinSizeMult
FocGraphMargin = 5 # margin on graph for x axis limits, in um
MaxFocSigmaFac = 0.5 # maximum allowed sigma of best fit focus as a multiple of focus range
MinFocusIncr = 50 # minimum focus increment, in um
def __init__(self,
sr,
gcamActor,
instName,
tccInstPrefix = None,
imageViewerTLName = None,
defRadius = 5.0,
defBinFactor = 1,
canSetStarPos = True,
maxFindAmpl = None,
doWindow = True,
windowOrigin = 0,
windowIsInclusive = True,
helpURL = None,
debug = False,
):
"""The setup script; run once when the script runner
window is created.
"""
self.sr = sr
sr.debug = bool(debug)
self.gcamActor = gcamActor
self.instName = instName
self.tccInstPrefix = tccInstPrefix or self.instName
self.imageViewerTLName = imageViewerTLName
if defBinFactor is None:
self.defBinFactor = None
self.binFactor = 1
self.dispBinFactor = 1
else:
self.defBinFactor = int(defBinFactor)
self.binFactor = self.defBinFactor
self.dispBinFactor = self.defBinFactor
self.defRadius = defRadius
self.helpURL = helpURL
self.canSetStarPos = canSetStarPos
self.maxFindAmpl = maxFindAmpl
self.doWindow = bool(doWindow)
self.windowOrigin = int(windowOrigin)
self.windowIsInclusive = bool(windowIsInclusive)
# fake data for debug mode
self.debugIterFWHM = None
# get various models
self.tccModel = TUI.Models.getModel("tcc")
self.tuiModel = TUI.Models.getModel("tui")
self.guideModel = TUI.Models.getModel(self.gcamActor)
# create and grid widgets
self.gr = RO.Wdg.Gridder(sr.master, sticky="ew")
self.createSpecialWdg()
self.createStdWdg()
self.initAll()
# try to get focus away from graph (but it doesn't work; why?)
self.expTimeWdg.focus_set()
self.setCurrFocus()
def createSpecialWdg(self):
"""Create script-specific widgets.
"""
pass
def createStdWdg(self):
"""Create the standard widgets.
"""
sr = self.sr
self.expTimeWdg = RO.Wdg.FloatEntry(
sr.master,
label = "Exposure Time",
minValue = self.guideModel.gcamInfo.minExpTime,
maxValue = self.guideModel.gcamInfo.maxExpTime,
defValue = self.guideModel.gcamInfo.defExpTime,
defFormat = "%0.1f",
defMenu = "Default",
minMenu = "Minimum",
helpText = "Exposure time",
helpURL = self.helpURL,
)
self.gr.gridWdg(self.expTimeWdg.label, self.expTimeWdg, "sec")
self.binFactorWdg = RO.Wdg.IntEntry(
master = sr.master,
label = "Bin Factor",
minValue = 1,
maxValue = 1024,
defValue = self.defBinFactor or 1,
defMenu = "Default",
callFunc = self.updBinFactor,
helpText = "Bin factor (for rows and columns)",
helpURL = self.helpURL,
)
if self.defBinFactor is not None:
self.gr.gridWdg(self.binFactorWdg.label, self.binFactorWdg)
self.starPosWdgSet = []
for ii in range(2):
letter = ("X", "Y")[ii]
starPosWdg = RO.Wdg.FloatEntry(
master = sr.master,
label = "Star Pos %s" % (letter,),
minValue = 0,
maxValue = 5000,
helpText = "Star %s position (binned, full frame)" % (letter,),
helpURL = self.helpURL,
)
if self.canSetStarPos:
self.gr.gridWdg(starPosWdg.label, starPosWdg, "pix")
self.starPosWdgSet.append(starPosWdg)
self.centroidRadWdg = RO.Wdg.IntEntry(
master = sr.master,
label = "Centroid Radius",
minValue = 5,
maxValue = 1024,
defValue = self.defRadius,
defMenu = "Default",
helpText = "Centroid radius; don't skimp",
helpURL = self.helpURL,
)
self.gr.gridWdg(self.centroidRadWdg.label, self.centroidRadWdg, "arcsec", sticky="ew")
setCurrFocusWdg = RO.Wdg.Button(
master = sr.master,
text = "Center Focus",
callFunc = self.setCurrFocus,
helpText = "Set to current focus",
helpURL = self.helpURL,
)
self.centerFocPosWdg = RO.Wdg.IntEntry(
master = sr.master,
label = "Center Focus",
defValue = 0,
defMenu = "Default",
helpText = "Center of focus sweep",
helpURL = self.helpURL,
)
self.gr.gridWdg(setCurrFocusWdg, self.centerFocPosWdg, MicronStr)
self.focusRangeWdg = RO.Wdg.IntEntry(
master = sr.master,
label = "Focus Range",
maxValue = self.DefFocusRange * 10,
defValue = self.DefFocusRange,
defMenu = "Default",
helpText = "Range of focus sweep",
helpURL = self.helpURL,
)
self.gr.gridWdg(self.focusRangeWdg.label, self.focusRangeWdg, MicronStr)
self.numFocusPosWdg = RO.Wdg.IntEntry(
master = sr.master,
label = "Focus Positions",
minValue = 3,
defValue = self.DefFocusNPos,
defMenu = "Default",
helpText = "Number of focus positions for sweep",
helpURL = self.helpURL,
)
self.gr.gridWdg(self.numFocusPosWdg.label, self.numFocusPosWdg, "")
self.focusIncrWdg = RO.Wdg.FloatEntry(
master = sr.master,
label = "Focus Increment",
defFormat = "%0.1f",
readOnly = True,
relief = "flat",
helpText = "Focus step size; must be at least %s %s" % (self.MinFocusIncr, MicronStr),
helpURL = self.helpURL,
)
self.gr.gridWdg(self.focusIncrWdg.label, self.focusIncrWdg, MicronStr)
# create the move to best focus checkbox
self.moveBestFocus = RO.Wdg.Checkbutton(
master = sr.master,
text = "Move to Best Focus",
defValue = True,
relief = "flat",
helpText = "Move to estimated best focus and measure FWHM after sweep?",
helpURL = self.helpURL,
)
self.gr.gridWdg(None, self.moveBestFocus, colSpan = 3, sticky="w")
graphCol = self.gr.getNextCol()
graphRowSpan = self.gr.getNextRow()
# table of measurements (including separate unscrolled header)
TableWidth = 32
self.logHeader = RO.Wdg.Text(
master = sr.master,
readOnly = True,
height = 2,
width = TableWidth,
helpText = "Measured and fit results",
helpURL = self.helpURL,
relief = "sunken",
bd = 0,
)
self.logHeader.insert("0.0", """\tfocus\tFWHM\tFWHM\tsky\tampl\tsky+ampl
\t%s\tpixels\tarcsec\tADUs\tADUs\tADUs""" % MicronStr)
self.logHeader.setEnable(False)
self.gr.gridWdg(False, self.logHeader, sticky="ew", colSpan = 10)
self.logWdg = RO.Wdg.LogWdg(
master = sr.master,
height = 10,
width = TableWidth,
helpText = "Measured and fit results",
helpURL = self.helpURL,
relief = "sunken",
bd = 2,
)
self.gr.gridWdg(False, self.logWdg, sticky="ew", colSpan = 10)
# graph of measurements
plotFig = matplotlib.figure.Figure(figsize=(4, 1), frameon=True)
self.figCanvas = FigureCanvasTkAgg(plotFig, sr.master)
self.figCanvas.get_tk_widget().grid(row=0, column=graphCol, rowspan=graphRowSpan, sticky="news")
self.plotAxis = plotFig.add_subplot(1, 1, 1)
self.focusRangeWdg.addCallback(self.updFocusIncr, callNow=False)
self.numFocusPosWdg.addCallback(self.updFocusIncr, callNow=True)
# add command buttons
cmdBtnFrame = Tkinter.Frame(sr.master)
self.findBtn = RO.Wdg.Button(
master = cmdBtnFrame,
text = "Find",
callFunc = RO.Alg.GenericCallback(self.doCmd, self.cmd_Find),
helpText = "Update focus, expose and find best star",
helpURL = self.helpURL,
)
if self.maxFindAmpl is not None:
self.findBtn.pack(side="left")
self.measureBtn = RO.Wdg.Button(
master = cmdBtnFrame,
text = "Measure",
callFunc = RO.Alg.GenericCallback(self.doCmd, self.cmd_Measure),
helpText = "Update focus, expose and measure FWHM",
helpURL = self.helpURL,
)
self.measureBtn.pack(side="left")
self.sweepBtn = RO.Wdg.Button(
master = cmdBtnFrame,
text = "Sweep",
callFunc = RO.Alg.GenericCallback(self.doCmd, self.cmd_Sweep),
helpText = "Start focus sweep",
helpURL = self.helpURL,
)
self.sweepBtn.pack(side="left")
self.clearBtn = RO.Wdg.Button(
master = cmdBtnFrame,
text = "Clear",
callFunc = self.doClear,
helpText = "Clear table and graph",
helpURL = self.helpURL,
)
self.clearBtn.pack(side="right")
nCol = self.gr.getMaxNextCol()
self.gr.gridWdg(False, cmdBtnFrame, colSpan=nCol)
if sr.debug:
self.expTimeWdg.set("1")
self.centerFocPosWdg.set(0)
def clearGraph(self):
self.plotAxis.clear()
self.plotAxis.grid(True)
# start with autoscale disabled due to bug in matplotlib
self.plotAxis.set_autoscale_on(False)
self.figCanvas.draw()
self.plotLine = None
def doClear(self, wdg=None):
self.logWdg.clearOutput()
self.clearGraph()
def doCmd(self, cmdMode, wdg=None):
if cmdMode not in (
self.cmd_Measure,
self.cmd_Find,
self.cmd_Sweep,
):
raise self.sr.RuntimeError("Unknown command mode %r" % (cmdMode,))
self.cmdMode = cmdMode
self.sr.resumeUser()
def enableCmdBtns(self, doEnable):
"""Enable or disable command buttons (e.g. Expose and Sweep).
"""
self.findBtn.setEnable(doEnable)
self.measureBtn.setEnable(doEnable)
self.sweepBtn.setEnable(doEnable)
self.clearBtn.setEnable(doEnable)
def end(self, sr):
"""Run when script exits (normally or due to error)
"""
self.enableCmdBtns(False)
if self.focPosToRestore is not None:
tccCmdStr = "set focus=%0.0f" % (self.focPosToRestore,)
if self.sr.debug:
print "end is restoring the focus: %r" % tccCmdStr
sr.startCmd(
actor = "tcc",
cmdStr = tccCmdStr,
)
doRestoreBoresight = self.begBoreXYDeg != self.currBoreXYDeg
if doRestoreBoresight:
if self.sr.debug:
print "end is restoring the boresight"
self.moveBoresight(
self.begBoreXYDeg,
doWait = False,
)
if self.didTakeImage and (self.doWindow or doRestoreBoresight):
if self.sr.debug:
print "end is taking a final exposure"
exposeCmdDict = self.getExposeCmdDict(doWindow=False)
sr.startCmd(**exposeCmdDict)
def formatBinFactorArg(self):
"""Return bin factor argument for expose/centroid/findstars command"""
#print "defBinFactor=%r, binFactor=%r" % (self.defBinFactor, self.binFactor)
# if defBinFactor None then bin factor cannot be set
if self.defBinFactor is None:
return ""
return "bin=%d" % (self.binFactor,)
def formatExposeArgs(self, doWindow=True):
"""Format arguments for exposure command.
Inputs:
- doWindow: if true, window the exposure (if permitted)
"""
argList = [
"time=%s" % (self.expTime,),
self.formatBinFactorArg(),
self.formatWindowArg(doWindow),
]
argList = [arg for arg in argList if arg]
return " ".join(argList)
def formatWindowArg(self, doWindow=True):
"""Format window argument for expose/centroid/findstars command.
Inputs:
- doWindow: if true, window the exposure (if permitted)
"""
if not doWindow or not self.doWindow:
return ""
if self.windowIsInclusive:
urOffset = self.windowOrigin
else:
urOffset = self.windowOrigin + 1
windowLL = [self.window[ii] + self.windowOrigin for ii in range(2)]
windowUR = [self.window[ii+2] + urOffset for ii in range(2)]
return "window=%d,%d,%d,%d" % (windowLL[0], windowLL[1], windowUR[0], windowUR[1])
def getInstInfo(self):
"""Obtains instrument data.
Verifies the correct instrument and sets these attributes:
- instScale: x,y image scale in unbinned pixels/degree
- instCtr: x,y image center in unbinned pixels
- instLim: xmin, ymin, xmax, ymax image limits, inclusive, in unbinned pixels
- arcsecPerPixel: image scale in arcsec/unbinned pixel;
average of x and y scales
Raises ScriptError if wrong instrument.
"""
sr = self.sr
if self.tccInstPrefix and not sr.debug:
# Make sure current instrument is correct
try:
currInstName = sr.getKeyVar(self.tccModel.inst)
except sr.ScriptError:
raise sr.ScriptError("current instrument unknown")
if not currInstName.lower().startswith(self.tccInstPrefix.lower()):
raise sr.ScriptError("%s is not the current instrument (%s)!" % (self.instName, currInstName))
self.instScale = sr.getKeyVar(self.tccModel.iimScale, ind=None)
self.instCtr = sr.getKeyVar(self.tccModel.iimCtr, ind=None)
self.instLim = sr.getKeyVar(self.tccModel.iimLim, ind=None)
else:
# data from tcc tinst:I_NA2_DIS.DAT 18-OCT-2006
self.instScale = [-12066.6, 12090.5] # unbinned pixels/deg
self.instCtr = [240, 224]
self.instLim = [0, 0, 524, 511]
self.arcsecPerPixel = 3600.0 * 2 / (abs(self.instScale[0]) + abs(self.instScale[1]))
def getEntryNum(self, wdg):
"""Return the numeric value of a widget, or raise ScriptError if blank.
"""
numVal = wdg.getNumOrNone()
if numVal is not None:
return numVal
raise self.sr.ScriptError(wdg.label + " not specified")
def getExposeCmdDict(self, doWindow=True):
"""Get basic command arument dict for an expose command
This includes actor, cmdStr, abortCmdStr
"""
return dict(
actor = self.gcamActor,
cmdStr = "expose " + self.formatExposeArgs(doWindow),
abortCmdStr = "abort",
)
def graphFocusMeas(self, focPosFWHMList, extremeFocPos=None, extremeFWHM=None):
"""Graph measured fwhm vs focus.
Inputs:
- focPosFWHMList: list of data items:
- focus position (um)
- measured FWHM (binned pixels)
- extremeFocPos: extremes of focus position
- extremeFWHM: extremes of FWHM
extremes are an Extremes object with .minVal and .maxVal
"""
# "graphFocusMeas(focPosFWHMList=%s, extremeFocPos=%r, extremeFWHM=%r)" % (focPosFWHMList, extremeFocPos, extremeFWHM)
numMeas = len(focPosFWHMList)
if numMeas == 0:
return
focList, fwhmList = zip(*focPosFWHMList)
if not self.plotLine:
self.plotLine = self.plotAxis.plot(focList, fwhmList, 'bo')[0]
else:
self.plotLine.set_data(focList[:], fwhmList[:])
self.setGraphRange(extremeFocPos=extremeFocPos, extremeFWHM=extremeFWHM)
def initAll(self):
"""Initialize variables, table and graph.
"""
# initialize shared variables
self.didTakeImage = False
self.focDir = None
self.currBoreXYDeg = None
self.begBoreXYDeg = None
self.instScale = None
self.arcsecPerPixel = None
self.instCtr = None
self.instLim = None
self.cmdMode = None
self.focPosToRestore = None
self.expTime = None
self.absStarPos = None
self.relStarPos = None
self.binFactor = None
self.window = None # LL pixel is 0, UL pixel is included
self.enableCmdBtns(False)
def logFitFWHM(self, name, focPos, fwhm):
"""Log a fit value of FWHM or FWHM error.
"""
if fwhm is not None:
fwhmArcSec = fwhm * self.arcsecPerPixel * self.binFactor
else:
fwhmArcSec = None
dataStrs = (
formatNum(focPos, "%0.0f"),
formatNum(fwhm, "%0.1f"),
formatNum(fwhmArcSec, "%0.2f"),
)
outStr = "%s\t%s" % (name, "\t".join(dataStrs))
self.logWdg.addMsg(outStr)
def logStarMeas(self, name, focPos, starMeas):
"""Log a star measurement.
The name should be less than 8 characters long.
Any or all data fields in starMeas may be None.
Inputs:
- focPos: focus position, in um
- starMeas: StarMeas object
If fwhm is None, it is reported as NaN.
"""
fwhm = starMeas.fwhm
if fwhm is not None:
fwhmArcSec = fwhm * self.arcsecPerPixel * self.binFactor
else:
fwhmArcSec = None
if None not in (starMeas.ampl, starMeas.sky):
skyPlusAmpl = starMeas.ampl + starMeas.sky
else:
skyPlusAmpl = None
dataStrs = (
formatNum(focPos, "%0.0f"),
formatNum(fwhm, "%0.1f"),
formatNum(fwhmArcSec, "%0.2f"),
formatNum(starMeas.sky, "%0.0f"),
formatNum(starMeas.ampl, "%0.0f"),
formatNum(skyPlusAmpl, "%0.0f"),
)
outStr = "%s\t%s" % (name, "\t".join(dataStrs))
self.logWdg.addMsg(outStr)
def recordUserParams(self, doStarPos=True):
"""Record user-set parameters relating to exposures but not to focus
Inputs:
- doStarPos: if true: save star position and related information;
warning: if doStarPos true then there must *be* a valid star position
Set the following instance variables:
- expTime
- centroidRadPix
The following are set to None if doStarPos false:
- absStarPos
- relStarPos
- window
"""
self.expTime = self.getEntryNum(self.expTimeWdg)
self.binFactor = self.dispBinFactor
centroidRadArcSec = self.getEntryNum(self.centroidRadWdg)
self.centroidRadPix = centroidRadArcSec / (self.arcsecPerPixel * self.binFactor)
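# Example (illustrative): with the debug-mode NA2 scale (about 0.30 arcsec per
# unbinned pixel), a 5 arcsec radius at bin factor 1 gives centroidRadPix of roughly 17.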
if doStarPos:
winRad = self.centroidRadPix * self.WinSizeMult
self.absStarPos = [None, None]
for ii in range(2):
wdg = self.starPosWdgSet[ii]
self.absStarPos[ii] = self.getEntryNum(wdg)
if self.doWindow:
windowMinXY = [max(self.instLim[ii], int(0.5 + self.absStarPos[ii] - winRad)) for ii in range(2)]
windowMaxXY = [min(self.instLim[ii-2], int(0.5 + self.absStarPos[ii] + winRad)) for ii in range(2)]
self.window = windowMinXY + windowMaxXY
self.relStarPos = [self.absStarPos[ii] - windowMinXY[ii] for ii in range(2)]
#print "winRad=%s, windowMinXY=%s, relStarPos=%s" % (winRad, windowMinXY, self.relStarPos)
else:
self.window = None
self.relStarPos = self.absStarPos[:]
else:
self.absStarPos = None
self.relStarPos = None
self.window = None
def run(self, sr):
"""Run the focus script.
"""
self.initAll()
# fake data for debug mode
# iteration #, FWHM
self.debugIterFWHM = (1, 2.0)
self.getInstInfo()
yield self.waitExtraSetup()
# open image viewer window, if any
if self.imageViewerTLName:
self.tuiModel.tlSet.makeVisible(self.imageViewerTLName)
self.sr.master.winfo_toplevel().lift()
focPosFWHMList = []
extremeFocPos = Extremes()
extremeFWHM = Extremes()
# check that the gcam actor is alive. This is important because
# centroid commands can fail due to no actor or no star
# so we want to halt in the former case
yield sr.waitCmd(
actor = self.gcamActor,
cmdStr = "ping",
)
# command loop; repeat until error or user explicitly presses Stop
if self.maxFindAmpl is None:
btnStr = "Measure or Sweep"
else:
btnStr = "Find, Measure or Sweep"
waitMsg = "Press %s to continue" % (btnStr,)
testNum = 0
while True:
# wait for user to press the Find, Measure or Sweep button
# note: the only time they should be enabled is during this wait
self.enableCmdBtns(True)
sr.showMsg(waitMsg, RO.Constants.sevWarning)
yield sr.waitUser()
self.enableCmdBtns(False)
if self.cmdMode == self.cmd_Sweep:
break
if testNum == 0:
self.clearGraph()
if self.maxFindAmpl is None:
self.logWdg.addMsg("===== Measure =====")
else:
self.logWdg.addMsg("===== Find/Measure =====")
testNum += 1
focPos = self.centerFocPosWdg.getNumOrNone()
if focPos is None:
raise sr.ScriptError("must specify center focus")
yield self.waitSetFocus(focPos, False)
if self.cmdMode == self.cmd_Measure:
cmdName = "Meas"
self.recordUserParams(doStarPos=True)
yield self.waitCentroid()
elif self.cmdMode == self.cmd_Find:
cmdName = "Find"
self.recordUserParams(doStarPos=False)
yield self.waitFindStar()
starData = sr.value
if starData.xyPos is not None:
sr.showMsg("Found star at %0.1f, %0.1f" % tuple(starData.xyPos))
self.setStarPos(starData.xyPos)
else:
raise RuntimeError("Unknown command mode: %r" % (self.cmdMode,))
starMeas = sr.value
self.logStarMeas("%s %d" % (cmdName, testNum,), focPos, starMeas)
fwhm = starMeas.fwhm
if fwhm is None:
waitMsg = "No star found! Fix and then press %s" % (btnStr,)
self.setGraphRange(extremeFocPos=extremeFocPos)
else:
extremeFocPos.addVal(focPos)
extremeFWHM.addVal(starMeas.fwhm)
focPosFWHMList.append((focPos, fwhm))
self.graphFocusMeas(focPosFWHMList, extremeFocPos, extremeFWHM)
waitMsg = "%s done; press %s to continue" % (cmdName, btnStr,)
self.recordUserParams(doStarPos=True)
yield self.waitFocusSweep()
doRestoreBoresight = self.begBoreXYDeg != self.currBoreXYDeg
if doRestoreBoresight:
yield self.moveBoresight(
self.begBoreXYDeg,
msgStr ="Restoring original boresight position",
doWait = True,
)
if self.didTakeImage and (self.doWindow or doRestoreBoresight):
self.didTakeImage = False # to prevent end from taking another image
self.sr.showMsg("Taking a final image")
exposeCmdDict = self.getExposeCmdDict(doWindow=False)
yield sr.waitCmd(**exposeCmdDict)
def setCurrFocus(self, *args):
"""Set center focus to current focus.
"""
currFocus = self.sr.getKeyVar(self.tccModel.secFocus, defVal=None)
if currFocus is None:
self.sr.showMsg("Current focus not known",
severity=RO.Constants.sevWarning,
)
return
self.centerFocPosWdg.set(currFocus)
self.sr.showMsg("")
def setGraphRange(self, extremeFocPos=None, extremeFWHM=None):
"""Sets the displayed range of the graph.
Inputs:
- extremeFocPos: focus extremes
- extremeFWHM: FWHM extremes
"""
# "setGraphRange(extremeFocPos=%s, extremeFWHM=%s)" % (extremeFocPos, extremeFWHM)
if extremeFocPos and extremeFocPos.isOK():
minFoc = extremeFocPos.minVal - self.FocGraphMargin
maxFoc = extremeFocPos.maxVal + self.FocGraphMargin
if maxFoc - minFoc < 50:
minFoc -= 25
maxFoc += 25
self.plotAxis.set_xlim(minFoc, maxFoc)
if extremeFWHM and extremeFWHM.isOK():
minFWHM = extremeFWHM.minVal * 0.95
maxFWHM = extremeFWHM.maxVal * 1.05
self.plotAxis.set_ylim(minFWHM, maxFWHM)
self.figCanvas.draw()
def setStarPos(self, starXYPix):
"""Set star position widgets.
Inputs:
- starXYPix: star x, y position (binned pixels)
"""
for ii in range(2):
wdg = self.starPosWdgSet[ii]
wdg.set(starXYPix[ii])
def updBinFactor(self, *args, **kargs):
"""Called when the user changes the bin factor"""
newBinFactor = self.binFactorWdg.getNum()
if newBinFactor <= 0:
return
oldBinFactor = self.dispBinFactor
if oldBinFactor == newBinFactor:
return
self.dispBinFactor = newBinFactor
# adjust displayed star position
posFactor = float(oldBinFactor) / float(newBinFactor)
for ii in range(2):
oldStarPos = self.starPosWdgSet[ii].getNum()
if oldStarPos == 0:
continue
newStarPos = oldStarPos * posFactor
self.starPosWdgSet[ii].set(newStarPos)
def updFocusIncr(self, *args):
"""Update focus increment widget.
"""
focusRange = self.focusRangeWdg.getNumOrNone()
numPos = self.numFocusPosWdg.getNumOrNone()
if None in (focusRange, numPos):
self.focusIncrWdg.set(None, isCurrent = False)
return
focusIncr = focusRange / float(numPos - 1)
isOK = focusIncr >= self.MinFocusIncr
if not isOK:
errMsg = "Focus increment too small (< %s %s)" % (self.MinFocusIncr, MicronStr)
self.sr.showMsg(errMsg, RO.Constants.sevWarning)
self.focusIncrWdg.set(focusIncr, isCurrent = isOK)
def waitCentroid(self):
"""Take an exposure and centroid using 1x1 binning.
If the centroid is found, sets sr.value to the FWHM.
Otherwise sets sr.value to None.
"""
sr = self.sr
centroidCmdStr = "centroid on=%0.1f,%0.1f cradius=%0.1f %s" % \
(self.relStarPos[0], self.relStarPos[1], self.centroidRadPix, self.formatExposeArgs())
yield sr.waitCmd(
actor = self.gcamActor,
cmdStr = centroidCmdStr,
keyVars = (self.guideModel.files, self.guideModel.star),
checkFail = False,
)
cmdVar = sr.value
self.didTakeImage = True
if sr.debug:
starData = makeStarData("c", self.relStarPos)
else:
starData = cmdVar.getKeyVarData(self.guideModel.star)
if starData:
sr.value = StarMeas.fromStarKey(starData[0])
return
else:
sr.value = StarMeas()
if not cmdVar.getKeyVarData(self.guideModel.files):
raise sr.ScriptError("exposure failed")
def waitExtraSetup(self):
"""Executed once at the start of each run
after calling initAll and getInstInfo but before doing anything else.
Override to do things such as move the boresight or put the instrument into a particular mode.
"""
yield self.sr.waitMS(1)
def waitFindStar(self):
"""Take a full-frame exposure and find the best star that can be centroided.
Sets sr.value to StarMeas.
Displays a warning if no star found.
"""
sr = self.sr
if self.maxFindAmpl is None:
raise RuntimeError("Find disabled; maxFindAmpl=None")
self.sr.showMsg("Exposing %s sec to find best star" % (self.expTime,))
findStarCmdStr = "findstars " + self.formatExposeArgs(doWindow=False)
yield sr.waitCmd(
actor = self.gcamActor,
cmdStr = findStarCmdStr,
keyVars = (self.guideModel.files, self.guideModel.star),
checkFail = False,
)
cmdVar = sr.value
self.didTakeImage = True
if self.sr.debug:
filePath = "debugFindFile"
else:
if not cmdVar.getKeyVarData(self.guideModel.files):
raise sr.ScriptError("exposure failed")
fileInfo = cmdVar.getKeyVarData(self.guideModel.files)[0]
filePath = "".join(fileInfo[2:4])
if self.sr.debug:
starDataList = makeStarData("f", (50.0, 75.0))
else:
starDataList = cmdVar.getKeyVarData(self.guideModel.star)
if not starDataList:
sr.value = StarMeas()
self.sr.showMsg("No stars found", severity=RO.Constants.sevWarning)
return
yield self.waitFindStarInList(filePath, starDataList)
def waitFindStarInList(self, filePath, starDataList):
"""Find best centroidable star in starDataList.
If a suitable star is found: set starXYPos to position
and sr.value to the star FWHM.
Otherwise log a warning and set sr.value to None.
Inputs:
- filePath: image file path on hub, relative to image root
(e.g. concatenate items 2:4 of the guider Files keyword)
- starDataList: list of star keyword data
"""
sr = self.sr
if self.maxFindAmpl is None:
raise RuntimeError("Find disabled; maxFindAmpl=None")
for starData in starDataList:
starXYPos = starData[2:4]
starAmpl = starData[14]
if (starAmpl is None) or (starAmpl > self.maxFindAmpl):
continue
sr.showMsg("Centroiding star at %0.1f, %0.1f" % tuple(starXYPos))
centroidCmdStr = "centroid file=%s on=%0.1f,%0.1f cradius=%0.1f" % \
(filePath, starXYPos[0], starXYPos[1], self.centroidRadPix)
yield sr.waitCmd(
actor = self.gcamActor,
cmdStr = centroidCmdStr,
keyVars = (self.guideModel.star,),
checkFail = False,
)
cmdVar = sr.value
if sr.debug:
starData = makeStarData("f", starXYPos)
else:
starData = cmdVar.getKeyVarData(self.guideModel.star)
if starData:
sr.value = StarMeas.fromStarKey(starData[0])
return
sr.showMsg("No usable star fainter than %s ADUs found" % self.maxFindAmpl,
severity=RO.Constants.sevWarning)
sr.value = StarMeas()
def waitFocusSweep(self):
"""Conduct a focus sweep.
Sets sr.value to True if successful.
"""
sr = self.sr
focPosFWHMList = []
self.logWdg.addMsg("===== Sweep =====")
self.clearGraph()
centerFocPos = float(self.getEntryNum(self.centerFocPosWdg))
focusRange = float(self.getEntryNum(self.focusRangeWdg))
startFocPos = centerFocPos - (focusRange / 2.0)
endFocPos = startFocPos + focusRange
numFocPos = self.getEntryNum(self.numFocusPosWdg)
if numFocPos < 3:
raise sr.ScriptError("need at least three focus positions")
focusIncr = self.focusIncrWdg.getNum()
if focusIncr < self.MinFocusIncr:
raise sr.ScriptError("focus increment too small (< %s %s)" % (self.MinFocusIncr, MicronStr))
self.focDir = (endFocPos > startFocPos)
extremeFocPos = Extremes(startFocPos)
extremeFocPos.addVal(endFocPos)
extremeFWHM = Extremes()
self.setGraphRange(extremeFocPos=extremeFocPos)
numMeas = 0
self.focPosToRestore = centerFocPos
for focInd in range(numFocPos):
focPos = float(startFocPos + (focInd*focusIncr))
doBacklashComp = (focInd == 0)
yield self.waitSetFocus(focPos, doBacklashComp)
sr.showMsg("Exposing for %s sec at focus %0.0f %s" % \
(self.expTime, focPos, MicronStr))
yield self.waitCentroid()
starMeas = sr.value
if sr.debug:
starMeas.fwhm = 0.0001 * (focPos - centerFocPos) ** 2
starMeas.fwhm += random.gauss(1.0, 0.25)
extremeFWHM.addVal(starMeas.fwhm)
self.logStarMeas("Sw %d" % (focInd+1,), focPos, starMeas)
if starMeas.fwhm is not None:
focPosFWHMList.append((focPos, starMeas.fwhm))
self.graphFocusMeas(focPosFWHMList, extremeFWHM=extremeFWHM)
# Fit a curve to the data
numMeas = len(focPosFWHMList)
if numMeas < 3:
raise sr.ScriptError("need at least 3 measurements to fit best focus")
focList, fwhmList = zip(*focPosFWHMList)
focPosArr = numpy.array(focList, dtype=float)
fwhmArr = numpy.array(fwhmList, dtype=float)
weightArr = numpy.ones(numMeas, dtype=float)
if numMeas > 3:
coeffs, dumYFit, dumYBand, fwhmSigma, dumCorrMatrix = polyfitw(focPosArr, fwhmArr, weightArr, 2, True)
elif numMeas == 3:
# too few points to measure fwhmSigma
coeffs = polyfitw(focPosArr, fwhmArr, weightArr, 2, False)
fwhmSigma = None
# Make sure fit curve has a minimum
if coeffs[2] <= 0.0:
raise sr.ScriptError("could not find minimum focus")
# find the best focus position
bestEstFocPos = (-1.0*coeffs[1])/(2.0*coeffs[2])
bestEstFWHM = coeffs[0]+coeffs[1]*bestEstFocPos+coeffs[2]*bestEstFocPos*bestEstFocPos
extremeFocPos.addVal(bestEstFocPos)
extremeFWHM.addVal(bestEstFWHM)
self.logFitFWHM("Fit", bestEstFocPos, bestEstFWHM)
# compute and log standard deviation, if possible
if fwhmSigma is not None:
focSigma = math.sqrt(fwhmSigma / coeffs[2])
self.logFitFWHM(u"Fit \N{GREEK SMALL LETTER SIGMA}", focSigma, fwhmSigma)
else:
focSigma = None
self.logWdg.addMsg(u"Warning: too few points to compute \N{GREEK SMALL LETTER SIGMA}")
# plot fit as a curve and best fit focus as a point
fitFocArr = numpy.arange(min(focPosArr), max(focPosArr), 1)
fitFWHMArr = coeffs[0] + coeffs[1]*fitFocArr + coeffs[2]*(fitFocArr**2.0)
self.plotAxis.plot(fitFocArr, fitFWHMArr, '-k', linewidth=2)
self.plotAxis.plot([bestEstFocPos], [bestEstFWHM], 'go')
self.setGraphRange(extremeFocPos=extremeFocPos, extremeFWHM=extremeFWHM)
# check fit error
if focSigma is not None:
maxFocSigma = self.MaxFocSigmaFac * focusRange
if focSigma > maxFocSigma:
raise sr.ScriptError("focus std. dev. too large: %0.0f > %0.0f" % (focSigma, maxFocSigma))
# check that estimated best focus is in sweep range
if not startFocPos <= bestEstFocPos <= endFocPos:
raise sr.ScriptError("best focus=%0.0f out of sweep range" % (bestEstFocPos,))
# move to best focus if "Move to best Focus" checked
moveBest = self.moveBestFocus.getBool()
if not moveBest:
return
self.setCurrFocus()
yield self.waitSetFocus(bestEstFocPos, doBacklashComp=True)
sr.showMsg("Exposing for %s sec at estimated best focus %d %s" % \
(self.expTime, bestEstFocPos, MicronStr))
yield self.waitCentroid()
finalStarMeas = sr.value
if sr.debug:
finalStarMeas.fwhm = 1.1
extremeFWHM.addVal(finalStarMeas.fwhm)
self.logStarMeas("Meas", bestEstFocPos, finalStarMeas)
finalFWHM = finalStarMeas.fwhm
if finalFWHM is not None:
self.plotAxis.plot([bestEstFocPos], [finalFWHM], 'ro')
self.setGraphRange(extremeFocPos=extremeFocPos, extremeFWHM=extremeFWHM)
else:
raise sr.ScriptError("could not measure FWHM at estimated best focus")
# A new best focus was picked; don't restore the original focus
# and do set Center Focus to the new focus
self.focPosToRestore = None
self.centerFocPosWdg.set(int(round(bestEstFocPos)))
def waitSetFocus(self, focPos, doBacklashComp=False):
"""Adjust focus.
To use: yield waitSetFocus(...)
Inputs:
- focPos: new focus position in um
- doBacklashComp: if True, perform backlash compensation
"""
sr = self.sr
focPos = float(focPos)
# to try to eliminate backlash in the secondary mirror drive, first move past the
# target focus by the backlash compensation amount and then approach it from that side
if doBacklashComp and self.BacklashComp:
backlashFocPos = focPos - (abs(self.BacklashComp) * self.focDir)
sr.showMsg("Backlash comp: moving focus to %0.0f %s" % (backlashFocPos, MicronStr))
yield sr.waitCmd(
actor = "tcc",
cmdStr = "set focus=%0.0f" % (backlashFocPos,),
)
yield sr.waitMS(self.FocusWaitMS)
# move to desired focus position
sr.showMsg("Moving focus to %0.0f %s" % (focPos, MicronStr))
yield sr.waitCmd(
actor = "tcc",
cmdStr = "set focus=%0.0f" % (focPos,),
)
yield sr.waitMS(self.FocusWaitMS)
class SlitviewerFocusScript(BaseFocusScript):
"""Focus script for slitviewers
Inputs:
- gcamActor: name of guide camera actor (e.g. "dcam")
- instName: name of instrument (e.g. "DIS"); must be a name known to TUI.Inst.ExposeModel.
- imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
- defBoreXY: default boresight position in [x, y] arcsec;
If an entry is None then no offset widget is shown for that axis
and 0 is used.
- defRadius: default centroid radius, in arcsec
- defBinFactor: default bin factor; if None then bin factor cannot be set
- doWindow: if True, subframe images during focus sequence
- windowOrigin: index of left or lower pixel for window (0 or 1 unless very weird);
this is not used for star positions, which all have the same convention
- windowIsInclusive: is the upper-right window coord included in the image?
- helpURL: URL of help file
- debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
"""
def __init__(self,
sr,
gcamActor,
instName,
imageViewerTLName,
defBoreXY,
defRadius = 5.0,
defBinFactor = 1,
doWindow = True,
windowOrigin = 0,
windowIsInclusive = True,
helpURL = None,
debug = False,
):
"""The setup script; run once when the script runner
window is created.
"""
if len(defBoreXY) != 2:
raise ValueError("defBoreXY=%s must be a pair of values" % defBoreXY)
self.defBoreXY = defBoreXY
BaseFocusScript.__init__(self,
sr = sr,
gcamActor = gcamActor,
instName = instName,
imageViewerTLName = imageViewerTLName,
defRadius = defRadius,
defBinFactor = defBinFactor,
canSetStarPos = False,
maxFindAmpl = None,
doWindow = doWindow,
windowOrigin = windowOrigin,
windowIsInclusive = windowIsInclusive,
helpURL = helpURL,
debug = debug,
)
def createSpecialWdg(self):
"""Create boresight widget(s).
"""
sr = self.sr
self.boreNameWdgSet = []
for ii in range(2):
showWdg = (self.defBoreXY[ii] is not None)
if showWdg:
defVal = float(self.defBoreXY[ii])
else:
defVal = 0.0
letter = ("X", "Y")[ii]
wdgLabel = "Boresight %s" % (letter,)
boreWdg = RO.Wdg.FloatEntry(
master = sr.master,
label = wdgLabel,
minValue = -60.0,
maxValue = 60.0,
defValue = defVal,
defMenu = "Default",
helpText = wdgLabel + " position",
helpURL = self.helpURL,
)
if showWdg:
self.gr.gridWdg(boreWdg.label, boreWdg, "arcsec")
self.boreNameWdgSet.append(boreWdg)
def moveBoresight(self, boreXYDeg, msgStr="Moving the boresight", doWait=True):
"""Move the boresight to the specified position and sets starPos accordingly.
Waits if doWait true (in which case you must use "yield").
Records the initial boresight position in self.begBoreXYDeg, if not already done.
"""
sr = self.sr
cmdStr = "offset boresight %0.7f, %0.7f/pabs/computed" % (boreXYDeg[0], boreXYDeg[1])
# save the initial boresight position, if not already done
if self.begBoreXYDeg is None:
begBorePVTs = sr.getKeyVar(self.tccModel.boresight, ind=None)
if not sr.debug:
begBoreXYDeg = [RO.CnvUtil.posFromPVT(pvt) for pvt in begBorePVTs]
if None in begBoreXYDeg:
raise sr.ScriptError("current boresight position unknown")
self.begBoreXYDeg = begBoreXYDeg
else:
self.begBoreXYDeg = [0.0, 0.0]
# "self.begBoreXYDeg=%r" % self.begBoreXYDeg
# move boresight and adjust star position accordingly
starXYPix = [(boreXYDeg[ii] * self.instScale[ii]) + self.instCtr[ii] for ii in range(2)]
if msgStr:
sr.showMsg(msgStr)
self.currBoreXYDeg = boreXYDeg
self.setStarPos(starXYPix)
if doWait:
yield sr.waitCmd(
actor = "tcc",
cmdStr = cmdStr,
)
else:
sr.startCmd(
actor = "tcc",
cmdStr = cmdStr,
)
def waitExtraSetup(self):
"""Executed once at the start of each run
after calling initAll and getInstInfo but before doing anything else.
Override to do things such as put the instrument into a particular mode.
"""
# set boresight and star position and shift boresight
boreXYDeg = [self.getEntryNum(wdg) / 3600.0 for wdg in self.boreNameWdgSet]
yield self.moveBoresight(boreXYDeg, doWait=True)
class OffsetGuiderFocusScript(BaseFocusScript):
"""Focus script for offset guiders
Inputs:
- gcamActor: name of guide camera actor (e.g. "dcam")
- instPos: name of instrument position (e.g. "NA2"); case doesn't matter
- imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
- defRadius: default centroid radius, in arcsec
- defBinFactor: default bin factor; if None then bin factor cannot be set
- maxFindAmpl: maximum star amplitude for finding stars (peak - sky in ADUs);
if None then star finding is disabled.
- doWindow: if True, subframe images during focus sequence
- windowOrigin: index of left or lower pixel for window (0 or 1 unless very weird);
this is not used for star positions, which all have the same convention
- windowIsInclusive: is the upper-right window coord included in the image?
- helpURL: URL of help file
- debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
"""
def __init__(self,
sr,
gcamActor,
instPos,
imageViewerTLName,
defRadius = 5.0,
defBinFactor = 1,
maxFindAmpl = None,
doWindow = True,
windowOrigin = 0,
windowIsInclusive = True,
helpURL = None,
debug = False,
):
"""The setup script; run once when the script runner
window is created.
"""
BaseFocusScript.__init__(self,
sr = sr,
gcamActor = gcamActor,
instName = None,
imageViewerTLName = imageViewerTLName,
defRadius = defRadius,
defBinFactor = defBinFactor,
maxFindAmpl = maxFindAmpl,
doWindow = doWindow,
windowOrigin = windowOrigin,
windowIsInclusive = windowIsInclusive,
helpURL = helpURL,
debug = debug,
)
self.instPos = instPos
def getInstInfo(self):
"""Obtains instrument data (in this case guider data).
Verifies the correct instrument and sets these attributes:
- instScale: x,y image scale in unbinned pixels/degree
- instCtr: x,y image center in unbinned pixels
- instLim: xmin, ymin, xmax, ymax image limits, inclusive, in unbinned pixels
- arcsecPerPixel: image scale in arcsec/unbinned pixel;
average of x and y scales
Raises ScriptError if wrong instrument.
"""
sr = self.sr
if not sr.debug:
# Make sure current instrument is correct
try:
currInstPosName = sr.getKeyVar(self.tccModel.instPos)
except sr.ScriptError:
raise sr.ScriptError("current instrument position unknown")
if not currInstPosName.lower() == self.instPos.lower():
raise sr.ScriptError("%s is not the current instrument position (%s)!" % (self.instPos, currInstPosName))
self.instScale = sr.getKeyVar(self.tccModel.gimScale, ind=None)
self.instCtr = sr.getKeyVar(self.tccModel.gimCtr, ind=None)
self.instLim = sr.getKeyVar(self.tccModel.gimLim, ind=None)
else:
# data from tcc tinst:I_NA2_DIS.DAT 18-OCT-2006
self.instScale = [-12066.6, 12090.5] # unbinned pixels/deg
self.instCtr = [240, 224]
self.instLim = [0, 0, 524, 511]
self.arcsecPerPixel = 3600.0 * 2 / (abs(self.instScale[0]) + abs(self.instScale[1]))
class ImagerFocusScript(BaseFocusScript):
"""Focus script for imaging instrument.
This is like an Offset Guider but the exposure commands
are sent to the instrument actor, while centroid and findstars commands
are sent to the guide camera actor using the image just taken.
For now there is no standard way to handle windowing and binning
so each instrument must override waitExpose to use windowing.
As a result the default value of doWindow is false.
However, if the exposure command gets arguments for windowing
then this will all change.
Inputs:
- instName: name of instrument (e.g. "DIS"); must be a name known to TUI.Inst.ExposeModel.
- imageViewerTLName: name of image viewer toplevel (e.g. "Guide.DIS Slitviewer")
- defRadius: default centroid radius, in arcsec
- defBinFactor: default bin factor; if None then bin factor cannot be set
- maxFindAmpl: maximum star amplitude for finding stars (peak - sky in ADUs);
if None then star finding is disabled.
- doWindow: if True, subframe images during focus sequence
- windowOrigin: index of left or lower pixel for window (0 or 1 unless very weird);
this is not used for star positions, which all have the same convention
- windowIsInclusive: is the upper-right window coord included in the image?
- doZeroOverscan: if True then set overscan to zero
- helpURL: URL of help file
- debug: if True, run in debug mode, which uses fake data and does not communicate with the hub.
"""
def __init__(self,
sr,
instName,
imageViewerTLName = None,
defRadius = 5.0,
defBinFactor = 1,
maxFindAmpl = None,
doWindow = False,
windowOrigin = 1,
windowIsInclusive = True,
doZeroOverscan = False,
helpURL = None,
debug = False,
):
"""The setup script; run once when the script runner
window is created.
"""
# this is a hack for now
gcamActor = {
"nicfps": "nfocus",
"spicam": "sfocus",
}[instName.lower()]
BaseFocusScript.__init__(self,
sr = sr,
gcamActor = gcamActor,
instName = instName,
imageViewerTLName = imageViewerTLName,
defRadius = defRadius,
defBinFactor = defBinFactor,
maxFindAmpl = maxFindAmpl,
doWindow = doWindow,
windowOrigin = windowOrigin,
windowIsInclusive = windowIsInclusive,
helpURL = helpURL,
debug = debug,
)
self.exposeModel = TUI.Inst.ExposeModel.getModel(instName)
self.doZeroOverscan = bool(doZeroOverscan)
def formatBinFactorArg(self):
"""Return bin factor argument for expose/centroid/findstars command"""
if self.defBinFactor is None:
return ""
return "bin=%d,%d" % (self.binFactor, self.binFactor)
def formatExposeArgs(self, doWindow=True):
"""Format arguments for exposure command.
Inputs:
- doWindow: if true, window the exposure (if permitted)
"""
try:
retStr = BaseFocusScript.formatExposeArgs(self, doWindow)
except TypeError:
# try to shed light on an intermittent bug
print "Focus script bug diagnostic information"
print "self.__class__ =", self.__class__
print "inheritance tree =", inspect.getclasstree([self.__class__])
raise
retStr += " name=%s_focus" % (self.exposeModel.instInfo.instActor,)
if self.doZeroOverscan:
retStr += " overscan=0,0"
return retStr
def waitCentroid(self):
"""Take an exposure and centroid using 1x1 binning.
If the centroid is found, sets sr.value to the FWHM.
Otherwise sets sr.value to None.
"""
sr = self.sr
yield self.waitExpose()
filePath = sr.value
centroidCmdStr = "centroid file=%s on=%0.1f,%0.1f cradius=%0.1f" % \
(filePath, self.relStarPos[0], self.relStarPos[1], self.centroidRadPix)
yield sr.waitCmd(
actor = self.gcamActor,
cmdStr = centroidCmdStr,
keyVars = (self.guideModel.star,),
checkFail = False,
)
cmdVar = sr.value
if sr.debug:
starData = makeStarData("c", self.relStarPos)
else:
starData = cmdVar.getKeyVarData(self.guideModel.star)
if starData:
sr.value = StarMeas.fromStarKey(starData[0])
else:
sr.value = StarMeas()
def getExposeCmdDict(self, doWindow=True):
"""Get basic command arument dict for an expose command
This includes actor, cmdStr, abortCmdStr
"""
return dict(
actor = self.exposeModel.actor,
cmdStr = "object " + self.formatExposeArgs(doWindow),
abortCmdStr = "abort",
)
def waitExpose(self, doWindow=True):
"""Take an exposure.
Return the file path of the exposure in sr.value.
Raise ScriptError if the exposure fails.
"""
sr = self.sr
self.sr.showMsg("Exposing for %s sec" % (self.expTime,))
basicCmdDict = self.getExposeCmdDict(doWindow)
yield sr.waitCmd(
keyVars = (self.exposeModel.files,),
checkFail = False,
**basicCmdDict
)
cmdVar = sr.value
fileInfoList = cmdVar.getKeyVarData(self.exposeModel.files)
if self.sr.debug:
fileInfoList = [("me", "localhost", "tmp", "debug", "me", "test.fits")]
if not fileInfoList:
raise self.sr.ScriptError("exposure failed")
filePath = "".join(fileInfoList[0][2:6])
sr.value = filePath
def waitFindStar(self):
"""Take a full-frame exposure and find the best star that can be centroided.
Set sr.value to StarMeas for found star.
If no star found displays a warning and sets sr.value to empty StarMeas.
"""
sr = self.sr
yield self.waitExpose(doWindow=False)
filePath = sr.value
findStarCmdStr = "findstars file=%s" % (filePath,)
yield sr.waitCmd(
actor = self.gcamActor,
cmdStr = findStarCmdStr,
keyVars = (self.guideModel.star,),
checkFail = False,
)
cmdVar = sr.value
self.didTakeImage = True
if self.sr.debug:
starDataList = makeStarData("f", (50.0, 75.0))
else:
starDataList = cmdVar.getKeyVarData(self.guideModel.star)
if not starDataList:
sr.value = StarMeas()
self.sr.showMsg("No stars found", severity=RO.Constants.sevWarning)
return
yield self.waitFindStarInList(filePath, starDataList)
def polyfitw(x, y, w, ndegree, return_fit=False):
"""
Performs a weighted least-squares polynomial fit with optional error estimates.
Inputs:
x:
The independent variable vector.
y:
The dependent variable vector. This vector should be the same
length as X.
w:
The vector of weights. This vector should be same length as
X and Y.
ndegree:
The degree of polynomial to fit.
Outputs:
If return_fit is false (the default) then polyfitw returns only C, a vector of
coefficients of length ndegree+1.
If return_fit is true then polyfitw returns a tuple (c, yfit, yband, sigma, a)
yfit:
The vector of calculated Y's. Has an error of + or - yband.
yband:
Error estimate for each point = 1 sigma.
sigma:
The standard deviation in Y units.
a:
Correlation matrix of the coefficients.
Written by: George Lawrence, LASP, University of Colorado,
December, 1981 in IDL.
Weights added, April, 1987, G. Lawrence
Fixed bug with checking number of params, November, 1998,
Mark Rivers.
Python version, May 2002, Mark Rivers
"""
n = min(len(x), len(y)) # size = smaller of x,y
m = ndegree + 1 # number of elements in coeff vector
a = numpy.zeros((m,m), float) # least square matrix, weighted matrix
b = numpy.zeros(m, float) # will contain sum w*y*x^j
z = numpy.ones(n, float) # basis vector for constant term
a[0,0] = numpy.sum(w)
b[0] = numpy.sum(w*y)
for p in range(1, 2*ndegree+1): # power loop
z = z*x # z is now x^p
if (p < m): b[p] = numpy.sum(w*y*z) # b is sum w*y*x^j
sum = numpy.sum(w*z)
for j in range(max(0,(p-ndegree)), min(ndegree,p)+1):
a[j,p-j] = sum
a = numpy.linalg.inv(a)
c = numpy.dot(b, a)
if not return_fit:
return c # exit if only fit coefficients are wanted
# compute optional output parameters.
yfit = numpy.zeros(n, float)+c[0] # one-sigma error estimates, init
for k in range(1, ndegree +1):
yfit = yfit + c[k]*(x**k) # sum basis vectors
var = numpy.sum((yfit-y)**2 )/(n-m) # variance estimate, unbiased
sigma = numpy.sqrt(var)
yband = numpy.zeros(n, float) + a[0,0]
z = numpy.ones(n, float)
for p in range(1,2*ndegree+1): # compute correlated error estimates on y
z = z*x # z is now x^p
sum = 0.
for j in range(max(0, (p - ndegree)), min(ndegree, p)+1):
sum = sum + a[j,p-j]
yband = yband + sum * z # add in all the error sources
yband = yband*var
yband = numpy.sqrt(yband)
return c, yfit, yband, sigma, a
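# Illustrative sketch, not part of the original script: how waitFocusSweep uses
# polyfitw to estimate best focus. The focus/FWHM numbers are invented; this block
# only needs numpy and the polyfitw function above (though importing the whole
# module still requires TUI, RO and matplotlib).
if __name__ == "__main__":
    demoFocArr = numpy.array([-100.0, -50.0, 0.0, 50.0, 100.0])
    demoFWHMArr = 1.5 + 2.0e-4 * (demoFocArr - 20.0) ** 2   # parabola with its minimum at +20 um
    demoWeightArr = numpy.ones(len(demoFocArr), dtype=float)
    demoCoeffs = polyfitw(demoFocArr, demoFWHMArr, demoWeightArr, 2, False)
    # same vertex formula as waitFocusSweep: best focus at -c1 / (2 c2)
    demoBestFoc = (-1.0 * demoCoeffs[1]) / (2.0 * demoCoeffs[2])
    print("best focus estimate = %0.1f um" % (demoBestFoc,))   # about 20.0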
|
python
|
passages = {
'The Money Beat (Drumming)' : """Let me tell you how you can play the money beat on your drums. To get this beat going, start with one part at a time. This way you can part your mind and feel the groove a lot better. With your hi hat, play constant eighth notes. We will add in some accents in the future, but for now, just play eighth notes. Remember to count out loud when you are playing; it will help you out a lot!
Now that you have got this, try not to think about it. When people first learn to play the drums they usually think too much. This is where they start to make mistakes: they overthink. Your hi hat hand will not change this motion, so try to forget about it. Now it's time to concentrate on your other hand. With this hand, you will be playing quarter notes on the snare. These snare hits will be on the 2 and 4 count.
Good! Now let’s finish it off with the bass drum. This too will be playing quarter notes, however, not on the 2 and 4. Most beginners will have trouble with this; they will end up playing their snare and bass drum at the same time. Take your time and it will come to you. Play the bass on the 1 and 3 counts.
""",
"Revel's Pitch" : "Revel is a full-service digital consultancy that partners with organizations to lead go-to-market transformation by building seamless, simple, connected experiences. We are problem solvers, designers and builders united around a core set of values and a commitment to solving complex challenges swiftly, efficiently and elegantly.",
"Doppler Effect": """The Doppler effect (or the Doppler shift) is the change in frequency of a wave in relation to an observer who is moving relative to the wave source. It is named after the Austrian physicist Christian Doppler, who described the phenomenon in 1842.
A common example of Doppler shift is the change of pitch heard when a vehicle sounding a horn approaches and recedes from an observer. Compared to the emitted frequency, the received frequency is higher during the approach, identical at the instant of passing by, and lower during the recession.
The reason for the Doppler effect is that when the source of the waves is moving towards the observer, each successive wave crest is emitted from a position closer to the observer than the crest of the previous wave. Therefore, each wave takes slightly less time to reach the observer than the previous wave. Hence, the time between the arrivals of successive wave crests at the observer is reduced, causing an increase in the frequency. While they are traveling, the distance between successive wave fronts is reduced, so the waves "bunch together". Conversely, if the source of waves is moving away from the observer, each wave is emitted from a position farther from the observer than the previous wave, so the arrival time between successive waves is increased, reducing the frequency. The distance between successive wave fronts is then increased, so the waves "spread out".
For waves that propagate in a medium, such as sound waves, the velocity of the observer and of the source are relative to the medium in which the waves are transmitted. The total Doppler effect may therefore result from motion of the source, motion of the observer, or motion of the medium. Each of these effects is analyzed separately. For waves which do not require a medium, such as light or gravity in general relativity, only the relative difference in velocity between the observer and the source needs to be considered.
"""
}
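# Illustrative sketch, not part of the original data: the classical relation the
# 'Doppler Effect' passage describes, for a source moving straight toward a
# stationary observer through a medium with wave speed c. All numbers are examples.
def doppler_observed_frequency(f_source, wave_speed, source_speed_toward_observer):
    """Received frequency for a stationary observer and an approaching source."""
    return f_source * wave_speed / (wave_speed - source_speed_toward_observer)
# A 440 Hz horn approaching at 20 m/s in air (c ~ 343 m/s) is heard near 467 Hz:
# doppler_observed_frequency(440.0, 343.0, 20.0) -> ~467.2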
|
python
|
from django.db import models
from django.contrib.auth.models import AbstractUser
from django_countries.fields import CountryField
# Create your models here.
class Degree(models.Model):
degree_id = models.CharField(max_length=9, verbose_name='Degree ID', primary_key=True)
degree = models.CharField(max_length=20,default='mac')
class Meta:
verbose_name = 'Degree'
verbose_name_plural = verbose_name
def __str__(self):
return self.degree
class Semester(models.Model):
semester_ID = models.CharField(primary_key=True, max_length=5, verbose_name='Semester ID')
semester_Name = models.CharField(max_length=20, verbose_name='Semester Name')
class Meta:
verbose_name = 'Semester'
verbose_name_plural = verbose_name
def __str__(self):
return self.semester_Name
class Student(AbstractUser):
Address = models.CharField(max_length=50, null=True, blank=True)
City = models.CharField(max_length=10, null=True, blank=True)
Province = models.CharField(max_length=10, null=True, blank=True)
Country = CountryField()
Postal = models.CharField(max_length=10, null=True, blank=True)
Gender = models.CharField(max_length=6, choices=(('male','MALE'),('female','FEMALE')))
Phone = models.CharField(max_length=12, null=True, blank=True)
GPA = models.IntegerField(null=True, blank=True)
degree = models.ForeignKey(Degree, on_delete=models.CASCADE, null=True, blank=True)
class Meta:
        verbose_name = 'Student Information'
verbose_name_plural = verbose_name
def __str__(self):
return self.username
class Course(models.Model):
Course_ID = models.CharField(verbose_name='Course ID', primary_key=True, max_length=6)
Course_Name = models.CharField(verbose_name='Course Name', max_length=15)
Course_Description = models.CharField(verbose_name='Course Description', null=True, blank=True, max_length=100 )
Course_Hour = models.IntegerField(null=True, blank=True, verbose_name='Course Hour')
Degree = models.ForeignKey(Degree, on_delete=models.CASCADE)
Semester = models.ForeignKey(Semester, on_delete=models.CASCADE)
class Meta:
verbose_name = 'Course Information'
verbose_name_plural = verbose_name
def __str__(self):
return self.Course_Name
class Register(models.Model):
student = models.ForeignKey(Student, verbose_name='Student', on_delete=models.CASCADE)
Course = models.ForeignKey(Course, on_delete=models.CASCADE)
Grade = models.IntegerField(null=True, blank=True)
class Meta:
verbose_name = 'Course Registration Information'
verbose_name_plural = verbose_name
|
python
|
from setuptools import setup
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="fastly-python",
version="1.0.4",
author="Chris Zacharias",
author_email="[email protected]",
description=("A Python client libary for the Fastly API."),
license="BSD",
keywords="fastly",
url="https://github.com/zebrafishlabs/fastly-python",
packages=['fastly', 'tests'],
install_requires=[
'httplib2', 'six',
],
scripts=['bin/fastly_upload_vcl.py', 'bin/fastly_purge_url.py'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: BSD License",
],
)
|
python
|
#!/usr/bin/env python3
"""Utility class for logging. This class is a wrapper around the original
logging module :py:mod:`logging` and can be used to apply the
`panda_autograsp <https://github.com/rickstaa/panda_autograsp>`_
formatters, filters and handlers to the logging object.
"""
# Make script both python2 and python3 compatible
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
# Main python packages
import logging
import sys
import os
import colorlog
#################################################
# Script parameters #############################
#################################################
ROOT_LOG_LEVEL = logging.INFO
ROOT_LOG_STREAM = sys.stdout
#################################################
# Configure root logger function ################
#################################################
def clear_root():
"""Function used to reset the root logger."""
root_logger = logging.getLogger()
# clear any existing handles to streams because we don't want duplicate logs
# NOTE: we assume that any stream handles we find are to ROOT_LOG_STREAM,
# which is usually the case(because it is stdout). This is fine because we
# will be re-creating that handle. Otherwise we might be deleting a handle
# that won't be re-created, which could result in dropped logs.
    for hdlr in list(root_logger.handlers):  # iterate over a copy, the list is mutated below
if isinstance(hdlr, logging.StreamHandler):
root_logger.removeHandler(hdlr)
# create nullhandler to suppress no handler warning
root_logger.addHandler(logging.NullHandler())
    # Mark the root logger as not configured
    Logger.ROOT_CONFIGURED = False
#################################################
# Configure root logger function ################
#################################################
def configure_root(log_level=ROOT_LOG_LEVEL):
"""Function used to configure the root logger.
Parameters
----------
log_level : :py:obj:`int`, optional
The log level, by default ROOT_LOG_LEVEL
Returns
-------
:py:obj:`Logger`
Root logger.
"""
root_logger = logging.getLogger()
# clear any existing handles to streams because we don't want duplicate logs
# NOTE: we assume that any stream handles we find are to ROOT_LOG_STREAM,
# which is usually the case(because it is stdout). This is fine because we
# will be re-creating that handle. Otherwise we might be deleting a handle
# that won't be re-created, which could result in dropped logs.
    for hdlr in list(root_logger.handlers):  # iterate over a copy, the list is mutated below
if isinstance(hdlr, logging.StreamHandler):
root_logger.removeHandler(hdlr)
# configure the root logger
root_logger.setLevel(log_level)
hdlr = logging.StreamHandler(ROOT_LOG_STREAM)
formatter = colorlog.ColoredFormatter(
(
"%(blue)s%(name)-10s %(log_color)s%(levelname)-8s%(reset)s "
"%(white)s%(message)s"
),
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red,bg_white",
},
)
hdlr.setFormatter(formatter)
root_logger.addHandler(hdlr)
# Set root configured to true
Logger.ROOT_CONFIGURED = True
return root_logger
#################################################
# Root Logger file handler add ##################
#################################################
def add_root_log_file(log_file, mode="a", encoding=None, delay=False):
"""
Add a log file to the root logger.
Parameters
----------
log_file : :py:obj:`str`
The path to the log file.
mode : :py:obj:`str`
Log file writing mode, by default 'a'.
encoding : :py:obj:`str`
File encoding used, by default None.
delay : :py:obj:`str`
If delay is true, then file opening is deferred until the first call
to emit(), by default False.
"""
root_logger = logging.getLogger()
# Create model folder if it does not exists
log_folder = os.path.abspath(os.path.join(log_file, os.pardir))
if not os.path.exists(log_folder):
try:
os.makedirs(log_folder)
except OSError:
root_logger.info(
"Log file could not be created logger not logging to file {}".format(
log_file
)
)
return
# Add a file handle to the root logger
hdlr = logging.FileHandler(log_file, mode, encoding, delay)
formatter = logging.Formatter(
"%(asctime)s %(name)-10s %(levelname)-8s %(message)s", datefmt="%m-%d %H:%M:%S"
)
hdlr.setFormatter(formatter)
root_logger.info("Root logger now logging to {}".format(log_file))
root_logger.addHandler(hdlr)
#################################################
# Logger class ##################################
#################################################
class Logger(object):
"""Panda autograsp Logger class.
"""
ROOT_CONFIGURED = False
@staticmethod
def clear_root():
"""Reset root logger."""
clear_root()
@staticmethod
def reconfigure_root():
"""Reconfigure the root logger."""
configure_root()
@staticmethod
def get_logger(
name=None,
log_level=logging.INFO,
log_file=None,
silence=False,
mode="a",
encoding=None,
delay=False,
):
"""
Build a logger. All logs will be propagated up to the root logger if not
silenced. If log_file is provided, logs will be written out to that file.
If no logger name is given, log_file will be handed the root logger,
otherwise it will only be used by this particular logger.
Parameters
----------
name : :py:obj:`str`
            The name of the logger to be built, by default None, in which case
            the root logger is configured.
log_level : :py:obj:`int`
The log level. See the python logging module documentation for possible
enum values.
log_file : :py:obj:`str`
The path to the log file to log to.
silence : :py:obj:`bool`
Whether or not to silence this logger. If it is silenced, the only way
to get output from this logger is through a non-global log file.
mode : :py:obj:`str`
Log file writing mode, by default 'a'.
encoding : :py:obj:`str`
File encoding used, by default None.
delay : :py:obj:`str`
If delay is true, then file opening is deferred until the first call
to emit(), by default False.
Returns
-------
:py:obj:`Logger`
A custom logger.
"""
# Create a new logger object with the panda_autograsp formatting
if not name: # Format the root logger
# some checks for silencing/no-op logging
if silence:
raise ValueError(
"You can't silence a logger and log to a global log file!"
)
# Setup root_logger
if not Logger.ROOT_CONFIGURED:
root_logger = configure_root(log_level)
Logger.ROOT_CONFIGURED = True
# configure the log file stream
if log_file is not None:
add_root_log_file(log_file, mode, encoding, delay)
# Return root logger
return root_logger
else: # Create new logger object
no_op = False
# some checks for silencing/no-op logging
if silence and log_file is None:
logging.warning("You are creating a no-op logger!")
no_op = True
# build a logger
logger = logging.getLogger(name)
logger.setLevel(log_level)
# silence the logger by preventing it from propagating upwards to the root
logger.propagate = not silence
# configure the log file stream
if log_file is not None:
# Add logger file handler #
hdlr = logging.FileHandler(log_file, mode, encoding, delay)
formatter = logging.Formatter(
"%(asctime)s %(name)-10s %(levelname)-8s %(message)s",
datefmt="%m-%d %H:%M:%S",
)
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
# add a no-op handler to suppress warnings about there being no handlers
if no_op:
logger.addHandler(logging.NullHandler())
return logger
@staticmethod
def add_log_file(log_file=None, logger=None, mode="a", encoding=None, delay=False):
"""
Add a log file to this logger. If no logger is given, log_file will be
handed the root logger, otherwise it will only be used by this particular
logger.
Parameters
----------
log_file : :py:obj:`str`
The path to the log file to log to.
logger : :py:obj:`logging.Logger`
The logger.
mode : :py:obj:`str`
Log file writing mode, by default 'a'.
encoding : :py:obj:`str`
File encoding used, by default None.
delay : :py:obj:`str`
If delay is true, then file opening is deferred until the first call
to emit(), by default False.
"""
        # Add logfile to logger
        if logger is None:  # No logger given: hand the log file to the root logger
            add_root_log_file(log_file, mode, encoding, delay)
        else:  # Add the log file to the specified logger only
hdlr = logging.FileHandler(log_file, mode, encoding, delay)
formatter = logging.Formatter(
"%(asctime)s %(name)-10s %(levelname)-8s %(message)s",
datefmt="%m-%d %H:%M:%S",
)
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
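# ----------------------------------------------------------------------------
# Usage sketch (illustrative only; the logger name and file path below are
# assumptions, not part of the panda_autograsp package):
#
#   logger = Logger.get_logger("grasp_planner", log_level=logging.DEBUG)
#   logger.info("Logger configured with the panda_autograsp formatting.")
#   Logger.add_log_file(log_file="/tmp/grasp_planner.log", logger=logger)
# ----------------------------------------------------------------------------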
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetDataGuardAssociationResult',
'AwaitableGetDataGuardAssociationResult',
'get_data_guard_association',
]
@pulumi.output_type
class GetDataGuardAssociationResult:
"""
A collection of values returned by getDataGuardAssociation.
"""
def __init__(__self__, apply_lag=None, apply_rate=None, availability_domain=None, backup_network_nsg_ids=None, create_async=None, creation_type=None, data_guard_association_id=None, database_admin_password=None, database_id=None, database_software_image_id=None, delete_standby_db_home_on_delete=None, display_name=None, hostname=None, id=None, lifecycle_details=None, nsg_ids=None, peer_data_guard_association_id=None, peer_database_id=None, peer_db_home_id=None, peer_db_system_id=None, peer_role=None, peer_vm_cluster_id=None, protection_mode=None, role=None, shape=None, state=None, subnet_id=None, time_created=None, transport_type=None):
if apply_lag and not isinstance(apply_lag, str):
raise TypeError("Expected argument 'apply_lag' to be a str")
pulumi.set(__self__, "apply_lag", apply_lag)
if apply_rate and not isinstance(apply_rate, str):
raise TypeError("Expected argument 'apply_rate' to be a str")
pulumi.set(__self__, "apply_rate", apply_rate)
if availability_domain and not isinstance(availability_domain, str):
raise TypeError("Expected argument 'availability_domain' to be a str")
pulumi.set(__self__, "availability_domain", availability_domain)
if backup_network_nsg_ids and not isinstance(backup_network_nsg_ids, list):
raise TypeError("Expected argument 'backup_network_nsg_ids' to be a list")
pulumi.set(__self__, "backup_network_nsg_ids", backup_network_nsg_ids)
if create_async and not isinstance(create_async, bool):
raise TypeError("Expected argument 'create_async' to be a bool")
pulumi.set(__self__, "create_async", create_async)
if creation_type and not isinstance(creation_type, str):
raise TypeError("Expected argument 'creation_type' to be a str")
pulumi.set(__self__, "creation_type", creation_type)
if data_guard_association_id and not isinstance(data_guard_association_id, str):
raise TypeError("Expected argument 'data_guard_association_id' to be a str")
pulumi.set(__self__, "data_guard_association_id", data_guard_association_id)
if database_admin_password and not isinstance(database_admin_password, str):
raise TypeError("Expected argument 'database_admin_password' to be a str")
pulumi.set(__self__, "database_admin_password", database_admin_password)
if database_id and not isinstance(database_id, str):
raise TypeError("Expected argument 'database_id' to be a str")
pulumi.set(__self__, "database_id", database_id)
if database_software_image_id and not isinstance(database_software_image_id, str):
raise TypeError("Expected argument 'database_software_image_id' to be a str")
pulumi.set(__self__, "database_software_image_id", database_software_image_id)
if delete_standby_db_home_on_delete and not isinstance(delete_standby_db_home_on_delete, str):
raise TypeError("Expected argument 'delete_standby_db_home_on_delete' to be a str")
pulumi.set(__self__, "delete_standby_db_home_on_delete", delete_standby_db_home_on_delete)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if hostname and not isinstance(hostname, str):
raise TypeError("Expected argument 'hostname' to be a str")
pulumi.set(__self__, "hostname", hostname)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if lifecycle_details and not isinstance(lifecycle_details, str):
raise TypeError("Expected argument 'lifecycle_details' to be a str")
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
if nsg_ids and not isinstance(nsg_ids, list):
raise TypeError("Expected argument 'nsg_ids' to be a list")
pulumi.set(__self__, "nsg_ids", nsg_ids)
if peer_data_guard_association_id and not isinstance(peer_data_guard_association_id, str):
raise TypeError("Expected argument 'peer_data_guard_association_id' to be a str")
pulumi.set(__self__, "peer_data_guard_association_id", peer_data_guard_association_id)
if peer_database_id and not isinstance(peer_database_id, str):
raise TypeError("Expected argument 'peer_database_id' to be a str")
pulumi.set(__self__, "peer_database_id", peer_database_id)
if peer_db_home_id and not isinstance(peer_db_home_id, str):
raise TypeError("Expected argument 'peer_db_home_id' to be a str")
pulumi.set(__self__, "peer_db_home_id", peer_db_home_id)
if peer_db_system_id and not isinstance(peer_db_system_id, str):
raise TypeError("Expected argument 'peer_db_system_id' to be a str")
pulumi.set(__self__, "peer_db_system_id", peer_db_system_id)
if peer_role and not isinstance(peer_role, str):
raise TypeError("Expected argument 'peer_role' to be a str")
pulumi.set(__self__, "peer_role", peer_role)
if peer_vm_cluster_id and not isinstance(peer_vm_cluster_id, str):
raise TypeError("Expected argument 'peer_vm_cluster_id' to be a str")
pulumi.set(__self__, "peer_vm_cluster_id", peer_vm_cluster_id)
if protection_mode and not isinstance(protection_mode, str):
raise TypeError("Expected argument 'protection_mode' to be a str")
pulumi.set(__self__, "protection_mode", protection_mode)
if role and not isinstance(role, str):
raise TypeError("Expected argument 'role' to be a str")
pulumi.set(__self__, "role", role)
if shape and not isinstance(shape, str):
raise TypeError("Expected argument 'shape' to be a str")
pulumi.set(__self__, "shape", shape)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if subnet_id and not isinstance(subnet_id, str):
raise TypeError("Expected argument 'subnet_id' to be a str")
pulumi.set(__self__, "subnet_id", subnet_id)
if time_created and not isinstance(time_created, str):
raise TypeError("Expected argument 'time_created' to be a str")
pulumi.set(__self__, "time_created", time_created)
if transport_type and not isinstance(transport_type, str):
raise TypeError("Expected argument 'transport_type' to be a str")
pulumi.set(__self__, "transport_type", transport_type)
@property
@pulumi.getter(name="applyLag")
def apply_lag(self) -> str:
"""
The lag time between updates to the primary database and application of the redo data on the standby database, as computed by the reporting database. Example: `9 seconds`
"""
return pulumi.get(self, "apply_lag")
@property
@pulumi.getter(name="applyRate")
def apply_rate(self) -> str:
"""
The rate at which redo logs are synced between the associated databases. Example: `180 Mb per second`
"""
return pulumi.get(self, "apply_rate")
@property
@pulumi.getter(name="availabilityDomain")
def availability_domain(self) -> str:
return pulumi.get(self, "availability_domain")
@property
@pulumi.getter(name="backupNetworkNsgIds")
def backup_network_nsg_ids(self) -> Sequence[str]:
return pulumi.get(self, "backup_network_nsg_ids")
@property
@pulumi.getter(name="createAsync")
def create_async(self) -> bool:
return pulumi.get(self, "create_async")
@property
@pulumi.getter(name="creationType")
def creation_type(self) -> str:
return pulumi.get(self, "creation_type")
@property
@pulumi.getter(name="dataGuardAssociationId")
def data_guard_association_id(self) -> str:
return pulumi.get(self, "data_guard_association_id")
@property
@pulumi.getter(name="databaseAdminPassword")
def database_admin_password(self) -> str:
return pulumi.get(self, "database_admin_password")
@property
@pulumi.getter(name="databaseId")
def database_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the reporting database.
"""
return pulumi.get(self, "database_id")
@property
@pulumi.getter(name="databaseSoftwareImageId")
def database_software_image_id(self) -> str:
return pulumi.get(self, "database_software_image_id")
@property
@pulumi.getter(name="deleteStandbyDbHomeOnDelete")
def delete_standby_db_home_on_delete(self) -> str:
return pulumi.get(self, "delete_standby_db_home_on_delete")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def hostname(self) -> str:
return pulumi.get(self, "hostname")
@property
@pulumi.getter
def id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the Data Guard association.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> str:
"""
Additional information about the current lifecycleState, if available.
"""
return pulumi.get(self, "lifecycle_details")
@property
@pulumi.getter(name="nsgIds")
def nsg_ids(self) -> Sequence[str]:
return pulumi.get(self, "nsg_ids")
@property
@pulumi.getter(name="peerDataGuardAssociationId")
def peer_data_guard_association_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the peer database's Data Guard association.
"""
return pulumi.get(self, "peer_data_guard_association_id")
@property
@pulumi.getter(name="peerDatabaseId")
def peer_database_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the associated peer database.
"""
return pulumi.get(self, "peer_database_id")
@property
@pulumi.getter(name="peerDbHomeId")
def peer_db_home_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the Database Home containing the associated peer database.
"""
return pulumi.get(self, "peer_db_home_id")
@property
@pulumi.getter(name="peerDbSystemId")
def peer_db_system_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the DB system containing the associated peer database.
"""
return pulumi.get(self, "peer_db_system_id")
@property
@pulumi.getter(name="peerRole")
def peer_role(self) -> str:
"""
The role of the peer database in this Data Guard association.
"""
return pulumi.get(self, "peer_role")
@property
@pulumi.getter(name="peerVmClusterId")
def peer_vm_cluster_id(self) -> str:
return pulumi.get(self, "peer_vm_cluster_id")
@property
@pulumi.getter(name="protectionMode")
def protection_mode(self) -> str:
"""
The protection mode of this Data Guard association. For more information, see [Oracle Data Guard Protection Modes](http://docs.oracle.com/database/122/SBYDB/oracle-data-guard-protection-modes.htm#SBYDB02000) in the Oracle Data Guard documentation.
"""
return pulumi.get(self, "protection_mode")
@property
@pulumi.getter
def role(self) -> str:
"""
The role of the reporting database in this Data Guard association.
"""
return pulumi.get(self, "role")
@property
@pulumi.getter
def shape(self) -> str:
return pulumi.get(self, "shape")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of the Data Guard association.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> str:
return pulumi.get(self, "subnet_id")
@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> str:
"""
The date and time the Data Guard association was created.
"""
return pulumi.get(self, "time_created")
@property
@pulumi.getter(name="transportType")
def transport_type(self) -> str:
"""
The redo transport type used by this Data Guard association. For more information, see [Redo Transport Services](http://docs.oracle.com/database/122/SBYDB/oracle-data-guard-redo-transport-services.htm#SBYDB00400) in the Oracle Data Guard documentation.
"""
return pulumi.get(self, "transport_type")
class AwaitableGetDataGuardAssociationResult(GetDataGuardAssociationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDataGuardAssociationResult(
apply_lag=self.apply_lag,
apply_rate=self.apply_rate,
availability_domain=self.availability_domain,
backup_network_nsg_ids=self.backup_network_nsg_ids,
create_async=self.create_async,
creation_type=self.creation_type,
data_guard_association_id=self.data_guard_association_id,
database_admin_password=self.database_admin_password,
database_id=self.database_id,
database_software_image_id=self.database_software_image_id,
delete_standby_db_home_on_delete=self.delete_standby_db_home_on_delete,
display_name=self.display_name,
hostname=self.hostname,
id=self.id,
lifecycle_details=self.lifecycle_details,
nsg_ids=self.nsg_ids,
peer_data_guard_association_id=self.peer_data_guard_association_id,
peer_database_id=self.peer_database_id,
peer_db_home_id=self.peer_db_home_id,
peer_db_system_id=self.peer_db_system_id,
peer_role=self.peer_role,
peer_vm_cluster_id=self.peer_vm_cluster_id,
protection_mode=self.protection_mode,
role=self.role,
shape=self.shape,
state=self.state,
subnet_id=self.subnet_id,
time_created=self.time_created,
transport_type=self.transport_type)
def get_data_guard_association(data_guard_association_id: Optional[str] = None,
database_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDataGuardAssociationResult:
"""
This data source provides details about a specific Data Guard Association resource in Oracle Cloud Infrastructure Database service.
Gets the specified Data Guard association's configuration information.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_data_guard_association = oci.database.get_data_guard_association(data_guard_association_id=oci_database_data_guard_association["test_data_guard_association"]["id"],
database_id=oci_database_database["test_database"]["id"])
```
:param str data_guard_association_id: The Data Guard association's [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
:param str database_id: The database [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
"""
__args__ = dict()
__args__['dataGuardAssociationId'] = data_guard_association_id
__args__['databaseId'] = database_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:database/getDataGuardAssociation:getDataGuardAssociation', __args__, opts=opts, typ=GetDataGuardAssociationResult).value
return AwaitableGetDataGuardAssociationResult(
apply_lag=__ret__.apply_lag,
apply_rate=__ret__.apply_rate,
availability_domain=__ret__.availability_domain,
backup_network_nsg_ids=__ret__.backup_network_nsg_ids,
create_async=__ret__.create_async,
creation_type=__ret__.creation_type,
data_guard_association_id=__ret__.data_guard_association_id,
database_admin_password=__ret__.database_admin_password,
database_id=__ret__.database_id,
database_software_image_id=__ret__.database_software_image_id,
delete_standby_db_home_on_delete=__ret__.delete_standby_db_home_on_delete,
display_name=__ret__.display_name,
hostname=__ret__.hostname,
id=__ret__.id,
lifecycle_details=__ret__.lifecycle_details,
nsg_ids=__ret__.nsg_ids,
peer_data_guard_association_id=__ret__.peer_data_guard_association_id,
peer_database_id=__ret__.peer_database_id,
peer_db_home_id=__ret__.peer_db_home_id,
peer_db_system_id=__ret__.peer_db_system_id,
peer_role=__ret__.peer_role,
peer_vm_cluster_id=__ret__.peer_vm_cluster_id,
protection_mode=__ret__.protection_mode,
role=__ret__.role,
shape=__ret__.shape,
state=__ret__.state,
subnet_id=__ret__.subnet_id,
time_created=__ret__.time_created,
transport_type=__ret__.transport_type)
|
python
|
"""Test AdsConnection class.
:author: Stefan Lehmann <[email protected]>
:license: MIT, see license file or https://opensource.org/licenses/MIT
:created on: 2018-06-11 18:15:58
"""
import time
import unittest
import pyads
import struct
from pyads.testserver import AdsTestServer, AmsPacket
from pyads import constants
from collections import OrderedDict
# These are pretty arbitrary
TEST_SERVER_AMS_NET_ID = "127.0.0.1.1.1"
TEST_SERVER_IP_ADDRESS = "127.0.0.1"
TEST_SERVER_AMS_PORT = pyads.PORT_SPS1
class AdsConnectionClassTestCase(unittest.TestCase):
"""Testcase for ADS connection class."""
@classmethod
def setUpClass(cls):
# type: () -> None
"""Setup the ADS testserver."""
cls.test_server = AdsTestServer(logging=True)
cls.test_server.start()
# wait a bit otherwise error might occur
time.sleep(1)
@classmethod
def tearDownClass(cls):
# type: () -> None
"""Tear down the testserver."""
cls.test_server.stop()
# wait a bit for server to shutdown
time.sleep(1)
def setUp(self):
# type: () -> None
"""Establish connection to the testserver."""
self.test_server.request_history = []
self.plc = pyads.Connection(
TEST_SERVER_AMS_NET_ID, TEST_SERVER_AMS_PORT, TEST_SERVER_IP_ADDRESS
)
def assert_command_id(self, request, target_id):
# type: (AmsPacket, int) -> None
"""Assert command_id and target_id."""
# Check the request code received by the server
command_id = request.ams_header.command_id
command_id = struct.unpack("<H", command_id)[0]
self.assertEqual(command_id, target_id)
def test_initialization(self):
# type: () -> None
"""Test init process."""
with self.assertRaises(TypeError):
pyads.Connection()
with self.assertRaises(AttributeError):
pyads.Connection(None, None)
def test_no_ip_address(self):
# type: () -> None
"""Autogenerate IP-address from AMS net id.
Autogenerate IP-address from AMS net id if no ip address is given
on initialization.
"""
plc = pyads.Connection(TEST_SERVER_AMS_NET_ID, TEST_SERVER_AMS_PORT)
self.assertEqual(TEST_SERVER_IP_ADDRESS, plc.ip_address)
def test_open_twice(self):
# type: () -> None
"""Open plc connection twice."""
self.plc.close()
with self.plc:
# connection should now be open
self.assertTrue(self.plc.is_open)
self.plc.open()
# connection should now be closed
self.assertFalse(self.plc.is_open)
def test_read_device_info(self):
with self.plc:
name, version = self.plc.read_device_info()
requests = self.test_server.request_history
self.assertEqual(len(requests), 1)
self.assert_command_id(requests[0], constants.ADSCOMMAND_READDEVICEINFO)
def test_read_uint(self):
with self.plc:
result = self.plc.read(pyads.INDEXGROUP_DATA, 1, pyads.PLCTYPE_UDINT)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received a request
self.assertEqual(len(requests), 1)
# Assert that the server received the correct command
self.assert_command_id(requests[0], constants.ADSCOMMAND_READ)
# Test server just returns repeated bytes of 0x0F terminated with 0x00
expected_result = struct.unpack("<I", "\x0F\x0F\x0F\x00".encode("utf-8"))[0]
self.assertEqual(result, expected_result)
def test_read_string(self):
# Make request to read data from a random index (the test server will
# return the same thing regardless)
with self.plc:
result = self.plc.read(
index_group=constants.INDEXGROUP_DATA,
index_offset=1,
plc_datatype=constants.PLCTYPE_STRING,
)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received a request
self.assertEqual(len(requests), 1)
# Assert that the server received the correct command
self.assert_command_id(requests[0], constants.ADSCOMMAND_READ)
# The string buffer is 1024 bytes long, this will be filled with \x0F
# and null terminated with \x00 by our test server. The \x00 will get
# chopped off during parsing to python string type
expected_result = "\x0F" * 1023
self.assertEqual(result, expected_result)
def test_write_uint(self):
value = 100
with self.plc:
self.plc.write(
index_group=constants.INDEXGROUP_DATA,
index_offset=1,
value=value,
plc_datatype=constants.PLCTYPE_UDINT,
)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received a request
self.assertEqual(len(requests), 1)
# Assert that the server received the correct command
self.assert_command_id(requests[0], constants.ADSCOMMAND_WRITE)
# Check the value received by the server
received_value = struct.unpack("<I", requests[0].ams_header.data[12:])[0]
self.assertEqual(value, received_value)
def test_write_float(self):
value = 123.456
with self.plc:
self.plc.write(
index_group=constants.INDEXGROUP_DATA,
index_offset=1,
value=value,
plc_datatype=constants.PLCTYPE_REAL,
)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received a request
self.assertEqual(len(requests), 1)
# Assert that the server received the correct command
self.assert_command_id(requests[0], constants.ADSCOMMAND_WRITE)
# Check the value received by the server
received_value = struct.unpack("<f", requests[0].ams_header.data[12:])[0]
# Pythons internal representation of a float has a higher precision
# than 32 bits, so will be more precise than the value received by the
# server. To do a comparison we must put the initial 'write' value
# through the round-trip of converting to 32-bit precision.
value_32 = struct.unpack("<f", struct.pack("<f", value))[0]
self.assertEqual(value_32, received_value)
def test_write_string(self):
value = "Test String 1234."
with self.plc:
self.plc.write(
index_group=constants.INDEXGROUP_DATA,
index_offset=1,
value=value,
plc_datatype=constants.PLCTYPE_STRING,
)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received a request
self.assertEqual(len(requests), 1)
# Assert that the server received the correct command
self.assert_command_id(requests[0], constants.ADSCOMMAND_WRITE)
# Check the value received by the server
received_value = requests[0].ams_header.data[12:]
# String should have been sent null terminated
sent_value = (value + "\x00").encode("utf-8")
self.assertEqual(sent_value, received_value)
def test_read_state(self):
with self.plc:
ads_state, device_state = self.plc.read_state()
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received a request
self.assertEqual(len(requests), 1)
# Assert that the server received the correct command
self.assert_command_id(requests[0], constants.ADSCOMMAND_READSTATE)
# Test server should return 'running'
self.assertEqual(ads_state, constants.ADSSTATE_RUN)
# Device state... Always zero?
self.assertEqual(device_state, 0)
def test_write_control(self):
# Set the ADS State to reset
# Device state is unused I think? Always seems to be zero
with self.plc:
self.plc.write_control(
ads_state=constants.ADSSTATE_RESET,
device_state=0,
data=0,
plc_datatype=constants.PLCTYPE_BYTE,
)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received a request
self.assertEqual(len(requests), 1)
# Assert that the server received the correct command
self.assert_command_id(requests[0], constants.ADSCOMMAND_WRITECTRL)
def test_read_write(self):
write_value = 100
with self.plc:
read_value = self.plc.read_write(
index_group=constants.INDEXGROUP_DATA,
index_offset=1,
plc_read_datatype=constants.PLCTYPE_UDINT,
value=write_value,
plc_write_datatype=constants.PLCTYPE_UDINT,
)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received a request
self.assertEqual(len(requests), 1)
# Assert that the server received the correct command
self.assert_command_id(requests[0], constants.ADSCOMMAND_READWRITE)
# Check the value received by the server
received_value = struct.unpack("<I", requests[0].ams_header.data[16:])[0]
self.assertEqual(write_value, received_value)
# Check read value returned by server:
# Test server just returns repeated bytes of 0x0F terminated with 0x00
expected_result = struct.unpack("<I", "\x0F\x0F\x0F\x00".encode("utf-8"))[0]
self.assertEqual(read_value, expected_result)
def test_read_by_name(self):
handle_name = "TestHandle"
with self.plc:
read_value = self.plc.read_by_name(handle_name, constants.PLCTYPE_BYTE)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received 3 requests
self.assertEqual(len(requests), 3)
# Assert that Read/Write command was used to get the handle by name
self.assert_command_id(requests[0], constants.ADSCOMMAND_READWRITE)
# Assert that the server received the handle by name
received_value = requests[0].ams_header.data[16:]
sent_value = (handle_name + "\x00").encode("utf-8")
self.assertEqual(sent_value, received_value)
# Assert that next, the Read command was used to get the value
self.assert_command_id(requests[1], constants.ADSCOMMAND_READ)
# Assert that Write was used to release the handle
self.assert_command_id(requests[2], constants.ADSCOMMAND_WRITE)
# Check read value returned by server:
# Test server just returns repeated bytes of 0x0F terminated with 0x00
# But because the read value is only 1-byte long, we just get 0x00
expected_result = 0
self.assertEqual(read_value, expected_result)
def test_read_by_name_with_handle(self):
# type: () -> None
"""Test read_by_name method with handle passed in"""
handle_name = "TestHandle"
with self.plc:
handle = self.plc.get_handle(handle_name)
read_value = self.plc.read_by_name(
"", constants.PLCTYPE_BYTE, handle=handle
)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received 2 requests
self.assertEqual(len(requests), 2)
# Assert that the server received the handle by name
received_value = requests[0].ams_header.data[16:]
sent_value = (handle_name + "\x00").encode("utf-8")
self.assertEqual(sent_value, received_value)
# Assert that next, the Read command was used to get the value
self.assert_command_id(requests[1], constants.ADSCOMMAND_READ)
# Check read value returned by server:
# Test server just returns repeated bytes of 0x0F terminated with 0x00
# But because the read value is only 1-byte long, we just get 0x00
expected_result = 0
self.assertEqual(read_value, expected_result)
with self.plc:
self.plc.release_handle(handle)
def test_read_structure_by_name(self):
# type: () -> None
"""Test read by structure method"""
# TODO may need testserver.py changes to increase test usefulness
handle_name = "TestHandle"
structure_def = (("xVar", pyads.PLCTYPE_BYTE, 1),)
# test with no structure size passed in
with self.plc:
read_value = self.plc.read_structure_by_name(handle_name, structure_def)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received 3 requests
self.assertEqual(len(requests), 3)
# Assert that Read/Write command was used to get the handle by name
self.assert_command_id(requests[0], constants.ADSCOMMAND_READWRITE)
# Assert that the server received the handle by name
received_value = requests[0].ams_header.data[16:]
sent_value = (handle_name + "\x00").encode("utf-8")
self.assertEqual(sent_value, received_value)
# Assert that next, the Read command was used to get the value
self.assert_command_id(requests[1], constants.ADSCOMMAND_READ)
# Assert that Write was used to release the handle
self.assert_command_id(requests[2], constants.ADSCOMMAND_WRITE)
# Check read value returned by server:
# Test server just returns repeated bytes of 0x0F terminated with 0x00
# But because the read value is only 1-byte long, we just get 0x00
expected_result = OrderedDict([("xVar", 0)])
self.assertEqual(read_value, expected_result)
# Test with structure size passed in
structure_size = pyads.size_of_structure(structure_def)
with self.plc:
read_value = self.plc.read_structure_by_name(
handle_name, structure_def, structure_size=structure_size
)
self.assertEqual(read_value, expected_result)
# Test with handle passed in
with self.plc:
handle = self.plc.get_handle(handle_name)
read_value = self.plc.read_structure_by_name(
"", structure_def, handle=handle
)
self.assertEqual(read_value, expected_result)
with self.plc:
self.plc.release_handle(handle)
def test_write_by_name(self):
handle_name = "TestHandle"
value = "Test Value"
with self.plc:
self.plc.write_by_name(handle_name, value, constants.PLCTYPE_STRING)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received 3 requests
self.assertEqual(len(requests), 3)
# Assert that Read/Write command was used to get the handle by name
self.assert_command_id(requests[0], constants.ADSCOMMAND_READWRITE)
# Assert that Write command was used to write the value
self.assert_command_id(requests[1], constants.ADSCOMMAND_WRITE)
# Check the value written matches our value
received_value = requests[1].ams_header.data[12:].decode("utf-8").rstrip("\x00")
self.assertEqual(value, received_value)
# Assert that Write was used to release the handle
self.assert_command_id(requests[2], constants.ADSCOMMAND_WRITE)
def test_write_by_name_with_handle(self):
# type: () -> None
"""Test write_by_name method with handle passed in"""
handle_name = "TestHandle"
value = "Test Value"
with self.plc:
handle = self.plc.get_handle(handle_name)
self.plc.write_by_name("", value, constants.PLCTYPE_STRING, handle=handle)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received 2 requests
self.assertEqual(len(requests), 2)
# Assert that Read/Write command was used to get the handle by name
self.assert_command_id(requests[0], constants.ADSCOMMAND_READWRITE)
# Assert that Write command was used to write the value
self.assert_command_id(requests[1], constants.ADSCOMMAND_WRITE)
# Check the value written matches our value
received_value = requests[1].ams_header.data[12:].decode("utf-8").rstrip("\x00")
self.assertEqual(value, received_value)
with self.plc:
self.plc.release_handle(handle)
def test_device_notification(self):
def callback(adr, notification, user):
pass
handle_name = "test"
attr = pyads.NotificationAttrib(8)
requests = self.test_server.request_history
with self.plc:
notification, user = self.plc.add_device_notification(
handle_name, attr, callback
)
# Assert that Read/Write command was used to get the handle by name
self.assert_command_id(requests[0], constants.ADSCOMMAND_READWRITE)
# Assert that ADDDEVICENOTIFICATION was used to add device notification
self.assert_command_id(requests[1], constants.ADSCOMMAND_ADDDEVICENOTE)
self.plc.del_device_notification(notification, user)
# Assert that ADDDEVICENOTIFICATION was used to add device notification
self.assert_command_id(requests[2], constants.ADSCOMMAND_DELDEVICENOTE)
def test_multiple_connect(self):
"""
Using context manager multiple times after each other for
disconnecting and connecting to/from server should work without any
errors.
"""
handle_name = "TestHandle"
value = "Test Value"
with self.plc:
self.assertTrue(self.plc.is_open)
self.plc.write_by_name(handle_name, value, constants.PLCTYPE_STRING)
self.assertFalse(self.plc.is_open)
with self.plc:
self.assertTrue(self.plc.is_open)
self.plc.read_by_name(handle_name, constants.PLCTYPE_STRING)
self.assertFalse(self.plc.is_open)
def test_get_local_address(self):
# type: () -> None
"""Test get_local_address method."""
with self.plc:
self.plc.get_local_address()
def test_methods_with_closed_port(self):
# type: () -> None
"""Test pyads.Connection methods with no open port."""
with self.plc:
adr = self.plc.get_local_address()
self.assertIsNotNone(adr)
plc = pyads.Connection("127.0.0.1.1.1", 851)
self.assertIsNone(plc.get_local_address())
self.assertIsNone(plc.read_state())
self.assertIsNone(plc.read_device_info())
self.assertIsNone(plc.read_write(1, 2, pyads.PLCTYPE_INT, 1, pyads.PLCTYPE_INT))
self.assertIsNone(plc.read(1, 2, pyads.PLCTYPE_INT))
self.assertIsNone(plc.read_by_name("hello", pyads.PLCTYPE_INT))
self.assertIsNone(plc.get_handle("hello"))
self.assertIsNone(
plc.read_structure_by_name(
"hello", (("", pyads.PLCTYPE_BOOL, 1), ("", pyads.PLCTYPE_BOOL, 1))
)
)
self.assertIsNone(
plc.add_device_notification(
"test", pyads.NotificationAttrib(4), lambda x: x
)
)
def test_set_timeout(self):
# type: () -> None
"""Test timeout function."""
with self.plc:
self.assertIsNone(self.plc.set_timeout(100))
def test_get_and_release_handle(self):
# type: () -> None
"""Test get_handle and release_handle methods"""
handle_name = "TestHandle"
with self.plc:
handle = self.plc.get_handle(handle_name)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server received a single request
self.assertEqual(len(requests), 1)
# Assert that Read/Write command was used to get the handle by name
self.assert_command_id(requests[0], constants.ADSCOMMAND_READWRITE)
# Assert that the server received the handle by name
received_value = requests[0].ams_header.data[16:]
sent_value = (handle_name + "\x00").encode("utf-8")
self.assertEqual(sent_value, received_value)
with self.plc:
self.plc.release_handle(handle)
# Retrieve list of received requests from server
requests = self.test_server.request_history
# Assert that the server history now has 2 requests
self.assertEqual(len(requests), 2)
# Assert that Write was used to release the handle
self.assert_command_id(requests[1], constants.ADSCOMMAND_WRITE)
if __name__ == "__main__":
unittest.main()
|
python
|
# coding: utf-8
"""
Unit tests for the args module.
"""
import argparse
from datetime import datetime
import pytest
from ..src.args import parse_date, parse_args, parse_file_arg
def test_parse_date_invalid():
"""Ensure that invalid dates fail to parse."""
with pytest.raises(argparse.ArgumentTypeError, match="Not a valid date: 'test'."):
assert parse_date('test')
def test_parse_date_valid():
"""Ensure that proper dates parse correctly."""
assert parse_date('2019-06-27 13:20') == datetime(2019, 6, 27, 13, 20, 0, 0)
assert parse_date('2019-06-27') == datetime(2019, 6, 27, 0, 0, 0, 0)
def test_parse_args():
"""Ensure that a full list of arguments can get parsed correctly."""
args = ['-o', 'out', '-f', '[%date%] %filename%.%ext%', '-s', 'large', '-u', 'Twitter']
parsed = parse_args(args)
assert parsed.userid == ['Twitter']
assert parsed.o_userid is True
assert parsed.output == 'out'
assert parsed.format == '[%date%] %filename%.%ext%'
assert parsed.image_size == 'large'
def test_parse_file_arg_basic():
"""Ensure that parse_file_arg works for basic use cases."""
parsed = parse_file_arg('Twitter')
assert parsed == ['Twitter']
def test_parse_file_arg_file():
"""Ensure that parse_file_arg works for reading text files."""
test_file = 'test_args.txt'
with open(test_file, 'w') as file_descriptor:
file_descriptor.write('Twitter\nOther\nUser')
parsed = parse_file_arg('@' + test_file)
assert parsed == ['Twitter', 'Other', 'User']
|
python
|
from vk_bot.core.modules.basicplug import BasicPlug
class Texttobits(BasicPlug):
doc = "Зашифровать сообщение в бинарный код"
command = ("бинарный0",)
def main(self):
text = ' '.join(self.text[1:])
bits = bin(int.from_bytes(text.encode('utf-8', 'surrogatepass'), 'big'))[2:]
encode = bits.zfill(8 * ((len(bits) + 7) // 8))
self.sendmsg(str(encode))
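# Standalone sketch of the same text-to-bits round trip (illustrative only,
# independent of the bot framework):
#
#   text = "hi"
#   bits = bin(int.from_bytes(text.encode("utf-8", "surrogatepass"), "big"))[2:]
#   padded = bits.zfill(8 * ((len(bits) + 7) // 8))
#   decoded = int(padded, 2).to_bytes((len(padded) + 7) // 8, "big").decode("utf-8")
#   assert decoded == text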
|
python
|
import torch
import numpy as np
class ReplayBuffer(object):
def __init__(self, buffer_mem, num_states, num_actions):
self.switch = 0
self.buffer_mem = buffer_mem
self.count_mem = 0
self.num_states = num_states
self.num_actions = num_actions
self.state = torch.zeros((buffer_mem, num_states))
self.action = torch.zeros((buffer_mem, num_actions))
self.reward = torch.zeros(buffer_mem)
self.next_state = torch.zeros((buffer_mem, num_states))
self.terminal = torch.zeros(buffer_mem)
def load_buffer(self, state, action, reward, next_state, done):
# this will start rewriting the buffer memory after getting full
index = self.count_mem % self.buffer_mem
self.state[index] = torch.tensor(state)
        # one-hot vector for the action values
actions = torch.zeros(self.num_actions)
actions[action] = 1.0
self.action[index] = actions
self.reward[index] = torch.tensor(reward)
self.next_state[index] = torch.tensor(next_state)
self.terminal[index] = 1 - int(done)
self.count_mem += 1
def get_batch(self, batch_size):
        # Sample a contiguous slice of transitions. While the buffer is still
        # filling (its last reward slot has never been written), restrict the
        # start index to the region written so far; once the buffer has wrapped
        # around, sample a start index from the whole buffer.
        if self.switch == 0 and self.reward[-1] == 0:
            idx = np.random.choice(self.count_mem - batch_size)
        else:
            self.switch = 1
            idx = np.random.choice(self.buffer_mem - batch_size)
state = self.state[idx: idx+batch_size]
action = self.action[idx: idx+batch_size]
reward = self.reward[idx: idx+batch_size]
next_state = self.next_state[idx: idx+batch_size]
terminal = self.terminal[idx: idx+batch_size]
return state, action, reward, next_state, terminal
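# ----------------------------------------------------------------------------
# Usage sketch (illustrative only; the sizes and the transition variables are
# assumptions, not defined in this module):
#
#   buffer = ReplayBuffer(buffer_mem=10000, num_states=4, num_actions=2)
#   buffer.load_buffer(state, action, reward, next_state, done)
#   states, actions, rewards, next_states, terminals = buffer.get_batch(64)
# ----------------------------------------------------------------------------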
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import zipfile
import numpy as np
class BaseModel(nn.Module):
def __init__(self, args, vocab, tag_size):
super(BaseModel, self).__init__()
self.args = args
self.vocab = vocab
self.tag_size = tag_size
def save(self, path):
# Save model
print(f'Saving model to {path}')
ckpt = {
'args': self.args,
'vocab': self.vocab,
'state_dict': self.state_dict()
}
torch.save(ckpt, path)
def load(self, path):
# Load model
print(f'Loading model from {path}')
ckpt = torch.load(path)
self.vocab = ckpt['vocab']
self.args = ckpt['args']
self.load_state_dict(ckpt['state_dict'])
def load_embedding(vocab, emb_file, emb_size):
"""
Read embeddings for words in the vocabulary from the emb_file (e.g., GloVe, FastText).
Args:
vocab: (Vocab), a word vocabulary
emb_file: (string), the path to the embdding file for loading
emb_size: (int), the embedding size (e.g., 300, 100) depending on emb_file
Return:
emb: (np.array), embedding matrix of size (|vocab|, emb_size)
"""
emb_model = np.load(emb_file ,allow_pickle=True).item()
emb_matrix = []
for word in vocab.word2id.keys():
if emb_model.get(word) is not None:
emb_matrix.append(emb_model.get(word))
else:
emb_matrix.append(np.random.random(emb_size))
return np.array(emb_matrix)
# raise NotImplementedError()
class DanModel(BaseModel):
def __init__(self, args, vocab, tag_size):
super(DanModel, self).__init__(args, vocab, tag_size)
self.define_model_parameters()
self.init_model_parameters()
# Use pre-trained word embeddings if emb_file exists
if args.emb_file is not None:
self.copy_embedding_from_numpy()
def define_model_parameters(self):
"""
Define the model's parameters, e.g., embedding layer, feedforward layer.
"""
self.embed = nn.Embedding(len(self.vocab), self.args.emb_size)
# self.emb_drop = nn.Dropout(self.args.emb_drop)
# self.emb_bn = nn.BatchNorm1d(self.args.emb_size)
fully_connected_layers = []
for i in range(self.args.hid_layer):
if i == 0:
fully_connected_layers.append(nn.Linear(self.args.emb_size, self.args.hid_size))
fully_connected_layers.append(nn.Dropout(self.args.hid_drop))
# fully_connected_layers.append(nn.BatchNorm1d(self.args.hid_size))
elif i == self.args.hid_layer - 1:
fully_connected_layers.append(nn.Linear(self.args.hid_size, self.tag_size))
else:
fully_connected_layers.append(nn.Linear(self.args.hid_size, self.args.hid_size))
fully_connected_layers.append(nn.Dropout(self.args.hid_drop))
#fully_connected_layers.append(nn.BatchNorm1d(self.args.hid_size))
self.fc = nn.ModuleList(fully_connected_layers)
# raise NotImplementedError()
def init_model_parameters(self):
"""
Initialize the model's parameters by uniform sampling from a range [-v, v], e.g., v=0.08
"""
for layer in self.fc:
if isinstance(layer, nn.Linear):
nn.init.uniform_(layer.weight, -0.08, 0.08)
# raise NotImplementedError()
def copy_embedding_from_numpy(self):
"""
Load pre-trained word embeddings from numpy.array to nn.embedding
"""
emb = load_embedding(self.vocab, self.args.emb_file, self.args.emb_size)
self.embed.weight = torch.nn.Parameter(torch.from_numpy(emb))
self.embed.weight.requires_grad = False
# raise NotImplementedError()
def forward(self, x):
"""
Compute the unnormalized scores for P(Y|X) before the softmax function.
E.g., feature: h = f(x)
scores: scores = w * h + b
P(Y|X) = softmax(scores)
Args:
x: (torch.LongTensor), [batch_size, seq_length]
Return:
scores: (torch.FloatTensor), [batch_size, ntags]
"""
X = self.embed(x)
X = X.mean(dim=1)
# X = self.emb_drop(X)
# X = self.emb_bn(X)
for layer in self.fc:
if isinstance(layer, nn.Linear):
X = F.relu(layer(X))
else:
X = layer(X)
return X
# raise NotImplementedError()
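# ----------------------------------------------------------------------------
# Usage sketch (illustrative only; `args`, `vocab` and the tensor shapes are
# assumptions, not defined in this module):
#
#   model = DanModel(args, vocab, tag_size=5)
#   token_ids = torch.randint(0, len(vocab), (32, 20))  # [batch_size, seq_length]
#   scores = model(token_ids)                           # [32, 5]
# ----------------------------------------------------------------------------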
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 25 18:28:27 2019
@author: Arthur
"""
pass
|
python
|
from .preprocess import *
from . import preprocess_utils
from .epipolarloss import *
from .kploss import *
|
python
|
import csv
import sys
import urllib3 as ulib
import pandas as pd
import datetime
from bs4 import BeautifulSoup as bs
ulib.disable_warnings()
try:
#Create csv file only with column headers
columnNames = ['CompanyName','Year','Ergebnis je Aktie (unverwaessert, nach Steuern)','Ergebnis je Aktie (verwaessert, nach Steuern)',
'Dividende je Aktie','Gesamtdividendenausschuettung in Mio.',
'Umsatz je Aktie','KGV (Jahresendkurs)','KGV (Jahresendkurs, EPS verwaessert)','Dividendenrendite Jahresende in %',
'Eigenkapitalquote in %', 'Fremdkapitalquote in %', 'Umsatzerloese', 'Umsatzveraenderung in %', 'Bruttoergebnis vom Umsatz', 'Bruttoergebnisveraenderung in %',
'Operatives Ergebnis', 'Veraenderung Operatives Ergebnis in %', 'Ergebnis vor Steuern', 'Veraenderung Ergebnis vor Steuern in %',
'Ergebnis nach Steuer', 'Veraenderung Ergebnis nach Steuer in %', 'Gesamtverbindlichkeiten', 'Langzeit Gesamtverbindlichk. pro Aktie',
'Eigenkapital', 'Veraenderung Eigenkapital in %', 'Bilanzsumme', 'Veraenderung Bilanzsumme in %', 'Gewinn je Aktie (unverwaessert, nach Steuern)',
'Veraenderung EPS (unverwässert) in %', 'Gewinn je Aktie (verwaessert, nach Steuern)', 'Veraenderung EPS (verwässert) in %', 'Dividende je Aktie',
'Veraenderung Dividende je Aktie in %', 'Anzahl Mitarbeiter', 'Veraenderung Anzahl Mitarbeiter in %']
df = pd.DataFrame(columns=columnNames)
csv_name = "FinanzenNet_" + datetime.datetime.now().isoformat().replace(":","_")[:19] + "_GuV-Data.csv"
df.to_csv(csv_name, encoding='utf-8')
f_csv = open(csv_name, 'a') # Open csv file in append mode
#Read URLs from csv and store them in a list
with open('SP_500_Finanznet_urls.csv', 'r') as f:
reader = csv.reader(f)
urls = list(reader)
#urls = ['https://www.finanzen.net/bilanz_guv/General_Mills','https://www.finanzen.net/bilanz_guv/Kellogg','https://www.finanzen.net/bilanz_guv/PepsiCo']
    #Loop over all URLs in the list
for url in urls[:]:
print(url[0])
#Open url and parse data
http = ulib.PoolManager()
response = http.request('GET', url[0])
soup = bs(response.data,"lxml")
#Initialize dataframes for each year
df1 = pd.DataFrame(columns=columnNames,index=range(1))
df2 = pd.DataFrame(columns=columnNames,index=range(1))
df3 = pd.DataFrame(columns=columnNames,index=range(1))
df4 = pd.DataFrame(columns=columnNames,index=range(1))
df5 = pd.DataFrame(columns=columnNames,index=range(1))
df6 = pd.DataFrame(columns=columnNames,index=range(1))
df7 = pd.DataFrame(columns=columnNames,index=range(1))
df_array = [df1,df2,df3,df4,df5,df6,df7]
#Fill CompanyName and Year Column
startyear = int(soup.find_all("table")[1].find_all("thead")[0].find_all("th")[2].contents[0])
for i in range (7):
df_array[i]['CompanyName'] = url[0].split('/')[4]
df_array[i]['Year'] = startyear+i
#Find all tables in parsed url
table = soup.find_all("table", { "class" : "table" })
counter = 1
#Loop over all tables
for i, mytable in enumerate(table):
if i != 0: #First table is irrelevant
try:
rows = mytable.find_all('tr')
#Loop over rows of table
for j, tr in enumerate(rows):
if j != 0: #First row is irrelevant (Headers)
cols = tr.find_all('td')
counter += 1
#Loop over colummns in row
for k, td in enumerate(cols):
if k > 1: # first two columns (checkbox, Name) are irrelevant
#Store data from each column in a different dataframe
df_array[k-2][columnNames[counter]] = td.text
except Exception as e:
print("Error in For-Loop")
print (e)
#Loop over all dataframes and write them to csv file
for i in range(7):
df_array[i].to_csv(f_csv, header = False, encoding='utf-8',index=False)
print("-scraped! Startyear: " + str(startyear))
#Successfully scraped all urls
f_csv.close()
print("SUCCESS")
except KeyboardInterrupt:
f_csv.close()
print("Ausführung manuell beendet")
except Exception as e:
print("Error occured")
f_csv.close()
type, value, traceback = sys.exc_info()
print('Error opening %s: %s' % (value.filename, value.strerror))
|
python
|
import os
import numpy as np
import argparse
from sklearn.model_selection import StratifiedKFold
#from data.image_folder import make_dataset
#from tensorflow.keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import json
import pandas as pd
import numpy as np
import os, sys
import glob
import re
import hashlib
import pathlib
import cv2
#TODO: add isTB to the options
isTB = True
#from options.train_options import TrainOptions
#from data import create_dataset
#from models import create_model
#from rxwgan.models import *
#from rxwgan.wgangp import wgangp_optimizer
#from rxcore import stratified_train_val_test_splits
def run(command: object) -> object:
print(command)
exit_status = os.system(command)
if exit_status > 0:
exit(1)
IMG_EXTENSIONS = [
'.jpg', '.JPG', '.jpeg', '.JPEG',
'.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
'.tif', '.TIF', '.tiff', '.TIFF',
]
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def expand_folder( path , extension):
l = glob.glob(path+'/*.'+extension)
l.sort()
return l
def get_md5(path):
return hashlib.md5(pathlib.Path(path).read_bytes()).hexdigest()
#import numpy as np
#
# Split train/val/test splits
#
def stratified_train_val_test_splits( df_kfold, seed=512 ):
cv_train_test = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
cv_train_val = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
sorts_train_test = []
for train_val_idx, test_idx in cv_train_test.split( df_kfold.values,df_kfold.target.values ):
train_val_df = df_kfold.iloc[train_val_idx]
sorts = []
for train_idx, val_idx in cv_train_val.split( train_val_df.values, train_val_df.target.values ):
sorts.append((train_val_df.index[train_idx].values, train_val_df.index[val_idx].values, test_idx))
sorts_train_test.append(sorts)
return sorts_train_test
def make_dataset(dir, max_dataset_size=float("inf")):
images = []
assert os.path.isdir(dir), '%s is not a valid directory' % dir
for root, _, fnames in sorted(os.walk(dir)):
for fname in fnames:
if is_image_file(fname):
path = os.path.join(root, fname)
images.append(path)
return images[:min(max_dataset_size, len(images))]
def prepare_my_table(clinical_path, images_path, masks_path, combine = False):
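    # Builds one row per clinical-report .txt file: age, sex and the TB target
    # are parsed from the report, the matching raw image and mask paths are
    # recorded, and when combine is True a side-by-side (mask | raw) paired
    # image is written for pix2pix-style training.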
d = {
'target': [],
'image_ID': [],
'raw_image_path': [],
'mask_image_path': [],
'paired_image_path': [],
'raw_image_md5': [],
'age': [],
'sex': [],
'comment': [],
}
def treat_string(lines):
string = ''
for s in lines:
string += s.replace('\n', '').replace('\t', '')
return re.sub(' +', ' ', string)
for idx, path in enumerate(expand_folder(clinical_path, 'txt')):
with open(path, 'r') as f:
lines = f.readlines()
sex = 'male' if 'male' in lines[0] else 'female' # 1 for male and 0 for female
age = int(re.sub('\D', '', lines[0]))
# get TB by file name (_1.txt is PTB or _0.txt is NTB)
target = 1 if '_1.txt' in path else 0
filename = path.split('/')[-1]
image_filename = filename.replace('txt', 'png')
# image_path = images_path+('/tb/' if target else '/no_tb/')+image_filename
image_path = images_path + '/' + image_filename
d['target'].append(target)
d['age'].append(age)
d['sex'].append(sex)
d['raw_image_path'].append(image_path)
d['raw_image_md5'].append(get_md5(image_path))
d['mask_image_path'].append('')
d['paired_image_path'].append('')
d['comment'].append(treat_string(lines[1::]))
d['image_ID'].append(filename.replace('.txt', ''))
l_masks = make_dataset(masks_path)
for mask in l_masks:
if image_path[-17:] == mask[-17:]:
idx = np.where(np.array(d['raw_image_path']) == image_path)[0][0]
d['mask_image_path'][idx] = mask
if combine == True:
path_paired = image_path[:-25] + 'foldAB'
path_paired_img = path_paired + '/' + image_path[-17:]
d['paired_image_path'][idx] = path_paired_img
if not os.path.isdir(path_paired):
os.makedirs(path_paired)
im_A = cv2.imread(image_path)
im_B = cv2.imread(mask)
im_AB = np.concatenate([im_B, im_A], 1)
cv2.imwrite(path_paired_img, im_AB)
return pd.DataFrame(d)
# NOTE: this is optional.
#from rxcore import allow_tf_growth
#allow_tf_growth()
#
# Start your job here
#
#job = json.load(open(args.job, 'r'))
#sort = job['sort']
#target = 1 # tb active
#test = job['test']
seed = 512
#epochs = 1000
#batch_size = 32
base_data_raw_path = '/Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned'
clinical_path = base_data_raw_path + '/ClinicalReadings'
images_path = base_data_raw_path + '/trainA'
masks_path = base_data_raw_path + '/trainB'
df = prepare_my_table(clinical_path, images_path, masks_path, combine = True)
splits = stratified_train_val_test_splits(df,seed)[0]
training_data = df.iloc[splits[0][0]]
validation_data = df.iloc[splits[0][1]]
if isTB:
    train_tb = training_data.loc[training_data.target == 1]
    val_tb = validation_data.loc[validation_data.target == 1]
else:
    train_ntb = training_data.loc[training_data.target == 0]
    val_ntb = validation_data.loc[validation_data.target == 0]
#training_data = training_data.loc[training_data.target==target]
#validation_data = validation_data.loc[validation_data.target==target]
#extra_d = {'sort': sort, 'test': test, 'target': target, 'seed': seed}  # sort/test/target come from the job JSON loaded above (currently commented out)
# Run!
#history = optimizer.fit( train_generator , val_generator, extra_d=extra_d, wandb=wandb )
combine_ab = 'python datasets/combine_A_and_B.py --fold_A /Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned/trainA --fold_B /Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned/trainB --fold_AB /Users/ottotavares/Documents/COPPE/projetoTB/China/CXR_png/unaligned'
run(combine_ab)
# pix2pix train/test
#train_cmd = 'python train.py --model pix2pix --name ' + 'test_%d_sort_%d'%(test,sort) + '--dataroot . --n_epochs 1 --n_epochs_decay 5 --save_latest_freq 10 --display_id -1'
#run(train_cmd)
#run('python test.py --model pix2pix --name temp_pix2pix --dataroot ./datasets/mini_pix2pix --num_test 1')
|
python
|
# Return some system information
# Because the universe will never have enough scripts that do this
# It's only going to work on fairly recent *NIX platforms
import os, socket
uname_ = os.uname()
system = uname_[0]
host = socket.gethostname()
kernel = uname_[2]
release = uname_[3]
processor = uname_[4]
|
python
|
## Copyright 2004-2007 Virtutech AB
##
## The contents herein are Source Code which are a subset of Licensed
## Software pursuant to the terms of the Virtutech Simics Software
## License Agreement (the "Agreement"), and are being distributed under
## the Agreement. You should have received a copy of the Agreement with
## this Licensed Software; if not, please contact Virtutech for a copy
## of the Agreement prior to using this Licensed Software.
##
## By using this Source Code, you agree to be bound by all of the terms
## of the Agreement, and use of this Source Code is subject to the terms
## the Agreement.
##
## This Source Code and any derivatives thereof are provided on an "as
## is" basis. Virtutech makes no warranties with respect to the Source
## Code or any derivatives thereof and disclaims all implied warranties,
## including, without limitation, warranties of merchantability and
## fitness for a particular purpose and non-infringement.
import copy
from sim_core import *
from cli import *
from configuration import *
# This is the generic flash-memory support provided by Simics
# The following flash memories are pre-configured:
# * Intel 28FxxxC3x (advanced boot block flash)
# * Intel 28FxxxJ3 (strataflash)
# * Intel 28FxxxJ3A (strataflash)
# * Intel 28FxxxP30x (strataflash)
# * Intel 28F160S3
# * Intel 28F320S3
# * Am29F040B
# * Am29F016D
# * Am29SL160CT
# * Am29LV64xD (and all L/H versions)
# * Am29LV640MH
# * Am29LV160MB
# * SG29GL064M
# * Am29DL323B
# * Am29DL323G_
# * MBM29LV650UE
# * Amd S29GLxxxN
# * AT49BV001A
# * Am29DL163D
# To add another flash configuration, edit the flash_descriptions table to
# fill in the correct values for the flash you want to simulate. Also add a
# finish function that will parse the product name to complete the
# information. You can use the default function if no more configuration is
# needed.
# You can then use the add_flash_memory() function to add a flash memory in a
# standard python-based Simics configuration, or use the flash_create_memory()
# function that will simply return a list of objects to add to your own
# configuration
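# A minimal, hypothetical usage sketch (the object name, memory-space name and
# mapping address below are examples only, not part of this file):
#
#   add_flash_memory("flash0", "28F160C3T", interleave = 2, bus_width = 32,
#                    files = [], mappings = [["phys_mem0", 0xfff00000]])
#
# flash_get_size("28F160C3T", 2) returns the total size occupied by the same
# configuration, which is convenient when computing mapping addresses.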
# The following commands are supported for Intel flash:
# - CFI support (if any)
# - read identifier mode (including block locking status)
# - read/clear status register
# - write buffer
# - word program
# - block erase
# - simple locking scheme (strataflash) / advanced locking scheme
# The following commands are supported for AMD flash:
# - CFI support (if any)
# - autoselect mode (including protect verify)
# - program
# - sector erase
# - unlock bypass/program/reset (not tested properly)
########################################################
# EDIT TO ADD A NEW FLASH TYPE --- Flash Configuration #
########################################################
#
# Default completion function
#
# finish(product_no, config) -> (config_updated, size of one flash chip, in bytes)
def finish_default(product_no, config):
size = sum(config["unit-size"]) # compute total size
return (config, size)
#
# Completion function for:
# Intel 28F800C3T
# 160 B
# 320
# 640
#
# finish(product_no, config) -> (config_updated, size of one flash chip, in bytes)
def finish_config_28F___C3_(product_no, config):
# set size-dependent parameters
if product_no[3:6] == "800": # 8Mbit, Bottom
device_geometry_definition = [0x14, 0x01, 0x00, 0x00, 0x00, 0x02, 0x07, 0x00, 0x20, 0x00, 0x0E, 0x00, 0x00, 0x01]
device_id = 0x88C1
big_blocks = 15
elif product_no[3:6] == "160": # 16Mbit, Bottom
device_geometry_definition = [0x15, 0x01, 0x00, 0x00, 0x00, 0x02, 0x07, 0x00, 0x20, 0x00, 0x1E, 0x00, 0x00, 0x01]
device_id = 0x88C3
big_blocks = 31
elif product_no[3:6] == "320": # 32Mbit, Bottom
device_geometry_definition = [0x16, 0x01, 0x00, 0x00, 0x00, 0x02, 0x07, 0x00, 0x20, 0x00, 0x3E, 0x00, 0x00, 0x01]
device_id = 0x88C5
big_blocks = 63
elif product_no[3:6] == "640": # 64Mbit, Bottom
device_geometry_definition = [0x17, 0x01, 0x00, 0x00, 0x00, 0x02, 0x07, 0x00, 0x20, 0x00, 0x7E, 0x00, 0x00, 0x01]
device_id = 0x88CD
big_blocks = 127
else:
return "The product no (" + product_no + ") should contain a valid size (800, 160, 320 or 640), not '" + product_no[3:6] + "'"
# size
size = 1 << device_geometry_definition[0]
    # check where the boot block is
if product_no[8] == "T":
boot_block = "top"
elif product_no[8] == "B":
boot_block = "bottom"
else:
return "The product no (" + product_no + ") should end with T (for top) or B (for bottom), not '" + product_no[8] + "'"
# cfi_query
for i in range(0x27, 0x2D):
config["cfi-query"][i] = device_geometry_definition[i - 0x27]
if boot_block == "bottom":
# bottom blocking is already configured
for i in range(0x2D, 0x35):
config["cfi-query"][i] = device_geometry_definition[i - 0x27]
else:
# top blocking is inverted
for i in range(0x2D, 0x31):
config["cfi-query"][i] = device_geometry_definition[i - 0x27 + 4]
for i in range(0x31, 0x35):
config["cfi-query"][i] = device_geometry_definition[i - 0x27 - 4]
# device-id
if boot_block == "top":
config['device-id'] = device_id - 1
else:
config['device-id'] = device_id
# unit_size
if boot_block == "top":
config['unit-size'] = [0x10000 for i in range(big_blocks)] + [0x2000 for i in range(8)]
else:
config['unit-size'] = [0x2000 for i in range(8)] + [0x10000 for i in range(big_blocks)]
return (config, size)
#
# Completion function for:
# Intel 28F160S3
# 320
#
#
# finish(product_no, config) -> (config_updated, size of one flash chip, in bytes)
def finish_config_28F___S3 (product_no, config):
    # set size-dependent parameters
if product_no[3:6] == "160": # 16Mbit
device_geometry_definition = [0x15, 0x02, 0x00, 0x05, 0x00, 0x01, 0x1f, 0x00, 0x00, 0x01]
config['device-id'] = 0xd0
blocks = 32
elif product_no[3:6] == "320": # 32Mbit
device_geometry_definition = [0x16, 0x02, 0x00, 0x05, 0x00, 0x01, 0x3f, 0x00, 0x00, 0x01]
config['device-id'] = 0xd4
blocks = 64
else:
return "The product no (" + product_no + ") should contain a valid size (160 or 320), not '" + product_no[3:6] + "'"
# size
size = 1 << device_geometry_definition[0]
# cfi_query
for i in range(0x27, 0x31):
config["cfi-query"][i] = device_geometry_definition[i - 0x27]
config['unit-size'] = [0x10000 for i in range(blocks)]
return (config, size)
#
# Completion function for:
# Intel 28F320J3A
# 640
# 128
#
# finish(product_no, config) -> (config_updated, size of one flash chip, in bytes)
def finish_config_28F___J3A(product_no, config):
# set size-dependent parameters
if product_no[3:6] == "320": # 32Mbit
device_geometry_definition = [0x16, 0x02, 0x00, 0x05, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x02]
config['device-id'] = 0x0016
blocks = 32
elif product_no[3:6] == "640": # 64Mbit
device_geometry_definition = [0x17, 0x02, 0x00, 0x05, 0x00, 0x01, 0x3F, 0x00, 0x00, 0x02]
config['device-id'] = 0x0017
blocks = 64
elif product_no[3:6] == "128": # 128Mbit
device_geometry_definition = [0x18, 0x02, 0x00, 0x05, 0x00, 0x01, 0x7F, 0x00, 0x00, 0x02]
config['device-id'] = 0x0018
blocks = 128
else:
return "The product no (" + product_no + ") should contain a valid size (320, 640 or 128), not '" + product_no[3:6] + "'"
# size
size = 1 << device_geometry_definition[0]
# cfi_query
for i in range(0x27, 0x31):
config["cfi-query"][i] = device_geometry_definition[i - 0x27]
config['unit-size'] = [0x20000 for i in range(blocks)]
return (config, size)
#
# Completion function for:
# Intel 28F320J3
# 640
# 128
# 256
#
# finish(product_no, config) -> (config_updated, size of one flash chip, in bytes)
def finish_config_28F___J3(product_no, config):
# set size-dependent parameters
if product_no[3:6] == "320": # 32Mbit
device_geometry_definition = [0x16, 0x02, 0x00, 0x05, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x02]
config['device-id'] = 0x0016
blocks = 32
elif product_no[3:6] == "640": # 64Mbit
device_geometry_definition = [0x17, 0x02, 0x00, 0x05, 0x00, 0x01, 0x3F, 0x00, 0x00, 0x02]
config['device-id'] = 0x0017
blocks = 64
elif product_no[3:6] == "128": # 128Mbit
device_geometry_definition = [0x18, 0x02, 0x00, 0x05, 0x00, 0x01, 0x7F, 0x00, 0x00, 0x02]
config['device-id'] = 0x0018
blocks = 128
elif product_no[3:6] == "256": # 256Mbit
device_geometry_definition = [0x19, 0x02, 0x00, 0x05, 0x00, 0x01, 0xFF, 0x00, 0x00, 0x02]
config['device-id'] = 0x001D
blocks = 256
else:
return "The product no (" + product_no + ") should contain a valid size (320, 640, 128 or 256), not '" + product_no[3:6] + "'"
# size
size = 1 << device_geometry_definition[0]
# cfi_query
for i in range(0x27, 0x31):
config["cfi-query"][i] = device_geometry_definition[i - 0x27]
config['unit-size'] = [0x20000 for i in range(blocks)]
return (config, size)
# Completion function for:
# Intel P30 strataflash
#
def finish_config_28F___P30_(product_no, config):
# Add intel specific extended query data
config['cfi-query'] += [0x00] * (0x10a - len(config['cfi-query']))
config['cfi-query'] += [0x50, 0x52, # 0x10a
0x49, 0x31, 0x34, 0xe6, # 0x10c
0x01, 0x00, 0x00, 0x01, # 0x110
0x03, 0x00, 0x18, 0x90, # 0x114
0x02, 0x80, 0x00, 0x03, # 0x118
0x03, 0x89, 0x00, 0x00, # 0x11c
0x00, 0x00, 0x00, 0x00, # 0x120
0x10, 0x00, 0x04, 0x03, # 0x124
0x04, 0x01, 0x02, 0x03, # 0x128
0x07, 0x01, 0x24, 0x00, # 0x12c
0x01, 0x00, 0x11, 0x00, # 0x130
0x00, 0x02, None, None, # 0x134
None, None, 0x64, 0x00, # 0x138
0x02, 0x03, 0x00, 0x80, # 0x13c
0x00, 0x00, 0x00, 0x80, # 0x140
None, None, None, None, # 0x144
0x64, 0x00, 0x02, 0x03, # 0x148
0x00, 0x80, 0x00, 0x00, # 0x14c
0x00, 0x80, 0xff, 0xff, # 0x150
0xff, 0xff, 0xff] # 0x154
# Where is the boot block?
if product_no[-1] == "T":
boot_block = "top"
elif product_no[-1] == "B":
boot_block = "bottom"
else:
return ("The product no (" + product_no + ") should end with TQ0/T00 "
"(for top) or BQ0/B00 (for bottom), not '"
+ product_no[-3] + "'")
# Chip size?
if product_no[3:6] == "640": # 64 Mbit
blocks = 64
config['device-id'] = iff(boot_block == "bottom", 0x881a, 0x8817)
device_geometry = [0x17, 0x01, 0x00, 0x06, 0x00, 0x02]
if boot_block == "bottom":
device_geometry += [0x03, 0x00, 0x80, 0x00, 0x3e, 0x00, 0x00, 0x02]
else:
device_geometry += [0x3e, 0x00, 0x00, 0x02, 0x03, 0x00, 0x80, 0x00]
device_geometry += [0x00, 0x00, 0x00, 0x00]
if boot_block == "bottom":
config['cfi-query'][0x136:0x13a] = [0x03, 0x00, 0x80, 0x00]
config['cfi-query'][0x144:0x148] = [0x3e, 0x00, 0x00, 0x02]
else:
config['cfi-query'][0x136:0x13a] = [0x3e, 0x00, 0x00, 0x02]
config['cfi-query'][0x144:0x148] = [0x03, 0x00, 0x80, 0x00]
elif product_no[3:6] == "128": # 128 Mbit
blocks = 128
config['device-id'] = iff(boot_block == "bottom", 0x881b, 0x8818)
device_geometry = [0x18, 0x01, 0x00, 0x06, 0x00, 0x02]
if boot_block == "bottom":
device_geometry += [0x03, 0x00, 0x80, 0x00, 0x7e, 0x00, 0x00, 0x02]
else:
device_geometry += [0x7e, 0x00, 0x00, 0x02, 0x03, 0x00, 0x80, 0x00]
device_geometry += [0x00, 0x00, 0x00, 0x00]
if boot_block == "bottom":
config['cfi-query'][0x136:0x13a] = [0x03, 0x00, 0x80, 0x00]
config['cfi-query'][0x144:0x148] = [0x7e, 0x00, 0x00, 0x02]
else:
config['cfi-query'][0x136:0x13a] = [0x7e, 0x00, 0x00, 0x02]
config['cfi-query'][0x144:0x148] = [0x03, 0x00, 0x80, 0x00]
elif product_no[3:6] == "256": # 256 Mbit
blocks = 256
config['device-id'] = iff(boot_block == "bottom", 0x891c, 0x8919)
device_geometry = [0x19, 0x01, 0x00, 0x06, 0x00, 0x02]
if boot_block == "bottom":
device_geometry += [0x03, 0x00, 0x80, 0x00, 0xfe, 0x00, 0x00, 0x02]
else:
device_geometry += [0xfe, 0x00, 0x00, 0x02, 0x03, 0x00, 0x80, 0x00]
device_geometry += [0x00, 0x00, 0x00, 0x00]
if boot_block == "bottom":
config['cfi-query'][0x136:0x13a] = [0x03, 0x00, 0x00, 0x80]
config['cfi-query'][0x144:0x148] = [0xfe, 0x00, 0x00, 0x02]
else:
config['cfi-query'][0x136:0x13a] = [0xfe, 0x00, 0x00, 0x02]
config['cfi-query'][0x144:0x148] = [0x03, 0x00, 0x00, 0x80]
else:
return ("The product no (" + product_no + ") should contain a valid "
"size specification (640/128/256), not '"
+ product_no[3:6] + "'")
size = 1 << device_geometry[0]
for i in range(0x27, 0x39):
config['cfi-query'][i] = device_geometry[i - 0x27]
if boot_block == "top":
config['unit-size'] = [0x20000] * (blocks - 1) + [0x8000] * 4
else:
config['unit-size'] = [0x8000] * 4 + [0x20000] * (blocks - 1)
return (config, size)
#
# Completion function for:
# Am29DL323GB
# Am29DL323GT
#
# finish(product_no, config) -> (config_updated, size of one flash chip, in bytes)
def finish_config_Am29DL323G_(product_no, config):
    # check where the boot block is
if product_no[-1] == "T":
boot_block = "top"
elif product_no[-1] == "B":
boot_block = "bottom"
else:
return "The product no (" + product_no + ") should end with T (for top) or B (for bottom), not '" + product_no[-1] + "'"
if boot_block == "top":
config['device-id'] = 0x2250
config['unit-size'] = [0x10000]*63 + [0x2000]*8
config["cfi-query"][0x4f] = 0x03
else:
config['device-id'] = 0x2253
config['unit-size'] = [0x2000]*8 + [0x10000]*63
config["cfi-query"][0x4f] = 0x02
return finish_default(product_no, config)
#
# Completion function for:
# S29GL128N
# S29GL256N
# S29GL512N
#
def finish_config_S29GL___N(product_no, config):
# check size
if product_no[5:8] == "128":
size = 128
elif product_no[5:8] == "256":
size = 256
elif product_no[5:8] == "512":
size = 512
else:
return "The product no (" + product_no + ") is not supported. Only 128,256 or 512 Mbit are supported."
config['unit-size'] = [128*1024]*size
if size == 128:
config["cfi-query"][0x27] = 0x18
config["cfi-query"][0x2d] = 0x7f
config["cfi-query"][0x2e] = 0x00
elif size == 256:
config["cfi-query"][0x27] = 0x19
config["cfi-query"][0x2d] = 0xff
config["cfi-query"][0x2e] = 0x00
else:
config["cfi-query"][0x27] = 0x1a
config["cfi-query"][0x2d] = 0xff
config["cfi-query"][0x2e] = 0x01
# not sure on this one
config["cfi-query"][0x4f] = 0x04 # bottom WP protect
#config["cfi-query"][0x4f] = 0x05 # top WP protect
return finish_default(product_no, config)
#
# list of completion functions
#
complete_functions = {
"28F___C3_" : finish_config_28F___C3_,
"28F___J3A" : finish_config_28F___J3A,
"28F___J3" : finish_config_28F___J3,
"28F___S3" : finish_config_28F___S3,
"28F___P30_" : finish_config_28F___P30_,
"82802-8" : finish_default,
"Am29F040B" : finish_default,
"Am29F016D" : finish_default,
"Am29SL160CT": finish_default,
"Am29LV640MH": finish_default,
"Am29LV64_D": finish_default,
"Am29LV160MB": finish_default,
"SG29GL064M": finish_default,
"Am29DL323B": finish_default,
"Am29DL323G_": finish_config_Am29DL323G_,
"MBM29LV650UE": finish_default,
"S29GL___N": finish_config_S29GL___N,
"AT49BV001A": finish_default,
"AT49BV001AT": finish_default,
"Am29DL163D": finish_default,
}
#
# static description of flash memory chips
#
flash_descriptions = {
"28F___C3_" : {
"cfi-query" : [0x89, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x03, # 0x10
0x00, 0x35, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0xB4, 0xC6, 0x05, # 0x1C
0x00, 0x0A, 0x00, 0x04, # 0x20
0x00, 0x03, 0x00, None, # 0x24
None, None, None, None, # 0x28
None, None, None, None, # 0x2C
None, None, None, None, # 0x30
None, # 0x34
0x50, 0x52, 0x49, 0x31, # 0x35 Extended Query
0x30, 0x66, 0x00, 0x00, # 0x39
0x00, 0x01, 0x03, 0x00, # 0x3D
0x33, 0xC0, 0x01, 0x80, # 0x41
0x00, 0x03, 0x03], # 0x45
"device-id" : None,
"manufacturer-id" : 0x0089, # intel
"max-chip-width" : 16, # 16-bits chips
"unit-size" : None,
"intel_write_buffer" : 0, # no write-buffer in C3
"intel_protection_program" : 1,
"intel_configuration" : 1,
"intel_lock" : 2 # advanced locking
},
"28F___P30_" : {
"cfi-query" : [0x89, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0c
0x51, 0x52, 0x59, 0x01, # 0x10
0x00, 0x0a, 0x01, 0x00, # 0x14
0x00, 0x00, 0x00, 0x17, # 0x18
0x20, 0x85, 0x95, 0x08, # 0x1c
0x09, 0x0a, 0x00, 0x01, # 0x20
0x01, 0x02, 0x00, None, # 0x24
0x01, 0x00, 0x06, 0x00, # 0x28
# Device geometry - filled in by complete function
None, None, None, None, # 0x2c
None, None, None, None, # 0x30
None, None, None, None, # 0x34
None],
"device-id" : None,
"manufacturer-id" : 0x0089, # Intel
"max-chip-width" : 16,
"unit-size" : None,
# TODO: verify these
"intel_write_buffer" : 1,
"intel_protection_program" : 1,
"intel_configuration" : 1,
"intel_lock" : 2 # Advanced locking
},
"28F___S3" : {
"cfi-query" : [0xb0, 0x00, 0x00, 0x00, # 0x00 Sharp Manufacturer ID
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x01, # 0x10
0x00, 0x31, 0x00, 0x00, # 0x14 0x15 is Pointer to Extended Query
0x00, 0x00, 0x00, 0x27, # 0x18
0x55, 0x27, 0x55, 0x03, # 0x1C
0x06, 0x0A, 0x0f, 0x04, # 0x20
0x04, 0x04, 0x04, None, # 0x24
None, None, None, None, # 0x28
None, None, None, None, # 0x2C
None,
0x50, 0x52, 0x49, 0x31, # 0x31 Extended Query
0x30, 0x0f, 0x00, 0x00, # 0x35
0x00, 0x01, 0x03, 0x00, # 0x39
0x50, 0x50], # 0x3D
"device-id" : None, #
"manufacturer-id" : 0x00b0, # Sharp Manufacturer ID is verbatim from Intel docs.
"max-chip-width" : 16, # 16-bits chips
"unit-size" : None,
"intel_write_buffer" : 1,
"intel_protection_program" : 0, # No protection command on S3
"intel_configuration" : 1,
"intel_lock" : 1 # Simple locking
},
"28F___J3A" : {
"cfi-query" : [0x89, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x01, # 0x10
0x00, 0x31, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x07, # 0x1C
0x07, 0x0A, 0x00, 0x04, # 0x20
0x04, 0x04, 0x00, None, # 0x24
None, None, None, None, # 0x28
None, None, None, None, # 0x2C
None,
0x50, 0x52, 0x49, 0x31, # 0x31 Extended Query
0x31, 0x0A, 0x00, 0x00, # 0x35
0x00, 0x01, 0x01, 0x00, # 0x39
0x33, 0x00, 0x01, 0x00, # 0x3D
0x03, 0x00], # 0x41
"device-id" : None,
"manufacturer-id" : 0x0089, # intel
"max-chip-width" : 16, # 16-bits chips
"unit-size" : None,
"intel_write_buffer" : 1,
"intel_protection_program" : 1,
"intel_configuration" : 1,
"intel_lock" : 1 # simple locking
},
"28F___J3" : {
"cfi-query" : [0x89, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x01, # 0x10
0x00, 0x31, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x08, # 0x1C
0x08, 0x0A, 0x00, 0x04, # 0x20
0x04, 0x04, 0x00, None, # 0x24
None, None, None, None, # 0x28
None, None, None, None, # 0x2C
None,
0x50, 0x52, 0x49, 0x31, # 0x31 Extended Query
0x31, 0x0A, 0x00, 0x00, # 0x35
0x00, 0x01, 0x01, 0x00, # 0x39
0x33, 0x00, 0x01, 0x80, # 0x3D
0x00, 0x03, 0x03, 0x03, # 0x41
0x00], # 0x45
"device-id" : None,
"manufacturer-id" : 0x0089, # intel
"max-chip-width" : 16, # 16-bits chips
"unit-size" : None,
"intel_write_buffer" : 1,
"intel_protection_program" : 1,
"intel_configuration" : 1,
"intel_lock" : 1 # simple locking
},
"82802-8" : { # Intel FWH
"device-id" : 0xAC,
"manufacturer-id" : 0x89, # intel
"max-chip-width" : 8,
"unit-size" : [0x10000 for i in range(16)],
"intel_write_buffer" : 1,
"intel_lock" : 1, # simple locking
"command-set": 0x0001, # Intel command-set, since no CFI structure
},
"Am29F040B" : {
"device-id" : 0xA4,
"manufacturer-id" : 0x01, # AMD
"max-chip-width" : 8, # 8-bits chips
"command-set": 0x0002, # AMD command-set, since no CFI structure
"unit-size" : [0x10000 for i in range(8)]
},
"AT49BV001A" : {
"device-id" : 0x05,
"manufacturer-id" : 0x1f, # Atmel
"max-chip-width" : 8,
"command-set" : 0x0002,
"unit-size" : [ 0x4000, 0x2000, 0x2000, 0x8000, 0x10000 ]
},
"AT49BV001AT" : {
"device-id" : 0x04,
"manufacturer-id" : 0x1f, # Atmel
"max-chip-width" : 8,
"command-set" : 0x0002,
"unit-size" : [ 0x10000, 0x8000, 0x2000, 0x2000, 0x4000 ]
},
"Am29F016D" : {
"cfi-query" : [0x01, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x02, # 0x10
0x00, 0x40, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x45, # 0x18
0x55, 0x00, 0x00, 0x03, # 0x1C
0x00, 0x0A, 0x00, 0x05, # 0x20
0x00, 0x04, 0x00, 0x15, # 0x24
0x00, 0x00, 0x00, 0x00, # 0x28
0x01, 0x1F, 0x00, 0x00, # 0x2C
0x01, 0x00, 0x00, 0x00, # 0x30
0x00, 0x00, 0x00, 0x00, # 0x34
0x00, 0x00, 0x00, 0x00, # 0x38
0x00, 0x00, 0x00, 0x00, # 0x3C
0x50, 0x52, 0x49, 0x31, # 0x40
0x31, 0x00, 0x02, 0x04, # 0x44
0x01, 0x04, 0x00, 0x00, # 0x48
0x00, 0x00, 0x00, 0x00],# 0x4C
"device-id" : 0xAD,
"manufacturer-id" : 0x01, # AMD
"max-chip-width" : 8, # 8-bits chips
"unit-size" : [0x10000 for i in range(32)],
},
"Am29SL160CT" : {
"cfi-query" : [0x01, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x02, # 0x10
0x00, 0x40, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x18, # 0x18
0x22, 0x00, 0x00, 0x04, # 0x1C
0x00, 0x0A, 0x00, 0x05, # 0x20
0x00, 0x04, 0x00, 0x15, # 0x24
0x02, 0x00, 0x00, 0x00, # 0x28
0x02, 0x07, 0x00, 0x20, # 0x2C
0x00, 0x1E, 0x00, 0x00, # 0x30
0x01, 0x00, 0x00, 0x00, # 0x34
0x00, 0x00, 0x00, 0x00, # 0x38
0x00, 0x00, 0x00, 0x00, # 0x3C
0x50, 0x52, 0x49, 0x31, # 0x40
0x30, 0x00, 0x02, 0x01, # 0x44
0x01, 0x04, 0x00, 0x00, # 0x48
0x00, 0x00, 0x00, 0x00],# 0x4C
"device-id" : 0x22A4,
"manufacturer-id" : 0x01, # AMD
"max-chip-width" : 16, # 16-bits chip
"unit-size" : [0x10000 for i in range(31)] + [0x2000 for i in range(8)],
},
"Am29LV640MH": {
"cfi-query" : [0x01, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x02, # 0x10
0x00, 0x40, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x07, # 0x1C
0x07, 0x0A, 0x00, 0x01, # 0x20
0x05, 0x04, 0x00, 0x17, # 0x24
0x02, 0x00, 0x05, 0x00, # 0x28
0x01, 0x7F, 0x00, 0x00, # 0x2C
0x01, 0x00, 0x00, 0x00, # 0x30
0x00, 0x00, 0x00, 0x00, # 0x34
0x00, 0x00, 0x00, 0x00, # 0x38
0x00, 0x00, 0x00, 0x00, # 0x3C
0x50, 0x52, 0x49, 0x31, # 0x40
0x33, 0x08, 0x02, 0x01, # 0x44
0x01, 0x04, 0x00, 0x00, # 0x48
0x01, 0xB5, 0xC5, 0x05, # 0x4C
0x01], # 0x50
"device-id" : [0x227E, 0x220C, 0x2201],
"manufacturer-id" : 0x01, # AMD
"max-chip-width" : 16, # 16-bits chip
"unit-size" : [0x10000 for i in range(128)],
},
"Am29LV64_D": {
"cfi-query" : [0x01, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x02, # 0x10
0x00, 0x40, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x04, # 0x1C
0x00, 0x0A, 0x00, 0x05, # 0x20
0x00, 0x04, 0x00, 0x17, # 0x24
0x01, 0x00, 0x00, 0x00, # 0x28
0x01, 0x7F, 0x00, 0x00, # 0x2C
0x01, 0x00, 0x00, 0x00, # 0x30
0x00, 0x00, 0x00, 0x00, # 0x34
0x00, 0x00, 0x00, 0x00, # 0x38
0x00, 0x00, 0x00, 0x00, # 0x3C
0x50, 0x52, 0x49, 0x31, # 0x40
0x33, 0x00, 0x02, 0x04, # 0x44
0x01, 0x04, 0x00, 0x00, # 0x48
0x00, 0xB5, 0xC5, 0x05], # 0x4C
"device-id" : 0x22D7,
"manufacturer-id" : 0x01, # AMD
"max-chip-width" : 16, # 16-bits chip
"unit-size" : [0x10000 for i in range(128)],
},
"Am29DL323G_": {
"cfi-query" : [0x01, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x02, # 0x10
0x00, 0x40, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x04, # 0x1C
0x00, 0x0A, 0x00, 0x05, # 0x20
0x00, 0x04, 0x00, 0x16, # 0x24
0x02, 0x00, 0x00, 0x00, # 0x28
0x02, 0x07, 0x00, 0x20, # 0x2C
0x00, 0x3e, 0x00, 0x00, # 0x30
0x01, 0x00, 0x00, 0x00, # 0x34
0x00, 0x00, 0x00, 0x00, # 0x38
0x00, 0x00, 0x00, 0x00, # 0x3C
0x50, 0x52, 0x49, 0x31, # 0x40
0x33, 0x04, 0x02, 0x01, # 0x44
0x01, 0x04, 0x30, 0x00, # 0x48
0x00, 0x85, 0x95, None],# 0x4C
"manufacturer-id" : 0x01, # AMD
"max-chip-width" : 16, # 16-bit chip
},
"SG29GL064M": {
"cfi-query" : [0x01, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x02, # 0x10
0x00, 0x40, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x07, # 0x1C
0x07, 0x0A, 0x00, 0x01, # 0x20
0x05, 0x04, 0x00, 0x17, # 0x24
0x02, 0x00, 0x05, 0x00, # 0x28
0x01, 0x7F, 0x00, 0x00, # 0x2C
0x01, 0x00, 0x00, 0x00, # 0x30
0x00, 0x00, 0x00, 0x00, # 0x34
0x00, 0x00, 0x00, 0x00, # 0x38
0x00, 0x00, 0x00, 0x00, # 0x3C
0x50, 0x52, 0x49, 0x31, # 0x40
0x33, 0x08, 0x02, 0x01, # 0x44
0x01, 0x04, 0x00, 0x00, # 0x48
0x01, 0xB5, 0xC5, 0x05, # 0x4C
0x01], # 0x50
"device-id" : [0x227E, 0x220C, 0x2201],
"manufacturer-id" : 0x01, # AMD
"max-chip-width" : 16, # 16-bits chip
"unit-size" : [0x10000 for i in range(128)],
},
"Am29LV160MB": {
"cfi-query" : [0x01, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x02, # 0x10
0x00, 0x40, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x07, # 0x1C
0x00, 0x0A, 0x00, 0x01, # 0x20
0x00, 0x04, 0x00, 0x15, # 0x24
0x02, 0x00, 0x00, 0x00, # 0x28
0x04, 0x00, 0x00, 0x40, # 0x2C
0x00, 0x01, 0x00, 0x20, # 0x30
0x00, 0x00, 0x00, 0x80, # 0x34
0x00, 0x1E, 0x00, 0x00, # 0x38
0x01, 0x00, 0x00, 0x00, # 0x3C
0x50, 0x52, 0x49, 0x31, # 0x40
0x33, 0x08, 0x02, 0x01, # 0x44
0x01, 0x04, 0x00, 0x00, # 0x48
0x00], # 0x4C
"device-id" : 0x2249,
"manufacturer-id" : 0x01, # AMD
"max-chip-width" : 16, # 16-bits chip
"unit-size" : [0x4000, 0x2000, 0x2000, 0x8000] + [0x10000 for i in range(31)],
},
"MBM29LV650UE": {
"cfi-query" : [0x04, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x02, # 0x10
0x00, 0x40, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x04, # 0x1C
0x00, 0x0A, 0x00, 0x05, # 0x20
0x00, 0x04, 0x00, 0x17, # 0x24
0x01, 0x00, 0x05, 0x00, # 0x28
0x01, 0x7F, 0x00, 0x00, # 0x2C
0x01, 0x00, 0x00, 0x00, # 0x30
0x00, 0x00, 0x00, 0x00, # 0x34
0x00, 0x00, 0x00, 0x00, # 0x38
0x00, 0x00, 0x00, 0x00, # 0x3C
0x50, 0x52, 0x49, 0x31, # 0x40
0x31, 0x01, 0x02, 0x04, # 0x44
0x01, 0x04, 0x00, 0x00, # 0x48
0x00, 0xB5, 0xC5, 0x05], # 0x4C
"device-id" : 0x22d7,
"manufacturer-id" : 0x04, # Spansion/Fujitsu
"max-chip-width" : 16, # 16-bits chip
"amd_ignore_cmd_address": 1,
"unit-size" : [0x10000 for i in range(128)],
},
"S29GL___N": {
"cfi-query" : [0x04, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x02, # 0x10
0x00, 0x40, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x07, # 0x1C
0x07, 0x0A, 0x00, 0x01, # 0x20
0x05, 0x04, 0x00, None, # 0x24
0x02, 0x00, 0x05, 0x00, # 0x28
0x01, None, None, 0x00, # 0x2C
0x02, 0x00, 0x00, 0x00, # 0x30
0x00, 0x00, 0x00, 0x00, # 0x34
0x00, 0x00, 0x00, 0x00, # 0x38
0x00, 0x00, 0x00, 0x00, # 0x3C
0x50, 0x52, 0x49, 0x31, # 0x40
0x33, 0x10, 0x02, 0x01, # 0x44
0x00, 0x08, 0x00, 0x00, # 0x48
0x02, 0xB5, 0xC5, None, # 0x4C
0x01], # 0x50
"device-id" : 0x22de,
"manufacturer-id" : 0x01,
"max-chip-width" : 16, # 16-bits chip
},
"Am29DL163D" : {
"cfi-query" : [0x01, 0x00, 0x00, 0x00, # 0x00
0x00, 0x00, 0x00, 0x00, # 0x04
0x00, 0x00, 0x00, 0x00, # 0x08
0x00, 0x00, 0x00, 0x00, # 0x0C
0x51, 0x52, 0x59, 0x02, # 0x10
0x00, 0x40, 0x00, 0x00, # 0x14
0x00, 0x00, 0x00, 0x27, # 0x18
0x36, 0x00, 0x00, 0x04, # 0x1C
0x00, 0x0A, 0x00, 0x05, # 0x20
0x00, 0x04, 0x00, 0x15, # 0x24
0x02, 0x00, 0x00, 0x00, # 0x28
0x02, 0x07, 0x00, 0x20, # 0x2C
0x00, 0x1E, 0x00, 0x00, # 0x30
0x01, 0x00, 0x00, 0x00, # 0x34
0x00, 0x00, 0x00, 0x00, # 0x38
0x00, 0x00, 0x00, 0x00, # 0x3C
0x50, 0x52, 0x49, 0x31, # 0x40
0x31, 0x00, 0x02, 0x01, # 0x44
0x01, 0x04, 0x18, 0x00, # 0x48
0x00, 0x85, 0x95, 0x03],# 0x4C
"device-id" : 0x28,
"manufacturer-id" : 0x01, # AMD
"max-chip-width" : 16, # 16-bits chip
"unit-size" : [0x10000 for i in range(31)] + [0x2000 for i in range(8)],
},
}
##############################
# End of flash configuration #
##############################
def obj_name(name):
if len(machine) != 0:
return machine + name
else:
return name
# return 0 if not matching, 1 if matching
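# '_' characters in str1 act as single-character wildcards, so a pattern such
# as "28F___C3_" matches any product number of the same length whose remaining
# characters are identical.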
def compare_product_no(str1, str2):
l1 = len(str1)
l2 = len(str2)
if l1 != l2:
return 0
else:
for i in range(0,l2):
if str1[i] != "_" and str1[i] != str2[i]:
return 0
return 1
def match_product_no(def_list, product_no):
for p in def_list.keys():
if compare_product_no(p, product_no):
return p
return None
#
# add_flash_memory arguments:
#
# * mandatory arguments
#
# name: base name for all objects (flash, ram, image)
# product_no: product number, like 28F160C3T
# interleave: 1,2,4,8
# bus_width: 8,16,32,64
# files: like image file attribute (maybe [])
# mappings: [[object, address]*] (maybe [])
#
# * optional arguments:
# mapping_template base template to use for mappings, if more values needs
# to be filled in
# ... attributes set in the generic-flash-memory objects
#
def add_flash_memory(name, product_no, interleave, bus_width, files, mappings,
mapping_template = [None, None, 0, 0, None, None],
queue = None,
accept_inquiries = 0,
accept_smaller_reads = 1,
accept_smaller_writes = 0,
big_endian = 0):
(list, size) = flash_create_memory(obj_name(name), product_no, interleave,
bus_width, files, queue,
accept_inquiries, accept_smaller_reads,
accept_smaller_writes,
big_endian)
# add objects to the list
object_list[machine] += list
# map the flash where asked to
for m in mappings:
map = get_attribute(object_list[machine], obj_name(m[0]), "map")
# fill the template
map_entry = mapping_template[:]
map_entry[0] = m[1] # address
map_entry[1] = OBJ(obj_name(name)) # translator object
map_entry[4] = size # size
map_entry[5] = OBJ(obj_name(name + "-ram")) # target object
# set the mapping
map += [map_entry]
set_attribute(object_list[machine], obj_name(m[0]), "map", map)
#
# create a list of objects representing the flash memory
# see above function for parameters
#
def flash_create_memory(name, product_no, interleave, bus_width,
files = [],
queue = None,
accept_inquiries = 0,
accept_smaller_reads = 1,
accept_smaller_writes = 0,
big_endian = 0):
# find the description
pn = match_product_no(flash_descriptions, product_no)
if not pn:
print "add_flash_memory():"
print (" No product were found matching the product number '"
+ product_no + "'")
print (" It should be one of the following (with '_' replaced by an "
"appropriate letter or number):")
print " ", flash_descriptions.keys()
return
config = copy.deepcopy(flash_descriptions[pn])
ret = complete_functions[pn](product_no, config)
if type(ret) == type(""):
print "add_flash_memory():"
print " " + ret
return
else:
(config, size) = ret
# compute the total size
size *= interleave
# complete the configuration
config['interleave'] = interleave
config['bus_width'] = bus_width
config['accept_inquiries'] = accept_inquiries
config['accept_smaller_reads'] = accept_smaller_reads
config['accept_smaller_writes'] = accept_smaller_writes
config['big_endian'] = big_endian
config['storage_ram'] = OBJ(name + "-ram")
return ([OBJECT(name + "-ram", "ram",
image = OBJ(name + "-image"))] +
iff(queue,
[OBJECT(name + "-image", "image",
queue = OBJ(queue),
size = size,
files = files)] +
[OBJECT(name,
"generic-flash-memory",
queue = OBJ(queue),
**config)],
[OBJECT(name + "-image", "image",
size = size,
files = files)] +
[OBJECT(name,
"generic-flash-memory",
**config)]),
size)
#
# Return the total size in bytes of the memory occupied by the flash system
# (useful for mappings)
#
def flash_get_size(product_no, interleave):
pn = match_product_no(flash_descriptions, product_no)
config = copy.deepcopy(flash_descriptions[pn])
ret = complete_functions[pn](product_no, config)
if type(ret) == type(""):
print "add_flash_memory():"
print " " + ret
return
else:
(config, size) = ret
size *= interleave
return size
|
python
|
import numpy as np
from sklearn.ensemble import RandomForestRegressor
import joblib  # sklearn.externals.joblib was removed in recent scikit-learn releases
sgids = joblib.load('sgids.pkl')
sgdiv = joblib.load('data/sg_div_tr_ns.pkl')
params = joblib.load('params.pkl')
dt = dict.fromkeys(sgids)
sgid = sgids[0]
for i, sgid in enumerate(sgids):
print(f'{sgid} ({i+1} / {len(sgids)})')
xtr, ytr, xte, yte = sgdiv[sgid]
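    # Balance the sample weights: near-zero targets keep weight 1 and the
    # remaining targets are reweighted so both groups contribute roughly
    # equal total weight to the fit.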
n_neg = len(ytr[np.isclose(ytr, 0)])
n_ot = len(ytr) - n_neg
ot_weight = n_neg / n_ot
sw = np.ones_like(ytr)
sw[~np.isclose(ytr, 0)] = ot_weight
rf = RandomForestRegressor(**params)
rf.fit(xtr, ytr, sample_weight=sw)
ypred = rf.predict(xte)
dt[sgid] = (yte, ypred)
joblib.dump(dt, 'result/sg_div_ns_sp_res.pkl')
|
python
|
# -*- coding: utf-8 -*-
#Affine Transformation
import numpy as np
import cv2
from matplotlib import pyplot as plt
img=cv2.imread('resimler/sudoku.png')
rows,cols,ch=img.shape
pts1 = np.float32([[50,50],[200,50],[50,200]])
pts2=np.float32([[10,100],[200,50],[100,250]])
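# Three source-to-destination point pairs uniquely determine the 2x3 affine matrix.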
M = cv2.getAffineTransform(pts1,pts2)
dst = cv2.warpAffine(img,M,(cols,rows))
plt.subplot(121),plt.imshow(img),plt.title('input')
plt.subplot(122),plt.imshow(dst),plt.title('output')
plt.show()
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This file contains the implementation of the HTTP server that serves
the web API for the serial port.
"""
import logging
import threading
import typing as t
import flask
import flask_restful
import waitress
from flask_cors import CORS
from . import connection # for typing
from . import disconnect
class EndpointExistsException(Exception):
pass
class ConnectionResource(flask_restful.Resource):
"""A custom resource object that is built to be used with `RestApiHandler` and `ConnectionRoutes`.
This class is to be extended and used like the `Resource` class.
Have `get()`, `post()`, and other methods for the types of responses you need.
"""
# typing for autocompletion
conn: connection.Connection
other: t.Dict[str, connection.Connection]
# functions will be implemented in subclasses
class RestApiHandler:
"""A handler for creating endpoints with the `Connection` and `Connection`-based objects.
This class provides the framework for adding custom endpoints for doing
custom things with the serial connection and running the local server
that will host the API. It uses a `flask_restful` object as its back end.
Note that endpoints cannot have the names `/register` or `/recall`.
Additionally, resource classes have to extend the custom `ConnectionResource` class
from this library, not the `Resource` from `flask_restful`.
`500 Internal Server Error`s will occur with endpoints dealing with the connection
if the serial port is disconnected. The server will spawn another thread that will
immediately try to reconnect the serial port if it is disconnected. However, note
that the receive and send queues will **reset** when the serial port is disconnected.
If another process accesses an endpoint while another is
currently being used, then it will respond with
`503 Service Unavailable`.
More information on [Flask](https://flask.palletsprojects.com/en/2.0.x/) and [flask-restful](https://flask-restful.readthedocs.io/en/latest/).
Register and recall endpoints:
- `/register` (GET): An endpoint to register an IP; other endpoints will result in `400` status code
if they are accessed without accessing this first (unless `has_register_recall` is False);
if an IP is already registered then this will result in `400`; IPs must call this first before
accessing serial port (unless `has_register_recall` is False)
- `/recall` (GET): After registered, can call `/recall` to "free" IP from server, allowing other IPs to
call `/register` to use the serial port
"""
def __init__(
self,
conn: connection.Connection,
has_register_recall: bool = True,
add_cors: bool = False,
catch_all_404s: bool = True,
**kwargs: t.Any,
) -> None:
"""Constructor for class
Parameters:
- `conn` (`Connection`): The `Connection` object the API is going to be associated with.
- `has_register_recall` (bool): If False, removes the `/register` and `/recall` endpoints
so the user will not have to use them in order to access the other endpoints of the API.
That is, visiting endpoints will not respond with a 400 status code even if `/register` was not
accessed. By default True.
- `add_cors` (bool): If True, then the Flask app will have [cross origin resource sharing](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS) enabled. By default False.
- `catch_all_404s` (bool): If True, then there will be JSON response for 404 errors. Otherwise, there will be a normal HTML response on 404. By default True.
- `**kwargs`, will be passed to `flask_restful.Api()`. See [here](https://flask-restful.readthedocs.io/en/latest/api.html#id1) for more info.
"""
# from above
self._conn = conn
self._has_register_recall = has_register_recall
# flask, flask_restful
self._app = flask.Flask(__name__)
self._api = flask_restful.Api(
self._app, catch_all_404s=catch_all_404s, **kwargs
)
if add_cors:
CORS(self._app)
# other
self._all_endpoints: t.List[
t.Tuple[str, t.Type[ConnectionResource]]
] = [] # list of all endpoints in tuple (endpoint str, resource class)
self._registered: t.Optional[
str
] = None # keeps track of who is registered; None if not registered
self._lock = (
threading.Lock()
) # for making sure only one thread is accessing Connection obj at a time
if has_register_recall:
# add /register and /recall endpoints
self._api.add_resource(self._register(), "/register")
self._api.add_resource(self._recall(), "/recall")
def __repr__(self) -> str:
"""Printing the API object"""
return (
f"RestApiHandler<id={hex(id(self))}>"
f"{{app={self._app}, api={self._api}, conn={self._conn}, "
f"registered={self._registered}, endpoints={self._all_endpoints}}}"
)
def add_endpoint(self, endpoint: str) -> t.Callable:
"""Decorator that adds an endpoint
This decorator should go above a class that
extends `ConnectionResource`. The class should
contain implementations of request methods such as
`get()`, `post()`, etc. similar to the `Resource`
class from `flask_restful`. To use the connection
object, use the `self.conn` attribute of the class
under the decorator.
For more information, see the `flask_restful` [documentation](https://flask-restful.readthedocs.io).
Note that duplicate endpoints will result in an exception.
If there are two classes of the same name, even in different
endpoints, the program will append underscores to the name
until there are no more repeats. For example, if one class is
named "Hello" and another class is also named "Hello",
then the second class name will be changed to "Hello_".
This happens because `flask_restful` interprets duplicate class
names as duplicate endpoints.
If another process accesses an endpoint while another is
currently being used, then it will respond with
`503 Service Unavailable`.
Parameters:
- `endpoint` (str): The endpoint to the resource. Cannot repeat.
`/register` and `/recall` cannot be used, even if
`has_register_recall` is False
"""
def _checks(resource: t.Any) -> None:
"""Checks endpoint and resource"""
# check if endpoint exists already
check = [i for i, _ in self._all_endpoints]
if endpoint in check:
raise EndpointExistsException(f'Endpoint "{endpoint}" already exists')
# check that resource is not None, if it is, did not return class
if resource is None:
raise TypeError(
"function that the decorator is above must return a class"
)
# check if resource is subclass of ConnectionResource
if not issubclass(resource, ConnectionResource):
raise TypeError("resource has to extend com_server.ConnectionResource")
            # check if resource name is taken, if so, change it (flask_restful interprets duplicate names as multiple endpoints)
names = [i.__name__ for _, i in self._all_endpoints]
if resource.__name__ in names:
s = f"{resource.__name__}"
while s in names:
# append underscore until no matching
s += "_"
resource.__name__ = s
def _outer(resource: t.Type[ConnectionResource]) -> t.Type:
# checks; will raise exception if fails
_checks(resource)
# assign connection obj
resource.conn = self._conn
# req methods; _self is needed as these will be part of class functions
def _dec(func: t.Callable) -> t.Callable:
def _inner(_self, *args: t.Any, **kwargs: t.Any) -> t.Any:
ip = flask.request.remote_addr
if self._has_register_recall and (
not self._registered or self._registered != ip
):
# respond with 400 if not registered
flask_restful.abort(
400, message="Not registered; only one connection at a time"
)
elif self._lock.locked():
# if another endpoint is currently being used
flask_restful.abort(
503,
message="An endpoint is currently in use by another process.",
)
else:
with self._lock:
val = func(_self, *args, **kwargs)
return val
return _inner
# replace functions in class with new functions that check if registered
if hasattr(resource, "get"):
resource.get = _dec(resource.get)
if hasattr(resource, "post"):
resource.post = _dec(resource.post)
if hasattr(resource, "head"):
resource.head = _dec(resource.head)
if hasattr(resource, "put"):
resource.put = _dec(resource.put)
if hasattr(resource, "delete"):
resource.delete = _dec(resource.delete)
self._all_endpoints.append((endpoint, resource))
return resource
return _outer
def add_resource(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Calls `flask_restful.add_resource`.
Allows adding endpoints that do not interact with the serial port.
See [here](https://flask-restful.readthedocs.io/en/latest/api.html#flask_restful.Api.add_resource)
for more info on `add_resource` and [here](https://flask-restful.readthedocs.io)
for more info on `flask_restful` in general.
"""
return self._api.add_resource(*args, **kwargs)
def run_dev(self, logfile: t.Optional[str] = None, **kwargs: t.Any) -> None:
"""Launches the Flask app as a development server.
Not recommended because this is slower, and development features
such as debug mode and restarting do not work most of the time.
Use `run()` instead.
Parameters:
- `logfile` (str, None): The path of the file to log serial disconnect and reconnect events to.
Leave as None if you do not want to log to a file. By default None.
All arguments in `**kwargs` will be passed to `Flask.run()`.
For more information, see [here](https://flask.palletsprojects.com/en/2.0.x/api/#flask.Flask.run).
For documentation on Flask in general, see [here](https://flask.palletsprojects.com/en/2.0.x/).
Automatically disconnects the `Connection` object after
the server is closed.
Some arguments include:
- `host`: The host of the server. Ex: `localhost`, `0.0.0.0`, `127.0.0.1`, etc.
- `port`: The port to host it on. Ex: `5000` (default), `8000`, `8080`, etc.
- `debug`: If the app should be used in debug mode. Very unreliable and most likely will not work.
"""
if not self._conn.connected:
self._conn.connect() # connect the Connection obj if not connected
# register all endpoints to flask_restful
for endpoint, resource in self._all_endpoints:
self._api.add_resource(resource, endpoint)
# add disconnect handler, verbose is True
_logger = logging.getLogger("com_server_dev")
_disconnect_handler = disconnect.Reconnector(self._conn, _logger, logfile)
_disconnect_handler.start()
self._app.run(**kwargs)
self._conn.disconnect() # disconnect if stop running
def run(self, logfile: t.Optional[str] = None, **kwargs: t.Any) -> None:
"""Launches the Flask app as a Waitress production server (recommended).
Parameters:
- `logfile` (str, None): The path of the file to log serial disconnect and reconnect events to.
Leave as None if you do not want to log to a file. By default None.
All arguments in `**kwargs` will be passed to `waitress.serve()`.
For more information, see [here](https://docs.pylonsproject.org/projects/waitress/en/stable/arguments.html#arguments).
For Waitress documentation, see [here](https://docs.pylonsproject.org/projects/waitress/en/stable/).
Automatically disconnects the `Connection` object after
the server is closed.
If nothing is included, then runs on `http://0.0.0.0:8080`
"""
if not self._conn.connected:
self._conn.connect() # connect the Connection obj if not connected
# register all endpoints to flask_restful
for endpoint, resource in self._all_endpoints:
self._api.add_resource(resource, endpoint)
_logger = logging.getLogger("waitress")
# add disconnect handler, verbose is False
_disconnect_handler = disconnect.Reconnector(self._conn, _logger, logfile)
_disconnect_handler.start()
waitress.serve(self._app, **kwargs)
self._conn.disconnect() # disconnect if stop running
# backward compatibility
run_prod = run
@property
def flask_obj(self) -> flask.Flask:
"""
Gets the `Flask` object that is the backend of the endpoints and the server.
This can be used to modify and customize the `Flask` object in this class.
"""
return self._app
@property
def api_obj(self) -> flask_restful.Api:
"""
Gets the `flask_restful` API object that handles parsing the classes.
This can be used to modify and customize the `Api` object in this class.
"""
return self._api
def _register(self) -> t.Type[ConnectionResource]:
"""
Registers an IP to the server. Note that this is IP-based, not
process based, so if there are multiple process on the same computer
connecting to this, the server will not be able to detect it and may
lead to unexpected behavior.
Method: GET
Arguments:
None
Responses:
- `200 OK`: `{"message": "OK"}` if successful
- `400 Bad Request`:
- `{"message": "Double registration"}` if this endpoint is reached by an IP while it is registered
- `{"message": "Not registered; only one connection at a time"}` if this endpoint is reached while another IP is registered
"""
class _Register(ConnectionResource):
def get(_self) -> dict:
ip = flask.request.remote_addr
# check if already registered
if self._registered:
if self._registered == ip:
flask_restful.abort(400, message="Double registration")
else:
flask_restful.abort(
400, message="Not registered; only one connection at a time"
)
self._registered = ip
return {"message": "OK"}
return _Register
def _recall(self) -> t.Type[ConnectionResource]:
"""
Unregisters an IP from a server and allows other IPs to use it.
Method: GET
Arguments:
None
Responses:
        - `200 OK`: `{"message": "OK"}` if successful
- `400 Bad Request`:
- `{"message": "Nothing has been registered"}` if try to call without any IP registered
- `{"message": "Not same user as one in session"}` if called with different IP as the one registered
"""
class _Recall(ConnectionResource):
def get(_self) -> dict:
ip = flask.request.remote_addr
# check if not registered
if not self._registered:
flask_restful.abort(400, message="Nothing has been registered")
# check if ip matches
if ip != self._registered:
flask_restful.abort(400, message="Not same user as one in session")
self._registered = None
return {"message": "OK"}
return _Recall
|
python
|
"""
Version information for PyBioMed, created during installation.
Do not add this file to the repository.
"""
import datetime
version = '1.0'
date = 'Nov 7 9:17:16 2016'
dev = False
# Format: (name, major, min, revision)
version_info = ('PyBioMed', '1', '0', None)
# Format: a 'datetime.datetime' instance
date_info = datetime.datetime(2016, 11, 7, 9, 17)
# Format: (vcs, vcs_tuple)
vcs_info = (None, (None, None))
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
import os
import zipfile
import urllib
import dirconfig
import logging
from autobuild import autobuild,git_pull
import commands
import os
from githubdownload import GithubDownload
from repositories import repositories
from ziptool import ZipTool
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='/opt/push/jpush-docs/autobuild.log',
filemode='a+')
autobuild()
def git_push():
print (os.chdir("/opt/push/jpush-docs/jpush-docs/"))
add_result= (commands.getstatusoutput("git add ."))
commit_result=(commands.getstatusoutput('git commit -m "fix the pull to push"'))
push_result= (commands.getstatusoutput("git push origin renew"))
logging.info(push_result)
if(push_result[0]):
print "fail"
else:
print "success"
print ("git push origin renew")
downloader=GithubDownload()
for file_dic in repositories:
html_content = downloader.get_html(repositories[file_dic]["url"]+"/releases")
try:
title = downloader.get_title(html_content)
logging.info("get title success")
except:
logging.info("get title fail")
zip_url = downloader.get_code(html_content)
release_time = downloader.get_time(html_content)
release_version = downloader.get_version(html_content)
zip_folder=os.path.join(dirconfig.conf["zip"],repositories[file_dic]["name"])
if(not os.path.exists(zip_folder)):
os.mkdir(zip_folder)
zip_dir=downloader.get_dir(name=repositories[file_dic]["name"],version=release_version)
zip_tool=ZipTool()
if zip_tool.is_zip_exist(zip_dir):
logging.info("the file exist,pass")
logging.info("nothing to push")
else:
        logging.info("%s %s", repositories[file_dic]["name"], release_version)
logging.info("the file do not exist,replace")
logging.info("git pull,update the local file")
git_pull()
zip_tool.zip_download(zip_dir,release_version,repositories[file_dic]["url"])
zip_tool.unzip_file(repositories[file_dic]["name"],release_version)
zip_tool.replace_readme(repositories[file_dic]["name"],release_version)
git_push()
        logging.info("%s %s", repositories[file_dic]["name"], release_version)
logging.info("git push,update the remote file")
|
python
|
"""
Unit test for LocalProcessMemoryConsumption measurement.
"""
import os
import time
import pytest
import threading
import subprocess
from mlte._private.platform import is_windows, is_nix
from mlte.measurement.memory import LocalProcessMemoryConsumption
from mlte.measurement.memory.local_process_memory_consumption import (
MemoryStatistics,
)
from mlte.measurement.validation import Validator, Success, Failure
from ...support.meta import path_to_support
# The spin duration, in seconds
SPIN_DURATION = 5
def spin_for(seconds: int):
"""Run the spin.py program for `seconds`."""
path = os.path.join(path_to_support(), "spin.py")
prog = subprocess.Popen(["python", path, f"{seconds}"])
thread = threading.Thread(target=lambda: prog.wait())
thread.start()
return prog
@pytest.mark.skipif(
    is_windows(), reason="LocalProcessMemoryConsumption not supported on Windows."
)
def test_memory_nix_evaluate():
start = time.time()
prog = spin_for(5)
prop = LocalProcessMemoryConsumption()
# Capture memory consumption; blocks until process exit
stat = prop.evaluate(prog.pid)
assert len(str(stat)) > 0
assert int(time.time() - start) >= SPIN_DURATION
@pytest.mark.skipif(
    is_windows(), reason="LocalProcessMemoryConsumption not supported on Windows."
)
def test_memory_nix_validate_success():
prog = spin_for(5)
prop = LocalProcessMemoryConsumption().with_validator(
Validator("Succeed", lambda _: Success())
)
# Capture memory consumption; blocks until process exit
results = prop.validate(prog.pid)
assert len(results) == 1
assert bool(results[0])
result = results[0]
assert isinstance(result.data, MemoryStatistics)
@pytest.mark.skipif(
    is_windows(), reason="LocalProcessMemoryConsumption not supported on Windows."
)
def test_memory_nix_validate_failure():
prog = spin_for(5)
prop = LocalProcessMemoryConsumption().with_validator(
Validator("Fail", lambda _: Failure())
)
# Capture memory consumption; blocks until process exit
results = prop.validate(prog.pid)
assert len(results) == 1
assert not bool(results[0])
result = results[0]
assert isinstance(result.data, MemoryStatistics)
@pytest.mark.skipif(
    is_nix(), reason="LocalProcessMemoryConsumption Windows-only test; skipped on *NIX."
)
def test_memory_windows_evaluate():
with pytest.raises(RuntimeError):
_ = LocalProcessMemoryConsumption()
@pytest.mark.skipif(
    is_nix(), reason="LocalProcessMemoryConsumption Windows-only test; skipped on *NIX."
)
def test_memory_windows_validate():
with pytest.raises(RuntimeError):
_ = LocalProcessMemoryConsumption()
|
python
|
from .emonet import EmoNetPredictor
__version__ = '0.1.0'
|
python
|
"""Utilities for building command-line interfaces for your daemons"""
import inspect
from functools import wraps
import click
from .core import Daemon
from .helpers import MultiDaemon
def _parse_cli_options(func):
"""Parse click options from a function signature"""
options = []
for param in inspect.signature(func).parameters.values():
if param.kind not in {param.POSITIONAL_OR_KEYWORD, param.KEYWORD_ONLY}:
# Only keyword arguments are currently supported
continue
option_name = '--' + param.name.lower().replace('_', '-').strip('-')
kwargs = {}
if param.annotation in {str, int, float, bool}:
# Only basic types are currently supported
kwargs['type'] = param.annotation
if param.default != param.empty:
kwargs['default'] = param.default
else:
# If the param doesn't have a default, then it's required
kwargs['required'] = True
if param.annotation == bool or isinstance(param.default, bool):
if param.default is True:
# If the default of a boolean option is ``True``, then add a
                # ``--no-x`` off switch
option_name += '/--no-' + option_name.lstrip('-')
else:
# If the default is ``False``, just make it a basic flag
kwargs['is_flag'] = True
args = (option_name, param.name)
options.append((args, kwargs))
# Reverse it so the decorators are applied in the correct order
return options[::-1]
class DaemonCLI(click.MultiCommand):
"""A Command class for `click <http://click.pocoo.org/>`_.
This class automatically adds start, stop, restart, and status
subcommands for daemons.
"""
def __init__(
self, callback=None, daemon_params=None, is_worker=True,
daemon_class=Daemon, daemon=None, **kwargs):
"""Create a new DaemonCLI object."""
daemon_params = daemon_params or {}
if daemon is None:
self.daemon = daemon_class(**daemon_params)
else:
self.daemon = daemon
self.is_worker = (
is_worker and callback is not None and callable(callback))
if ((not self.daemon.worker or not callable(self.daemon.worker)) and
self.is_worker):
# If the callback is the worker, then don't pass the
# callback to the parent class so we don't call it twice
self.daemon.worker = callback
callback = None
# The context object will be the Daemon object
context_settings = {'obj': self.daemon}
if not kwargs.get('help'):
kwargs['help'] = self.daemon.worker.__doc__
super(DaemonCLI, self).__init__(
callback=callback, context_settings=context_settings, **kwargs)
def list_commands(self, ctx):
"""Get a list of subcommands."""
return self.daemon.list_actions()
def get_command(self, ctx, name):
"""Get a callable command object."""
if name not in self.daemon.list_actions():
return None
action = self.daemon.get_action(name)
@wraps(action)
def command(*args, **kwargs):
return action(*args, **kwargs)
if name in {'start', 'stop', 'restart'}:
if name in {'start', 'restart'}:
command = click.option(
'--debug', is_flag=True,
help='Do NOT detach and run in the background.',
)(command)
if name in {'stop', 'restart'}:
command = click.option(
'--force', is_flag=True,
help='Kill the daemon forcefully after the timeout.',
)(command)
command = click.option(
'--timeout', type=int, default=None,
help=('Number of seconds to wait for the daemon to stop. '
'Overrides "stop_timeout" from daemon definition.'),
)(command)
if isinstance(self.daemon, MultiDaemon):
command = click.option(
'--worker-id', type=int, default=None,
help='The ID of the worker to {}.'.format(name),
)(command)
elif name == 'status':
command = click.option(
'--fields', type=str, default=None,
help='Comma-separated list of process info fields to display.',
)(command)
command = click.option(
'--json', is_flag=True,
help='Show the status in JSON format.',
)(command)
if isinstance(self.daemon, MultiDaemon):
command = click.option(
'--worker-id', type=int, default=None,
help='The ID of the worker whose status to get.',
)(command)
else:
# This is a custom action so try to parse the CLI options
# by inspecting the function
for option_args, option_kwargs in _parse_cli_options(action):
command = click.option(
*option_args, **option_kwargs)(command)
# Make it into a click command
command = click.command(name)(command)
return command
def cli(**daemon_params):
return click.command(cls=DaemonCLI, daemon_params=daemon_params)
# Make a pass decorator for passing the Daemon object
pass_daemon = click.make_pass_decorator(Daemon)
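
# A minimal usage sketch (hedged: the pid file path and the do_work() helper
# below are hypothetical, and the accepted daemon_params depend on what the
# Daemon class from .core supports):
#
#     @cli(pidfile='/tmp/mydaemon.pid')
#     def main():
#         """Run the daemon's work loop."""
#         while True:
#             do_work()
#
#     if __name__ == '__main__':
#         main()
#
# Running the script then exposes the start/stop/restart/status subcommands
# that DaemonCLI generates from the Daemon object's actions.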
|
python
|
"""This module provide multiple test of ingestion services"""
import logging
from conftest import ValueStorage
import time
from server_automation.configuration import config
from server_automation.functions import executors
from server_automation.postgress import postgress_adapter
_log = logging.getLogger("server_automation.tests.test_ingestion_discrete")
initial_mapproxy_config = postgress_adapter.get_mapproxy_configs()
def test_manuel_discrete_ingest():
"""
    This test runs the full end-to-end (e2e) discrete ingestion flow
"""
# config.TEST_ENV = 'PROD'
# prepare test data
try:
resp = executors.init_ingestion_src(config.TEST_ENV)
error_msg = None
except Exception as e:
resp = None
error_msg = str(e)
assert (
resp
), f"Test: [{test_manuel_discrete_ingest.__name__}] Failed: on creating and updating layerSource folder [{error_msg}]"
_log.info(f"{resp}")
    # trigger and validate the start of a new manual job
product_id, product_version = resp["resource_name"].split("-")
ValueStorage.discrete_list.append(
{"product_id": product_id, "product_version": product_version}
)
source_directory = resp["ingestion_dir"]
_log.info(f"{product_id} {product_version}")
time.sleep(5)
try:
status_code, content, source_data = executors.start_manual_ingestion(
source_directory, config.TEST_ENV
)
except Exception as e:
status_code = "unknown"
content = str(e)
assert status_code == config.ResponseCode.Ok.value, (
f"Test: [{test_manuel_discrete_ingest.__name__}] Failed: trigger new ingest with status code: [{status_code}]\n"
f"details: [{content}]"
)
# validating following and completion of ingestion job
try:
ingestion_follow_state = executors.follow_running_task(
product_id, product_version
)
resp = ingestion_follow_state["status"] == config.JobStatus.Completed.name
error_msg = ingestion_follow_state["message"]
except Exception as e:
resp = None
error_msg = str(e)
assert (
resp
), f"Test: [{test_manuel_discrete_ingest.__name__}] Failed: on following ingestion process [{error_msg}]"
# validate new discrete on pycsw records
try:
resp, pycsw_record = executors.validate_pycsw(
config.GQK_URL, product_id, source_data
)
state = resp["validation"]
error_msg = resp["reason"]
except Exception as e:
state = False
error_msg = str(e)
assert state, (
f"Test: [{test_manuel_discrete_ingest.__name__}] Failed: validation of pycsw record\n"
f"related errors:\n"
f"{error_msg}"
)
# validating new discrete on mapproxy
# try:
# resp = executors.validate_new_discrete(pycsw_record, product_id, product_version)
# state = resp['validation']
# error_msg = resp['reason']
# except Exception as e:
# state = False
# error_msg = str(e)
#
# assert state, f'Test: [{test_manuel_discrete_ingest.__name__}] Failed: validation of mapproxy layer\n' \
# f'related errors:\n' \
# f'{error_msg}'
if config.DEBUG_MODE_LOCAL:
executors.cleanup_env(product_id, product_version, initial_mapproxy_config)
def test_watch_discrete_ingest():
"""
    This test runs ingestion by watching a shared folder
"""
# config.TEST_ENV = 'PROD'
    # stop watching the folder as a prerequisite
try:
resp = executors.stop_watch()
state = resp["state"]
error_msg = resp["reason"]
except Exception as e:
state = False
error_msg = str(e)
assert (
state
), f"Test: [{test_watch_discrete_ingest.__name__}] Failed: on stop agent watch [{error_msg}]"
try:
resp = executors.init_watch_ingestion_src(config.TEST_ENV)
error_msg = None
except Exception as e:
resp = None
error_msg = str(e)
assert (
resp
), f"Test: [{test_watch_discrete_ingest.__name__}] Failed: on creating and updating layerSource folder [{error_msg}]"
_log.info(f"{resp}")
    # trigger and validate the start of a new watch-based job
product_id, product_version = resp["resource_name"].split("-")
ValueStorage.discrete_list.append(
{"product_id": product_id, "product_version": product_version}
)
source_directory = resp["ingestion_dir"]
_log.info(f"{product_id} {product_version}")
    try:
        state, content, source_data = executors.start_watch_ingestion(
            source_directory, config.TEST_ENV
        )
    except Exception as e:
        state = False
        content = str(e)
    assert state, (
        f"Test: [{test_watch_discrete_ingest.__name__}] Failed: Trigger ingest process from watch agent\n"
        f"details: [{content}]"
    )
time.sleep(config.SYSTEM_DELAY) # validate generation of new job
# validating following and completion of ingestion job
try:
ingestion_follow_state = executors.follow_running_task(
product_id, product_version
)
resp = ingestion_follow_state["status"] == config.JobStatus.Completed.name
error_msg = ingestion_follow_state["message"]
except Exception as e:
resp = None
error_msg = str(e)
assert (
resp
), f"Test: [{test_watch_discrete_ingest.__name__}] Failed: on following ingestion process [{error_msg}]"
    _log.info(
        f"Test: [{test_watch_discrete_ingest.__name__}] ingestion process completed successfully"
    )
# validate new discrete on pycsw records
try:
resp, pycsw_record = executors.validate_pycsw(
config.GQK_URL, product_id, source_data
)
state = resp["validation"]
error_msg = resp["reason"]
except Exception as e:
state = False
error_msg = str(e)
assert state, (
f"Test: [{test_watch_discrete_ingest.__name__}] Failed: validation of pycsw record\n"
f"related errors:\n"
f"{error_msg}"
)
# validating new discrete on mapproxy
try:
resp = executors.validate_new_discrete(
pycsw_record, product_id, product_version
)
state = resp["validation"]
error_msg = resp["reason"]
except Exception as e:
state = False
error_msg = str(e)
##### enable after new version of ingestion with mapproxy live update
# time.sleep(60) # let mapproxy upload new changes
# assert state, f'Test: [{test_watch_discrete_ingest.__name__}] Failed: validation of mapproxy layer\n' \
# f'related errors:\n' \
# f'{error_msg}'
resp = executors.stop_watch()
if config.DEBUG_MODE_LOCAL:
executors.cleanup_env(product_id, product_version, initial_mapproxy_config)
def teardown_module(module): # pylint: disable=unused-argument
"""
    This method is executed after the test run - environment cleanup
"""
if config.CLEAN_UP:
for p in ValueStorage.discrete_list:
executors.cleanup_env(
p["product_id"], p["product_version"], initial_mapproxy_config
)
# ToDo: Uncomment these only for local debugging
# if config.DEBUG_MODE_LOCAL:
# test_manuel_discrete_ingest()
# test_watch_discrete_ingest()
|
python
|
import jax
from absl import app
from absl import flags
flags.DEFINE_string('server_addr', '', help='server ip addr')
flags.DEFINE_integer('num_hosts', 1, help='num of hosts' )
flags.DEFINE_integer('host_idx', 0, help='index of current host' )
FLAGS = flags.FLAGS
def main(argv):
jax.distributed.initialize(FLAGS.server_addr, FLAGS.num_hosts, FLAGS.host_idx)
print(f"device count:{jax.device_count()}")
print(f"process index:{jax.process_index()}")
print(jax.devices())
if __name__ == '__main__':
app.run(main)
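
# Example multi-host launch (hypothetical coordinator address; one process per host):
#   host 0: python this_script.py --server_addr=10.0.0.1:8476 --num_hosts=2 --host_idx=0
#   host 1: python this_script.py --server_addr=10.0.0.1:8476 --num_hosts=2 --host_idx=1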
|
python
|
"""
This script prepares the files that populate the databases, such as adding headers
to them and removing a couple of noun phrases that do not belong there.
"""
def prepare_data(file):
"""
Prepares the argument file for SQL insertion
"""
with open(file, 'r', encoding='utf-8') as infile:
infile = infile.read().splitlines()
if file == 'islex_fletta_ofl.csv':
pass
elif file == 'ordmyndir.txt':
if infile[0] != 'word_form':
infile = ['word_form'] + infile
else:
pass
elif file == 'SHsnid.csv':
        if infile[0] != 'lemma;id;gender;type;word_form;pos':
infile = ['lemma;id;gender;type;word_form;pos'] + infile
elif file == 'all_filters.txt':
if infile[0] != 'filter':
infile = ['filter'] + infile
else:
pass
with open(file, 'w', encoding='utf-8') as outfile:
if file == 'SHsnid.csv':
for row in infile:
outfile.write(row + '\n')
else:
for row in infile:
if row in ['"háls-, nef- og eyrnalæknir","n m"',
'"fóta-, handa- og munnveiki","n f"',
'"einn, tveir og þrír","adv"']:
continue
data = row.replace('"', '')
outfile.write(data + '\n')
if __name__ == '__main__':
pass
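    # Hypothetical invocation, using the file names handled above:
    #     prepare_data('ordmyndir.txt')
    #     prepare_data('SHsnid.csv')
    #     prepare_data('all_filters.txt')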
|
python
|
#
# This file is part of Brazil Data Cube Validation Tools.
# Copyright (C) 2020 INPE.
#
# Python Native
import os
import time
import sys
# 3rdparty
import gdal
import numpy as np
def diff_sum_abs(image_path1,image_path2, output_folder):
    '''
    Compute the per-pixel sum of absolute band differences between the first
    .tif found in image_path1 and the first .tif found in image_path2, and
    write the result as a single-band GTiff in output_folder.
    '''
    # get the .tif band stack
for filename in os.listdir(image_path1):
if filename.endswith('.tif'):
i1 = filename
ds1 = gdal.Open(os.path.join(image_path1, filename))
# Set current working dir to second image
os.chdir(image_path2)
for filename in os.listdir(image_path2):
if filename.endswith('.tif'):
i2 = filename
ds2 = gdal.Open(os.path.join(image_path2, filename))
# Create GTIF file
driver = gdal.GetDriverByName("GTiff")
    # count the number of bands
numbands = ds1.RasterCount
print(numbands)
    # build the output file name
output_file = os.path.basename(i1) + "_DIF_ABS_" + os.path.basename(i2)
print(output_file)
    # reference size taken from image 1
xsize = ds1.RasterXSize
ysize = ds1.RasterYSize
    # create the output image
os.chdir(output_folder)
dataset = driver.Create(output_file, xsize, ysize, 1, gdal.GDT_Float32)
# follow code is adding GeoTranform and Projection
geotrans=ds1.GetGeoTransform() #get GeoTranform from existed 'data0'
proj=ds1.GetProjection() #you can get from a exsited tif or import
dataset.SetGeoTransform(geotrans)
dataset.SetProjection(proj)
    # empty list to collect the per-band differences
results = []
os.chdir(output_folder)
for band in range(numbands):
#ds1 = gdal.Open(os.path.join(img1, listbands1[band]))
#ds2 = gdal.Open(os.path.join(img2, listbands2[band]))
bandtar = ds1.GetRasterBand(band+1).ReadAsArray()
bandref = ds2.GetRasterBand(band+1).ReadAsArray()
        # convert to float
bandtar = bandtar.astype(float)
bandref = bandref.astype(float)
#bandtar = np.array(ds1.GetRasterBand(band).ReadAsArray().astype(float))
#bandref = np.array(ds2.GetRasterBand(band).ReadAsArray().astype(float))
results.append(np.abs(bandtar - bandref))
diff_abs_sum = np.sum(results, axis=0)
dataset.GetRasterBand(1).WriteArray(diff_abs_sum)
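
# Hypothetical command-line invocation (directory names are placeholders; the
# script name is whatever this module is saved as):
#   python <this_script>.py /data/scene_t1 /data/scene_t2 /data/output
# Each input directory must contain a .tif stack; the result is a single-band
# GTiff named "<img1>_DIF_ABS_<img2>" written to the output folder.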
if __name__ == '__main__':
    if len(sys.argv) <= 3:  # check how many arguments were passed; the program name counts as 1
        print('Not enough arguments to run the function')
sys.exit()
print('STARTED diff_sum_abs')
start = time.time()
image_path1, image_path2, output_folder = sys.argv[1], sys.argv[2], sys.argv[3]
diff_sum_abs(image_path1, image_path2, output_folder)
    # clear the variables
ds1 = None
ds2 = None
bandref = None
bandtar = None
dataset = None
end = time.time()
print('ENDED')
print('TOTAL ELAPSED TIME: {}'.format(end - start))
|
python
|
from django import forms
class LoginForm(forms.Form):
    email = forms.EmailField(required = True, error_messages = {"required": "Email must not be empty"})
    password = forms.CharField(required = True, min_length = 6, max_length = 20, error_messages={
        "min_length": "Password must be at least 6 characters",
        "max_length": "Password must be at most 20 characters",
    })
class RegisterForm(forms.Form):
    email = forms.EmailField(required = True, error_messages = {"required": "Email must not be empty"})
    password = forms.CharField(required = True, min_length = 6, max_length = 20, error_messages={
        "min_length": "Password must be at least 6 characters",
        "max_length": "Password must be at most 20 characters",
    })
|
python
|
'''
This code implements the prediction head (PA_Head) for PAN.
'''
import torch
import torch.nn as nn
import math
__all__ = ['PA_Head']
class PA_Head(nn.Module):
def __init__(self, in_channels, hidden_dim, num_classes):
super(PA_Head, self).__init__()
self.conv1 = nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(hidden_dim, num_classes, kernel_size=1, stride=1, padding=0)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def forward(self, f):
out = self.conv1(f)
out = self.relu1(self.bn1(out))
out = self.conv2(out)
return out
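
# Minimal usage sketch (the channel and class counts below are assumptions,
# not values taken from the original PAN configuration):
if __name__ == '__main__':
    head = PA_Head(in_channels=128, hidden_dim=128, num_classes=6)
    feats = torch.randn(2, 128, 160, 160)   # (batch, channels, H, W)
    out = head(feats)
    print(out.shape)  # expected: torch.Size([2, 6, 160, 160])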
|
python
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover SiteService."""
__author__ = '[email protected] (Joseph DiLallo)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
import unittest
from adspygoogle.common import Utils
from tests.adspygoogle.dfa.v1_18 import client
from tests.adspygoogle.dfa.v1_18 import HTTP_PROXY
from tests.adspygoogle.dfa.v1_18 import SERVER_V1_18
from tests.adspygoogle.dfa.v1_18 import VERSION_V1_18
class SiteServiceTestV1_18(unittest.TestCase):
"""Unittest suite for SiteService using v1_18."""
SERVER = SERVER_V1_18
VERSION = VERSION_V1_18
client.debug = False
service = None
site_id = '0'
directory_site_id = '0'
user_self = None
def setUp(self):
"""Prepare unittest."""
print self.id()
if not self.__class__.service:
self.__class__.service = client.GetSiteService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
if self.__class__.user_self is None:
user_service = client.GetUserService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
search_criteria = {
'searchString': client._headers['Username']
}
self.__class__.user_self = user_service.GetUsersByCriteria(
search_criteria)[0]['records'][0]
def testSaveDfaSite(self):
"""Test whether we can save a site"""
site = {
'name': 'Site #%s' % Utils.GetUniqueName(),
'countryId': '256', # USA
'keyname': 'http://www.example.com'
}
site = self.__class__.service.SaveDfaSite(site)
self.__class__.site_id = site[0]['id']
self.assert_(isinstance(site, tuple))
def testGetDfaSite(self):
"""Test whether we can fetch a site by id."""
if self.__class__.site_id == '0':
self.testSaveDfaSite()
site_id = self.__class__.site_id
self.assert_(isinstance(self.__class__.service.GetDfaSite(
site_id), tuple))
def testGetDfaSites(self):
"""Test whether we can fetch sites by criteria."""
if self.__class__.site_id == '0':
self.testSaveDfaSite()
search_criteria = {
'ids': [self.__class__.site_id]
}
self.assert_(isinstance(self.__class__.service.GetDfaSites(
search_criteria), tuple))
def testGetSitesByCriteria(self):
"""Test whether we can fetch sites by criteria."""
search_criteria = {}
results = self.__class__.service.GetSitesByCriteria(search_criteria)
self.assert_(isinstance(results, tuple))
self.__class__.directory_site_id = results[0]['records'][0]['id']
def testGetAvailableDfaSiteContactTypes(self):
"""Test whether we can fetch available DFA site contact types."""
self.assert_(isinstance(
self.__class__.service.GetAvailableDfaSiteContactTypes(), tuple))
def testGetContacts(self):
"""Test whether we can fetch contacts."""
contact_search_criteria = {
'pageSize': '10',
'pageNumber': '1'
}
self.assert_(isinstance(
self.__class__.service.GetContacts(contact_search_criteria), tuple))
if __name__ == '__main__':
unittest.main()
|
python
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = "0xdeadbeefdeadbeef"
DEBUG = True
INSTALLED_APPS = [
"django_jsonfield_backport",
"tests",
]
DATABASES = {
"default": {
"ENGINE": "django.db.backends.%s" % os.getenv("DB_BACKEND"),
"NAME": os.getenv("DB_NAME"),
"USER": os.getenv("DB_USER"),
"PASSWORD": os.getenv("DB_PASSWORD"),
"HOST": os.getenv("DB_HOST", ""),
"PORT": os.getenv("DB_PORT", ""),
"TEST": {
"USER": "default_test",
"TBLSPACE": "default_test_tbls",
"TBLSPACE_TMP": "default_test_tbls_tmp",
},
},
}
DATABASE_ROUTERS = ["tests.models.MyRouter"]
SILENCED_SYSTEM_CHECKS = ["django_jsonfield_backport.W001"]
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
|
python
|
# Given a linked list which might contain a loop, implement an algorithm that returns the node at the beginning of the loop (if one exists)
from utils import Node
first_node = Node(1)
second_node = Node(2)
third_node = Node(3)
fourth_node = Node(4)
fifth_node = Node(5)
sixth_node = Node(6)
# Visual [1, 2, 3, 4, 5, 6, 4]
# (The second four is the same as the first, hence the loop)
first_node.next = second_node
second_node.next = third_node
third_node.next = fourth_node
fourth_node.next = fifth_node
fifth_node.next = sixth_node
sixth_node.next = fourth_node
def loop_detection(head):
    fast = head
    slow = head
    # Phase 1: advance fast by two and slow by one; if there is a loop, they meet inside it
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
        if fast is slow:
            break
    else:
        # fast ran off the end of the list, so there is no loop
        return None
    # Phase 2: reset slow to the head; moving both one step at a time, they
    # meet at the node where the loop begins
    slow = head
    while fast is not slow:
        fast = fast.next
        slow = slow.next
    print(f"Node at start of loop value: {fast.value}")
    return fast
print(loop_detection(first_node))
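
# Quick check of the no-loop case (a second, hypothetical list): without a
# cycle the function is expected to return None.
straight_head = Node(7)
straight_tail = Node(8)
straight_tail.next = None
straight_head.next = straight_tail
print(loop_detection(straight_head))  # expected: None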
|
python
|
#!/usr/bin/env python
# Plots Sigmoid vs. Probit.
import matplotlib.pyplot as pl
import numpy as np
from scipy.special import expit
from scipy.stats import norm
x = np.arange(-6, 6, 0.1)
l = np.sqrt(np.pi/8);
pl.plot(x, expit(x), 'r-', label='sigmoid')
pl.plot(x, norm.cdf(l*x), 'b--', label='probit')
pl.axis([-6, 6, 0, 1])
pl.legend()
pl.savefig('probitPlot.png')
pl.show()
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
from PySide6 import QtCore, QtWidgets
class MyWidget(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.text = QtWidgets.QLabel("Hello World", alignment=QtCore.Qt.AlignCenter)
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addWidget(self.text)
if __name__ == "__main__":
app = QtWidgets.QApplication([])
widget = MyWidget()
widget.resize(800, 600)
widget.show()
sys.exit(app.exec())
|
python
|
# Author: Bohua Zhan
import unittest
from kernel.type import boolT, TVar, TFun
from kernel.term import Term, Var
from kernel.thm import Thm
from kernel.proof import ProofItem
from logic.proofterm import ProofTerm, ProofTermAtom
from logic import basic
from logic.logic import conj, disj, mk_if
from logic.nat import natT, plus, zero
from server import tactic
from syntax import printer
thy = basic.load_theory('nat')
class TacticTest(unittest.TestCase):
def testRule(self):
A = Var('A', boolT)
B = Var('B', boolT)
goal = Thm([], conj(B, A))
pt = tactic.rule().get_proof_term(thy, ProofTerm.sorry(goal), args='conjI')
prf = pt.export()
self.assertEqual(thy.check_proof(prf), goal)
def testRule2(self):
A = Var('A', boolT)
B = Var('B', boolT)
goal = Thm([], disj(B, A))
prev = ProofTermAtom(0, Thm.assume(disj(A, B)))
pt = tactic.rule().get_proof_term(thy, ProofTerm.sorry(goal), args='disjE', prevs=[prev])
prf = pt.export(prefix=(1,), subproof=False)
self.assertEqual(prf.items[2], ProofItem(3, 'apply_theorem', args='disjE', prevs=[0, 1, 2]))
def testRule3(self):
A = Var('A', boolT)
B = Var('B', boolT)
goal = Thm([], disj(B, A))
prev = ProofTermAtom(0, Thm.assume(B))
pt = tactic.rule().get_proof_term(thy, ProofTerm.sorry(goal), args='disjI1', prevs=[prev])
prf = pt.export(prefix=(1,), subproof=False)
self.assertEqual(prf.items[0], ProofItem(1, 'apply_theorem_for', args=('disjI1', {}, {'A': B, 'B': A}), prevs=[0]))
def testRule4(self):
n = Var('n', natT)
goal = Thm([], Term.mk_equals(plus(n, zero), n))
inst = {'P': Term.mk_abs(n, goal.prop), 'x': n}
pt = tactic.rule().get_proof_term(thy, ProofTerm.sorry(goal), args=('nat_induct', ({}, inst)))
prf = pt.export()
self.assertEqual(thy.check_proof(prf), goal)
def testIntros(self):
Ta = TVar('a')
x = Var('x', Ta)
P = Var('P', TFun(Ta, boolT))
Q = Var('Q', TFun(Ta, boolT))
goal = Thm([], Term.mk_all(x, Term.mk_implies(P(x), Q(x))))
intros_tac = tactic.intros()
pt = intros_tac.get_proof_term(thy, ProofTerm.sorry(goal), args=['x'])
prf = pt.export()
self.assertEqual(thy.check_proof(prf), goal)
def testInduct(self):
n = Var('n', natT)
goal = Thm([], Term.mk_equals(plus(n, zero), n))
induct_tac = tactic.var_induct()
pt = induct_tac.get_proof_term(thy, ProofTerm.sorry(goal), args=('nat_induct', n))
prf = pt.export()
self.assertEqual(thy.check_proof(prf), goal)
def testRewrite(self):
n = Var('n', natT)
goal = Thm.mk_equals(plus(zero, n), n)
rewrite_tac = tactic.rewrite()
pt = rewrite_tac.get_proof_term(thy, ProofTerm.sorry(goal), args='plus_def_1')
prf = pt.export()
self.assertEqual(thy.check_proof(prf), goal)
def testRewrite2(self):
Ta = TVar("a")
a = Var("a", Ta)
b = Var("b", Ta)
eq_a = Term.mk_equals(a, a)
goal = Thm.mk_equals(mk_if(eq_a, b, a), b)
rewrite_tac = tactic.rewrite()
pt = rewrite_tac.get_proof_term(thy, ProofTerm.sorry(goal), args='if_P')
prf = pt.export()
self.assertEqual(prf.items[0], ProofItem(0, 'sorry', th=Thm.mk_equals(a, a)))
self.assertEqual(thy.check_proof(prf), goal)
def testCases(self):
A = Var('A', boolT)
B = Var('B', boolT)
C = Var('C', boolT)
cases_tac = tactic.cases()
pt = cases_tac.get_proof_term(thy, ProofTerm.sorry(Thm([B], C)), args=A)
prf = pt.export()
self.assertEqual(thy.check_proof(prf), Thm([B], C))
if __name__ == "__main__":
unittest.main()
|
python
|
# -*- coding: utf-8 -*- #
# Copyright 2021 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create command for Backup for GKE restore."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.container.backup_restore import util as api_util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.container.backup_restore import resource_args
from googlecloudsdk.command_lib.util.args import labels_util
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Create(base.CreateCommand):
"""Creates a restore.
Creates a Backup for GKE restore.
## EXAMPLES
To create a restore ``my-restore'' in location ``us-central1'' under restore
plan ``my-restore-plan'', run:
$ {command} my-restore --project=my-project --location=us-central1
--restore-plan=my-restore-plan
"""
@staticmethod
def Args(parser):
resource_args.AddRestoreArg(parser)
group = parser.add_group(mutex=True)
group.add_argument(
'--async',
required=False,
action='store_true',
default=False,
help="""
Return immediately, without waiting for the operation in progress to
complete.
""")
group.add_argument(
'--wait-for-completion',
required=False,
action='store_true',
default=False,
help='Wait for the created restore to complete.')
# TODO(b/205222596): Figure out if we can/should use the relative name of
# the backup. This would potentially require the CLI to first get the backup
# plan referenced in the parent restore plan and then concat it with the
# user input relative name.
parser.add_argument(
'--backup',
type=str,
required=True,
help="""
Name of the backup from which to restore under the backup plan specified
in restore plan.
Format: projects/<project>/locations/<location>/backupPlans/<backupPlan>/backups/<backup>.
""")
parser.add_argument(
'--description',
type=str,
required=False,
default=None,
help='Optional text description for the restore.')
labels_util.AddCreateLabelsFlags(parser)
def Run(self, args):
labels = labels_util.GetUpdateLabelsDictFromArgs(args)
restore_ref = args.CONCEPTS.restore.Parse()
if args.IsSpecified('async'):
return api_util.CreateRestore(
restore_ref,
backup=args.backup,
description=args.description,
labels=labels)
api_util.CreateRestoreAndWaitForLRO(
restore_ref,
backup=args.backup,
description=args.description,
labels=labels)
if not args.IsSpecified('wait_for_completion'):
return []
return api_util.WaitForRestoreToFinish(restore_ref.RelativeName())
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 27 18:11:58 2021
@author: TSAI, TUNG-CHEN
@update: 2021/10/05
"""
import numpy as np
from .nn import get_network
from .utils import get_inputs, print_prediction
# =============================================================================
#
# =============================================================================
def infer(modelname: str, data: dict) -> np.ndarray:
inputs = get_inputs(modelname, data)
network = get_network(modelname)
results = network.infer(inputs)
print('\nPrediction:')
print_prediction(data, results, label_type='name')
return results
|
python
|
"""Mock callback module to support device and state testing."""
import logging
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument
# pylint: disable=too-many-instance-attributes
class MockCallbacks():
"""Mock callback class to support device and state testing."""
def __init__(self):
"""Init the MockCallbacks Class."""
self.callbackvalue1 = None
self.callbackvalue2 = None
self.callbackvalue3 = None
self.callbackvalue4 = None
self.callbackvalue5 = None
self.callbackvalue6 = None
self.callbackvalue7 = None
self.callbackvalue8 = None
self.callbackvalue9 = None
def callbackmethod1(self, addr, state, value):
"""Receive notice of callback method 1."""
self._report_callback(1, addr, state, value)
self.callbackvalue1 = value
def callbackmethod2(self, addr, state, value):
"""Receive notice of callback method 2."""
self._report_callback(2, addr, state, value)
self.callbackvalue2 = value
def callbackmethod3(self, addr, state, value):
"""Receive notice of callback method 3."""
self._report_callback(3, addr, state, value)
self.callbackvalue3 = value
def callbackmethod4(self, addr, state, value):
"""Receive notice of callback method 5."""
self._report_callback(4, addr, state, value)
self.callbackvalue4 = value
def callbackmethod5(self, addr, state, value):
"""Receive notice of callback method 5."""
self._report_callback(5, addr, state, value)
self.callbackvalue5 = value
def callbackmethod6(self, addr, state, value):
"""Receive notice of callback method 6."""
self._report_callback(6, addr, state, value)
self.callbackvalue6 = value
def callbackmethod7(self, addr, state, value):
"""Receive notice of callback method 7."""
self._report_callback(7, addr, state, value)
self.callbackvalue7 = value
def callbackmethod8(self, addr, state, value):
"""Receive notice of callback method 8."""
self._report_callback(8, addr, state, value)
self.callbackvalue8 = value
def callbackmethod9(self, addr, state, value):
"""Receive notice of callback method 9."""
_LOGGER.debug('Called method 9 callback')
self.callbackvalue9 = value
@staticmethod
def _report_callback(callback, addr, state, value):
_LOGGER.debug('Called method %d for address %s group %s value %s',
callback, addr, state, value)
|
python
|
# This file will grab the hashes from the file generation and compare them to
# hashes gotten from user-given files. Can return positive (files that do match)
# or negative (files that don't match) hash results
import argparse
import os
import sys
from file_object import FileObject
def log_and_print(log_file, log_entry, newline=True):
if newline:
log_file.write(log_entry + "\n")
print(log_entry)
else:
log_file.write(log_entry)
print(log_entry, end="", flush=True)
# Creates a 2D list of the user-submitted hashes. Will return a list of lists.
# If one of the files for hashes wasn't included, that spot will be an empty list
def create_hash_lists(args):
master_hash_list = list()
if args.md5file != "None":
md5_hashes = return_list_from_file(args.md5file)
master_hash_list.append(md5_hashes)
else:
md5_hashes = ["None"]
master_hash_list.append(md5_hashes)
if args.sha256file != 'None':
sha256_hashes = return_list_from_file(args.sha256file)
master_hash_list.append(sha256_hashes)
else:
sha256_hashes = ["None"]
master_hash_list.append(sha256_hashes)
if args.sha1file != 'None':
sha1_hashes = return_list_from_file(args.sha1file)
master_hash_list.append(sha1_hashes)
else:
sha1_hashes = ["None"]
master_hash_list.append(sha1_hashes)
return master_hash_list
# Create list of objects of file names and hashes gathered from gdrive and dropbox
def get_hashes_from_download(folder_name, log_file):
if os.path.exists(folder_name):
master_hash_list = list()
# parse deleted
if os.path.exists(folder_name + "/deleted"):
# parse deleted Google docs
if os.path.exists(folder_name + "/deleted/_google"):
hash_list = collect_hashes(folder_name + "/deleted/_google", log_file)
master_hash_list = add_hashes_to_master_list(master_hash_list, hash_list)
# deleted items hash collector
hash_list = collect_hashes(folder_name + "/deleted", log_file)
master_hash_list = add_hashes_to_master_list(master_hash_list, hash_list)
# parse regular files
if os.path.exists(folder_name + "/regular"):
# parse deleted Google docs
if os.path.exists(folder_name + "/regular/_google"):
hash_list = collect_hashes(folder_name + "/regular/_google", log_file)
master_hash_list = add_hashes_to_master_list(master_hash_list, hash_list)
# call non-Google doc items hash collector
hash_list = collect_hashes(folder_name + "/regular", log_file)
master_hash_list = add_hashes_to_master_list(master_hash_list, hash_list)
return master_hash_list
else:
sys.exit("ERROR: Folder does not exist. Exiting...")
# Concatenates the master_list and the new hash_list
def add_hashes_to_master_list(master_list, hashes_to_add):
if not hashes_to_add:
return master_list
for obj in hashes_to_add:
if isinstance(obj, FileObject):
master_list.append(obj)
return master_list
# Parses the _hashes.txt file in the given path and returns a list of FileObject entries
def collect_hashes(path, log_file):
if os.path.exists(path + "/_hashes.txt"):
hash_file = open(path + "/_hashes.txt", 'r')
hash_list = list()
count = 0
for line in hash_file:
if count % 4 == 0:
hash_obj = FileObject()
hash_obj.set_name(line.strip())
if count % 4 == 1:
line_split = line.split(' ')
hash_obj.set_md5(line_split[1].strip())
if count % 4 == 2:
line_split = line.split(' ')
hash_obj.set_sha1(line_split[1].strip())
if count % 4 == 3:
line_split = line.split(' ')
hash_obj.set_sha256(line_split[1].strip())
hash_list.append(hash_obj)
count = count + 1
return hash_list
else:
return []
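
# For reference, collect_hashes() assumes "_hashes.txt" stores four lines per
# file: the file name, then one line each for the MD5, SHA1, and SHA256 hash,
# where the hash value is the second space-separated token. Hypothetical example:
#
#   report.pdf
#   md5: 9e107d9d372bb6826bd81d3542a419d6
#   sha1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12
#   sha256: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855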
# Performs hash matching, alters the match status in the objects accordingly
# then returns the objects list
def hash_matching(list_of_downloaded_objects, read_in_hashes):
md5 = read_in_hashes[0]
sha256 = read_in_hashes[1]
sha1 = read_in_hashes[2]
if md5:
for hash in md5:
for obj in list_of_downloaded_objects:
if obj.get_md5().strip() == hash.strip():
obj.set_md5_hash_match(True)
if sha256:
for hash in sha256:
for obj in list_of_downloaded_objects:
if obj.get_sha256().strip() == hash.strip():
obj.set_sha256_hash_match(True)
if sha1:
for hash in sha1:
for obj in list_of_downloaded_objects:
if obj.get_sha1().strip() == hash.strip():
obj.set_sha1_hash_match(True)
return list_of_downloaded_objects
# Performs positive hashing. Returns objects that match given hashes.
def positive_hashing(list_of_downloaded_objects):
positive_md5 = list()
positive_sha256 = list()
positive_sha1 = list()
for obj in list_of_downloaded_objects:
if obj.get_md5_match() == True:
positive_md5.append(obj)
if obj.get_sha256_match() == True:
positive_sha256.append(obj)
if obj.get_sha1_match() == True:
positive_sha1.append(obj)
results = [positive_md5, positive_sha1, positive_sha256]
return results
# Performs negative hashing. Returns object that don't match given hashes
def negative_hashing(list_of_downloaded_objects):
negative_md5 = list()
negative_sha256 = list()
negative_sha1 = list()
for obj in list_of_downloaded_objects:
if obj.get_md5_match() == False:
negative_md5.append(obj)
if obj.get_sha256_match() == False:
negative_sha256.append(obj)
if obj.get_sha1_match() == False:
negative_sha1.append(obj)
results = [negative_md5, negative_sha1, negative_sha256]
return results
# Reads in a specified file full of hashes (one hash per line) and returns a list
# of the hashes
def return_list_from_file(read_file):
    hash_list = list()
    with open(read_file, 'r') as file1:
        for line in file1:
            hash_list.append(line)
    return hash_list
def hash_checker(folder_name, args, log_file):
master_list = create_hash_lists(args)
log_and_print(log_file, "Retrieving hashes from '" + folder_name + "'... ", False)
downloaded_files_objects = get_hashes_from_download(folder_name, log_file)
log_and_print(log_file, "Done!")
log_and_print(log_file, "Finding matching hashes... ", False)
hash_matches = hash_matching(downloaded_files_objects, master_list)
log_and_print(log_file, "Done!")
if args.positive == True:
positive_matches = positive_hashing(hash_matches)
return positive_matches
else:
negative_matches = negative_hashing(hash_matches)
return negative_matches
|
python
|
'''
Get the hottest news titles from the Baidu hot-topics page,
then save the data into MySQL
'''
import datetime
import pymysql
from pyquery import PyQuery as pq
import requests
from requests.exceptions import ConnectionError
URL = 'https://www.baidu.com/s?wd=%E7%83%AD%E7%82%B9'
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',
'Upgrade-Insecure-Requests': '1'
}
def get_html(url):
try:
response = requests.get(url, headers=headers)
if response.status_code == 200:
return response.text
return None
except ConnectionError as e:
print(e.args)
return None
def parse_html(html):
doc = pq(html)
trs = doc('.FYB_RD table.c-table tr').items()
for tr in trs:
index = tr('td:nth-child(1) span.c-index').text()
title = tr('td:nth-child(1) span a').text()
hot = tr('td:nth-child(2)').text().strip('"')
yield {
'index':index,
'title':title,
'hot':hot
}
def save_to_mysql(items):
try:
db = pymysql.connect(host='localhost', port=3306, user='root', passwd='123456',
db='crawls', charset='utf8')
cursor = db.cursor()
cursor.execute('use crawls;')
cursor.execute('CREATE TABLE IF NOT EXISTS baiduNews('
'id INT PRIMARY KEY NOT NULL AUTO_INCREMENT,'
'ranking VARCHAR(30),'
'title VARCHAR(60),'
'datetime TIMESTAMP,'
'hot VARCHAR(30));')
try:
for item in items:
print(item)
now = datetime.datetime.now()
now = now.strftime('%Y-%m-%d %H:%M:%S')
sql_query = 'INSERT INTO baiduNews(ranking, title, datetime, hot) VALUES ("%s", "%s", "%s", "%s")' % (
item['index'], item['title'], now, item['hot'])
cursor.execute(sql_query)
print('Save into mysql')
db.commit()
except pymysql.MySQLError as e:
db.rollback()
print(e.args)
return
except pymysql.MySQLError as e:
print(e.args)
return
def check_mysql():
try:
db = pymysql.connect(host='localhost', port=3306, user='root', passwd='123456',
db='crawls', charset='utf8')
cursor = db.cursor()
cursor.execute('use crawls;')
sql_query = 'SELECT * FROM baiduNews'
results = cursor.execute(sql_query)
print(results)
except pymysql.MySQLError as e:
print(e.args)
def main():
html = get_html(URL)
items = parse_html(html)
save_to_mysql(items)
#check_mysql()
if __name__ == '__main__':
main()
|
python
|
'''1. Game programming: define a Turtle class and a Fish class as specified below and try to write the game.
(Beginners may not manage a complete implementation, but please try it yourself first - you will learn a lot ^_^)
Assume the game scene covers the range (x, y) with 0<=x<=10, 0<=y<=10
The game creates 1 turtle and 10 fish
They all move in random directions
The turtle's maximum move distance is 2 (it randomly moves 1 or 2), the fish's maximum move distance is 1
When a move crosses the edge of the scene, it bounces back in the opposite direction
The turtle starts with 100 stamina (the upper limit)
Each move costs the turtle 1 stamina
When the turtle and a fish share the same coordinates, the turtle eats the fish and gains 20 stamina
Fish stamina is not tracked for now
The game ends when the turtle's stamina reaches 0 (it dies) or the number of fish reaches 0
'''
import random as r
legal_x = [0 , 10]
legal_y = [0 , 10]
class Turtle:
def __init__(self):
        # initial stamina
        self.power = 100
        # random initial position
self.x = r.randint(legal_x[0] , legal_x[1])
self.y = r.randint(legal_y[0] , legal_y[1])
def move(self):
        # randomly pick a direction and move to a new position (x, y)
        new_x = self.x + r.choice([1 , 2 , -1 , -2])
        new_y = self.y + r.choice([1 , 2 , -1 , -2])
        # check whether the move goes beyond the scene's x-axis boundary
if new_x < legal_x[0]:
self.x = legal_x[0] - (new_x - legal_x[0])
elif new_x > legal_x[1]:
self.x = legal_x[1] - (new_x - legal_x[1])
else:
self.x = new_x
        # check whether the move goes beyond the scene's y-axis boundary
        if new_y < legal_y[0]:
self.y = legal_y[0] - (new_y - legal_y[0])
elif new_y > legal_y[1]:
self.y = legal_y[1] - (new_y - legal_y[1])
else:
self.y = new_y
        # stamina cost
self.power -= 1
return (self.x , self.y)
def eat(self):
self.power += 20
if self.power > 100:
self.power = 100
class Fish:
def __init__(self):
self.x = r.randint(legal_x[0] , legal_x[1])
self.y = r.randint(legal_y[0] , legal_y[1])
def move(self):
        # randomly pick a direction and move to a new position (x, y)
        new_x = self.x + r.choice([1 , -1])
        new_y = self.y + r.choice([1 , -1])
        # check whether the move goes beyond the scene's x-axis boundary
if new_x < legal_x[0]:
self.x = legal_x[0] - (new_x - legal_x[0])
elif new_x > legal_x[1]:
self.x = legal_x[1] - (new_x - legal_x[1])
else:
self.x = new_x
        # check whether the move goes beyond the scene's y-axis boundary
        if new_y < legal_y[0]:
            self.y = legal_y[0] - (new_y - legal_y[0])
        elif new_y > legal_y[1]:
            self.y = legal_y[1] - (new_y - legal_y[1])
        else:
            self.y = new_y
return(self.x , self.y)
turtle = Turtle()
fish = []
for i in range(10):
new_fish = Fish()
fish.append(new_fish)
while True:
if not len(fish):
        print('All the fish have been eaten - game over!')
break
if not turtle.power:
        print('The turtle died of exhaustion!')
break
pos = turtle.move()
    for each_fish in fish[:]:  # iterate over a copy so removal is safe
        if each_fish.move() == pos:
            # the fish gets eaten
            turtle.eat()
            fish.remove(each_fish)
            print('A fish was eaten...')
|
python
|
salario = float(input("What is the employee's salary? R$"))
aumentoSalario = salario + (salario * (15 / 100))
print(f'An employee who earned R${salario:.2f}, with a 15% raise, now earns R${aumentoSalario:.2f}')
|
python
|
import bs4 # type: ignore[import]
from .activity import _parse_subtitles, _parse_caption, _is_location_api_link
# bring into scope
from .comment import test_parse_html_comment_file # noqa: F401
from .html_time_utils import test_parse_dt # noqa: F401
def bs4_div(html: str) -> bs4.element.Tag:
tag = bs4.BeautifulSoup(html, "lxml").select_one("div")
assert tag is not None
return tag
def test_parse_subtitles() -> None:
content = bs4_div(
"""<div class="content-cell mdl-cell mdl-cell--6-col mdl-typography--body-1">Visited <a href="https://support.google.com/youtube/answer/7071292?hl=en">Get support with Premium memberships & purchases - YouTube Help</a><br>Aug 25, 2020, 5:06:44 PM PDT</div>"""
)
subs, dt = _parse_subtitles(content, file_dt=None)
assert subs == [
(
"Visited Get support with Premium memberships & purchases - YouTube Help",
"https://support.google.com/youtube/answer/7071292?hl=en",
)
]
assert dt is not None
content = bs4_div(
"""<div class="content-cell mdl-cell mdl-cell--6-col mdl-typography--body-1">6 cards in your feed<br/>Sep 4, 2020, 11:01:46 AM PDT</div>"""
)
subs, dt = _parse_subtitles(content, file_dt=None)
assert subs == [("6 cards in your feed", None)]
# parses into a DstTzInfo timezone, so just testing that it parsed
assert int(dt.timestamp()) == 1599242506
content = bs4_div(
"""<div class="content-cell mdl-cell mdl-cell--6-col mdl-typography--body-1">1 notification<br>Including topics:<br><a href="https://www.google.com/maps/place/?q=place_id:XX">Emergency resources and information</a><br>Sep 1, 2020, 9:27:07 PM PDT</div>""",
)
subs, dt = _parse_subtitles(content, file_dt=None)
# how multiple lines of body look in subtitles
assert subs == [
("1 notification", None),
("Including topics:", None),
(
"Emergency resources and information",
"https://www.google.com/maps/place/?q=place_id:XX",
),
]
assert dt is not None
def test_parse_captions() -> None:
content = bs4_div(
"""<div class="content-cell mdl-cell mdl-cell--12-col mdl-typography--caption"><b>Products:</b><br> Drive<br><b>Details:</b><br> From IP 8.8.8.8<br></div>"""
)
details, locationInfos, products = _parse_caption(content)
assert details == ["From IP 8.8.8.8"]
assert products == ["Drive"]
assert locationInfos == []
def test_parse_locations() -> None:
content = bs4_div(
"""<div class="content-cell mdl-cell mdl-cell--12-col mdl-typography--caption"><b>Products:</b><br> Discover<br><b>Locations:</b><br> At <a href="https://www.google.com/maps/@?something">this general area</a> - From <a href="https://support.google.com/maps/answer/1">your places</a> (Home)<br></div>"""
)
details, locationInfos, products = _parse_caption(content)
assert details == []
assert products == ["Discover"]
assert locationInfos == [
(
"At this general area",
"https://www.google.com/maps/@?something",
"From your places (Home)",
"https://support.google.com/maps/answer/1",
)
]
content = bs4_div(
"""<div class="content-cell mdl-cell mdl-cell--12-col mdl-typography--caption"><b>Products:</b><br> Maps<br><b>Locations:</b><br> At <a href="https://www.google.com/maps/@?api=1&map_action=map¢er=3,-18&zoom=11">this general area</a> - Based on your past activity<br></div>"""
)
details, locationInfos, products = _parse_caption(content)
assert details == []
assert products == ["Maps"]
assert locationInfos == [
(
"At this general area",
"https://www.google.com/maps/@?api=1&map_action=map¢er=3,-18&zoom=11",
"Based on your past activity",
None,
)
]
def test_parse_is_google_url() -> None:
assert _is_location_api_link(
"https://www.google.com/maps/@?api=1&map_action=map¢er=3,-18&zoom=11"
)
assert not _is_location_api_link("https://www.google.com/")
|
python
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
import pytest
from builtins import open
from future import standard_library
#standard_library.install_aliases()
from builtins import *
from freezegun import freeze_time
from furl import furl
from nzbhydra import config
from nzbhydra.database import Indexer
from nzbhydra.indexers import getIndexerSettingByName
from nzbhydra.search import SearchRequest
from nzbhydra.searchmodules.nzbclub import NzbClub
from nzbhydra.tests.UrlTestCase import UrlTestCase
from nzbhydra.tests.db_prepare import set_and_drop
class NzbclubTests(UrlTestCase):
@pytest.fixture
def setUp(self):
set_and_drop()
def testUrlGeneration(self):
w = NzbClub(getIndexerSettingByName("nzbclub"))
self.args = SearchRequest(query="a showtitle", season=1, episode=2)
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertEqual('a showtitle s01e02 or a showtitle 1x02', furl(urls[0]).args["q"])
self.args = SearchRequest(query="a showtitle", season=1, episode=None)
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertEqual('a showtitle s01 or a showtitle "season 1"', furl(urls[0]).args["q"])
self.args = SearchRequest(query="aquery", minage=4)
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertUrlEqual("https://www.nzbclub.com/nzbrss.aspx?ig=2&ns=1&q=aquery&rpp=250&sn=1&st=5&ds=4", urls[0])
self.args = SearchRequest(query="aquery", minage=18 * 31) #Beyond the last defined limit of days
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertUrlEqual("https://www.nzbclub.com/nzbrss.aspx?ig=2&ns=1&q=aquery&rpp=250&sn=1&st=5&ds=27", urls[0])
self.args = SearchRequest(query="aquery", minage=70)
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertUrlEqual("https://www.nzbclub.com/nzbrss.aspx?ig=2&ns=1&q=aquery&rpp=250&sn=1&st=5&ds=12", urls[0])
self.args = SearchRequest(query="aquery", maxage=18 * 31) # Beyond the last defined limit of days, so don't limit
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertUrlEqual("https://www.nzbclub.com/nzbrss.aspx?ig=2&ns=1&q=aquery&rpp=250&sn=1&st=5", urls[0])
self.args = SearchRequest(query="aquery", minage=4, maxage=70)
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertUrlEqual("https://www.nzbclub.com/nzbrss.aspx?ig=2&ns=1&q=aquery&rpp=250&sn=1&st=5&de=13&ds=4", urls[0])
self.args = SearchRequest(query="aquery", minsize=3)
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertUrlEqual("https://www.nzbclub.com/nzbrss.aspx?ig=2&ns=1&q=aquery&rpp=250&sn=1&st=5&szs=8", urls[0])
self.args = SearchRequest(query="aquery", minsize=2400)
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertUrlEqual("https://www.nzbclub.com/nzbrss.aspx?ig=2&ns=1&q=aquery&rpp=250&sn=1&st=5&szs=23", urls[0])
self.args = SearchRequest(query="aquery", maxsize=2400)
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertUrlEqual("https://www.nzbclub.com/nzbrss.aspx?ig=2&ns=1&q=aquery&rpp=250&sn=1&st=5&sze=24", urls[0])
self.args = SearchRequest(query="aquery", maxsize=30*1024*1024) #Beyond the last defined limit of size, so don't limit
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertUrlEqual("https://www.nzbclub.com/nzbrss.aspx?ig=2&ns=1&q=aquery&rpp=250&sn=1&st=5", urls[0])
self.args = SearchRequest(query="aquery", minsize=3, maxsize=2400)
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertUrlEqual("https://www.nzbclub.com/nzbrss.aspx?ig=2&ns=1&q=aquery&rpp=250&sn=1&st=5&sze=24&szs=8", urls[0])
self.args = SearchRequest(query="aquery", forbiddenWords=["ignorethis"])
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertEqual("https://www.nzbclub.com/nzbrss.aspx?rpp=250&ns=1&sn=1&ig=2&st=5&q=aquery+-ignorethis", urls[0])
self.args = SearchRequest(query="a showtitle", season=2016, episode="08/08")
urls = w.get_showsearch_urls(self.args)
self.assertEqual(1, len(urls))
self.assertEqual('a showtitle "2016 08 08"', furl(urls[0]).args["q"])
@freeze_time("2015-09-24 14:00:00", tz_offset=-4)
def testProcess_results(self):
w = NzbClub(getIndexerSettingByName("nzbclub"))
with open("mock/nzbclub--q-testtitle.xml", encoding="latin-1") as f:
entries = w.process_query_result(f.read(), SearchRequest()).entries
self.assertEqual('testtitle1', entries[0].title)
self.assertEqual("http://www.nzbclub.com/nzb_get/60269450/testtitle1.nzb", entries[0].link)
self.assertEqual(1075514926, entries[0].size)
self.assertEqual("60269450", entries[0].indexerguid)
self.assertEqual(1443019463, entries[0].epoch)
self.assertEqual("2015-09-23T09:44:23-05:00", entries[0].pubdate_utc)
self.assertEqual("Wed, 23 Sep 2015 09:44:23 -0500", entries[0].pubDate)
self.assertEqual(0, entries[0].age_days)
self.assertEqual("http://www.nzbclub.com/nzb_view/60269450/testtitle1", entries[0].details_link)
self.assertEqual("[email protected] (YIFY)", entries[0].poster)
self.assertEqual("alt.binaries.movies", entries[0].group)
def testGetNzbLink(self):
n = NzbClub(getIndexerSettingByName("nzbclub"))
link = n.get_nzb_link("guid", "title")
self.assertEqual("https://www.nzbclub.com/nzb_get/guid/title.nzb", link)
|
python
|
from PIL import ImageGrab
from PIL import BmpImagePlugin
from aip import AipOcr
from win10toast import ToastNotifier
import sys, os
import keyboard
import time, datetime
import random
from os import path
from multiprocessing.dummy import Pool as ThreadPool
PIC_DIR = r"C:\Users\hytian3019\Pictures\ocr"
# Baidu Cloud account settings
APP_ID = '18427626'
API_KEY = 'GuE2qgGbvECFxdDDahxeL4Vn'
SECRET_KEY = 'nbrSyUGfsXxnCXhyOZ0nN5N804UoXwAO'
# Baidu Cloud OCR API client object
client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
toaster = ToastNotifier()
esc_pressed = False
ctrl_prtscr_pressed = False
def print_sep(n = 50):
    print('\n' + '>' * n + '\n')
def ctrl_prtscr_cb():
global ctrl_prtscr_pressed
print('- [Ctrl + PrtScr] pressed')
toaster.show_toast('PyOCR',"[Ctrl + PrtScr] pressed pressed", duration=1.5)
ctrl_prtscr_pressed = True
def esc_cb():
global ctrl_prtscr_pressed
global esc_pressed
if ctrl_prtscr_pressed:
print('- [Esc] pressed, skip this hot key.')
print_sep()
toaster.show_toast('PyOCR',"[Esc] pressed, skip hot key", duration=1.5)
esc_pressed = True
keyboard.add_hotkey('esc', esc_cb)
keyboard.add_hotkey('Ctrl+print screen', ctrl_prtscr_cb)
def get_file_content(filePath):
with open(filePath, 'rb') as fp:
return fp.read()
def clip_text(astr):
os.system(f'echo {astr} | clip')
def ocr_image(pic_path):
"""
docstring
"""
image = get_file_content(pic_path)
options = dict(
probability = True,
recognize_granularity = 'small',
poly_location = True,
)
data = client.general(image, options)
return data
def ocr():
global ctrl_prtscr_pressed
global esc_pressed
esc_pressed = False
ctrl_prtscr_pressed = False
print('Waiting for hot key [Ctrl + PrtScr] ...')
keyboard.wait('Ctrl+print screen')
while not esc_pressed:
        time.sleep(1)  # wait for the screenshot
        im = ImageGrab.grabclipboard()  # get the image from the clipboard
        if isinstance(im, BmpImagePlugin.DibImageFile):  # if the clipboard content can be parsed as an image
            #print('- Found image on clipboard.')
            # file name
            ts = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
            rnd = str(random.randint(100, 1000))
            # save the image
            pic_path = path.join(PIC_DIR, ts + rnd + '.png')
im.save(pic_path)
#print('- Saved to ', pic_path)
data = ocr_image(pic_path)
#print('- Parsed results: ', data)
words_result = data['words_result']
            data = []  # the content to be saved
for words in words_result:
data.append(words['words'])
data = ''.join(data).strip()
            # replace ASCII punctuation with Chinese punctuation marks
data = data.replace(',', ',')
data = data.replace(':', ':')
data = data.replace(';', ';')
data = data.replace('(', '(')
data = data.replace(')', ')')
print('- OCR result: ', data)
#print('Put text on clipboard.')
print_sep()
clip_text(data)
toaster.show_toast('PyOCR', data, duration=1.5)
break
if __name__ == "__main__":
#ocr()
#clip_text('好的让我')
#toaster.show_toast('PyOCR',"Hot key pressed", duration=2)
print(ocr_image(r'c:\temp\ocr.png'))
|
python
|
"""
targetstateinfidelity.py - This module defines a cost function that
penalizes the infidelity of an evolved state and a target state.
"""
import autograd.numpy as anp
import numpy as np
from qoc.models import Cost
from qoc.standard.functions import conjugate_transpose
class TargetStateInfidelity(Cost):
"""
This cost penalizes the infidelity of an evolved state
and a target state.
Fields:
cost_multiplier
name
requires_step_evaluation
state_count
target_states_dagger
"""
name = "target_state_infidelity"
requires_step_evaluation = False
def __init__(self, target_states, cost_multiplier=1.):
"""
See class fields for arguments not listed here.
Arguments:
target_states
"""
super().__init__(cost_multiplier=cost_multiplier)
self.state_count = target_states.shape[0]
self.target_states_dagger = conjugate_transpose(target_states)
def cost(self, controls, states, system_eval_step):
"""
Compute the penalty.
Arguments:
controls
states
system_eval_step
Returns:
cost
"""
# The cost is the infidelity of each evolved state and its target state.
inner_products = anp.matmul(self.target_states_dagger, states)[:, 0, 0]
fidelities = anp.real(inner_products * anp.conjugate(inner_products))
fidelity_normalized = anp.sum(fidelities) / self.state_count
infidelity = 1 - fidelity_normalized
return infidelity * self.cost_multiplier
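

# Minimal usage sketch (hypothetical states, not part of qoc; assumes
# conjugate_transpose swaps the last two axes): two single-qubit states stored
# as column vectors of shape (state_count, hilbert_size, 1).
if __name__ == "__main__":
    target_states = np.stack([np.array([[1.], [0.]], dtype=complex),
                              np.array([[0.], [1.]], dtype=complex)])
    evolved_states = np.stack([np.array([[1.], [0.]], dtype=complex),
                               np.array([[1.], [0.]], dtype=complex)])
    cost_fn = TargetStateInfidelity(target_states)
    # The second state misses its target, so the averaged infidelity is 0.5.
    print(cost_fn.cost(None, evolved_states, 0))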
|
python
|
# Micro code snippets for checking data with Pandas
import pandas as pd
import numpy as np

# Read CSV in chunks (x is the desired chunk size, e.g. 100000)
data_read = pd.read_csv('Filename.csv', chunksize=x, encoding="any encoding format, e.g. ISO-8859-1")
data = pd.concat(data_read, ignore_index=True)
# Split the dataframe based on the numeric and categorical values
# Numeric Split
cleansed_data_numeric = data.select_dtypes(include=['number']).columns
data_read_numeric = data.loc[:,lambda data : cleansed_data_numeric]
numeric_null_count = data_read_numeric.apply(lambda x : sum(x.notnull()))
# Categorical Split
cleansed_data_category = data.select_dtypes(exclude=['number']).columns
data_read_category = data.loc[:,lambda data : cleansed_data_category]
categorical_null_count = data_read_category.apply(lambda x : sum(x.notnull()))
# Date Difference and Date Conversion Logic
import datetime as DT
data['Date'].dtype
#pd.to_datetime(data['Date'])
now = pd.Timestamp(DT.datetime.now())
data['Date'] = pd.to_datetime(data['Date'], format='%m/%d/%Y')
# Difference
data['DOB_NEW'] =data['DOB_NEW'].where(data['DOB_NEW'] < now, data['DOB_NEW'] - np.timedelta64(100, 'Y')) # 2
data['Age_Driver1'] = (now - data['DOB_NEW']).astype('<m8[Y]') # 3
# Copying chunks of data from one frame to another
# data=original frame ptsdriver2 = copied frame
ptsdriver2 = data[['ViolPoints2Driver_2',
'ViolPoints1Driver_2',
'ViolPoints3Driver_2',
'ViolPoints4Driver_2',
'ViolPoints5Driver_2',
'ViolPoints6Driver_2',
'ViolPoints7Driver_2',
'ViolPoints8Driver_2',
]].copy()
# Sum of values in the frame row-wise
ptsdriver2['Points_Driver2'] = ptsdriver2.apply(lambda x : x.sum(),axis=1)
# Replace Blank values with NaN
dataframe.replace(r'^\s*$', np.NaN, regex=True, inplace = True)
# Scaling the values to remove if you want to treat numeric values on same scale
# Libraries
from sklearn.preprocessing import StandardScaler
#Step1 : Create ScaleClean using
#Example :
ScaleClean = StandardScaler().fit(data[['Zip']])
#Step2 : Create Clean transform
#Example :
CleanTransform = ScaleClean.transform(data[['Zip']])
#Step3 : Create dataframe of the Clean Transform
#Example :
clean = pd.DataFrame(CleanTransform)
#Step4 : Join/Concatenate the Frames
frames = [data,clean]
data = pd.concat(frames,axis=1)
#Step4 : Drop the Original Columns and Rename the New Ones
data = data.drop(['Zip'],1)
data = data.rename(columns={0:'Zip'})
#Copying the data in a different frame based on column value condition
datax = data[data['Gender']=='M']
# Striptime and Date Difference in days
data['DiscoveryDate']= pd.DatetimeIndex(data['Discovery Date']).date
data['OccurrenceStartDate']= pd.DatetimeIndex(data['Occurrence Start Date']).date
data['Occurrence_Discovery_Diff']= (data['OccurrenceStartDate'] - data['DiscoveryDate'])/np.timedelta64(1, 'D')
# Remove negative values from columns
dat_pos1 =data_numeric[(data_numeric[
['Net Loss','Recovery Amount',
'Estimated Gross Loss',
'Recovery Amount (percent)',
'Occurrence_Discovery_Diff']] > 0).all(1)]
|
python
|
import numpy as np
import tensorflow as tf
class Weights:
def __init__(self, initializer, depth):
self.initializer = initializer
self.depth = depth
def __call__(self, num_inputs, num_outputs):
x = tf.Variable(self.initializer(shape=(num_inputs, self.depth)))
y = tf.Variable(self.initializer(shape=(self.depth, num_outputs)))
return tf.matmul(x, y)
class Biases:
def __init__(self, initializer, depth):
self.initializer = initializer
self.depth = depth
def __call__(self, num_units):
x = tf.Variable(self.initializer(shape=(1, self.depth)))
y = tf.Variable(self.initializer(shape=(self.depth, num_units)))
return tf.reshape(tf.matmul(x, y), shape=(num_units,))
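# A minimal usage sketch (not from the original source): both classes build a parameter
# tensor as the product of two rank-`depth` factors, so the trainable parameter count
# scales with `depth` rather than with num_inputs * num_outputs.
if __name__ == '__main__':
    initializer = tf.random_normal_initializer(stddev=0.1)
    weights = Weights(initializer, depth=8)(num_inputs=64, num_outputs=32)  # shape (64, 32)
    biases = Biases(initializer, depth=8)(num_units=32)                     # shape (32,)
    print(weights.shape, biases.shape)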
|
python
|
#!/usr/bin/env python3
# ============================================================================================= #
# Distributed dynamic map fusion via federated learning for intelligent networked vehicles, #
# 2021 International Conference on Robotics and Automation (ICRA) #
# ============================================================================================= #
# amend relative import
import sys
from pathlib import Path
sys.path.append( Path(__file__).resolve().parent.parent.as_posix() ) #repo path
sys.path.append( Path(__file__).resolve().parent.as_posix() ) #file path
from params import *
# original import
import subprocess as sp
from pathlib import Path
ITER_MAX = 10
counter = 0
print(ENTRY_FILE)
while counter < ITER_MAX:
print('===================== %d ====================='%counter)
    sp.run([ENTRY_FILE, str(counter)])  # subprocess arguments must be strings
    counter += 1
|
python
|
import argparse
import csv
import torch
import torch.nn as nn
import torch.nn.functional as F
#from models.gatedconv import InpaintGCNet, InpaintDirciminator
from models.sa_gan import InpaintSANet, InpaintSADirciminator
from models.loss import SNDisLoss, SNGenLoss, ReconLoss
from data.inpaint_dataset import InpaintDataset
from util.evaluation import AverageMeter
from evaluation import metrics
from PIL import Image
import pickle as pkl
import numpy as np
import logging
import time
import sys
import os
from test_images import validate
from util.util import load_consistent_state_dict
def train(netG, netD, GANLoss, ReconLoss, DLoss, optG, optD, dataloader, epoch, img_size, loss_writer):
"""
    Training phase for the generator and the spectral-normalized PatchGAN discriminator
    from Free-Form Image Inpainting with Gated Convolution (SN-PatchGAN).
"""
netG.train()
netD.train()
for i, (imgs, masks, _, _, _) in enumerate(dataloader):
# masks = masks['val']
# Optimize Discriminator
optD.zero_grad(), netD.zero_grad(), netG.zero_grad(), optG.zero_grad()
align_corners=True
# imgs = F.interpolate(imgs, img_size, mode='bicubic', align_corners=align_corners)
# imgs = imgs.clamp(min=-1, max=1)
# masks = F.interpolate(masks, img_size, mode='bicubic', align_corners=align_corners)
# masks = (masks > 0).type(torch.FloatTensor)
imgs, masks = imgs.cuda(), masks.cuda()
coarse_imgs, recon_imgs = netG(imgs, masks)
complete_imgs = recon_imgs * masks + imgs * (1 - masks)
pos_imgs = torch.cat([imgs, masks, torch.full_like(masks, 1.)], dim=1)
neg_imgs = torch.cat([complete_imgs, masks, torch.full_like(masks, 1.)], dim=1)
pos_neg_imgs = torch.cat([pos_imgs, neg_imgs], dim=0)
pred_pos_neg = netD(pos_neg_imgs)
pred_pos, pred_neg = torch.chunk(pred_pos_neg, 2, dim=0)
d_loss = DLoss(pred_pos, pred_neg)
# losses['d_loss'].update(d_loss.item(), imgs.size(0))
d_loss_val = d_loss.item()
d_loss.backward(retain_graph=True)
optD.step()
# Optimize Generator
optD.zero_grad(), netD.zero_grad(), optG.zero_grad(), netG.zero_grad()
pred_neg = netD(neg_imgs)
#pred_pos, pred_neg = torch.chunk(pred_pos_neg, 2, dim=0)
g_loss = GANLoss(pred_neg)
r_loss = ReconLoss(imgs, coarse_imgs, recon_imgs, masks)
whole_loss = g_loss + r_loss
# Update the recorder for losses
# losses['g_loss'].update(g_loss.item(), imgs.size(0))
# losses['r_loss'].update(r_loss.item(), imgs.size(0))
# losses['whole_loss'].update(whole_loss.item(), imgs.size(0))
g_loss_val = g_loss.item()
r_loss_val = r_loss.item()
whole_loss_val = whole_loss.item()
whole_loss.backward()
optG.step()
if (i+1) % 25 == 0:
print("Epoch {0} [{1}/{2}]: Whole Loss:{whole_loss:.4f} "
"Recon Loss:{r_loss:.4f} GAN Loss:{g_loss:.4f} D Loss:{d_loss:.4f}" \
.format(epoch, i+1, len(dataloader), whole_loss=whole_loss_val, r_loss=r_loss_val \
,g_loss=g_loss_val, d_loss=d_loss_val))
loss_writer.writerow([epoch,whole_loss_val, r_loss_val, g_loss_val, d_loss_val])
def main(args):
if not os.path.exists(args.logdir):
os.makedirs(args.logdir)
dataset_type = args.dataset
# Dataset setting
train_dataset = InpaintDataset(args.train_image_list,\
{'val':args.train_mask_list},
mode='train', img_size=args.img_shape)
train_loader = train_dataset.loader(batch_size=args.batch_size, shuffle=True,
num_workers=4,pin_memory=True)
val_dataset = InpaintDataset(args.val_image_list,\
{'val':args.val_mask_list},
# {'val':args.val_mask_list},
mode='val', img_size=args.img_shape)
val_loader = val_dataset.loader(batch_size=1, shuffle=False,
num_workers=1)
# Define the Network Structure
netG = InpaintSANet()
netD = InpaintSADirciminator()
netG.cuda()
netD.cuda()
if args.load_weights != '':
whole_model_path = args.load_weights
nets = torch.load(whole_model_path)
netG_state_dict, netD_state_dict = nets['netG_state_dict'], nets['netD_state_dict']
# netG.load_state_dict(netG_state_dict)
load_consistent_state_dict(netG_state_dict, netG)
netD.load_state_dict(netD_state_dict)
# Define loss
recon_loss = ReconLoss(*([1.2, 1.2, 1.2, 1.2]))
gan_loss = SNGenLoss(0.005)
dis_loss = SNDisLoss()
    lr, decay = args.learning_rate, args.weight_decay  # use the --weight_decay flag instead of hard-coding 0.0
optG = torch.optim.Adam(netG.parameters(), lr=lr, weight_decay=decay)
optD = torch.optim.Adam(netD.parameters(), lr=4*lr, weight_decay=decay)
best_score = 0
# Create loss and acc file
loss_writer = csv.writer(open(os.path.join(args.logdir, 'loss.csv'),'w'), delimiter=',')
acc_writer = csv.writer(open(os.path.join(args.logdir, 'acc.csv'),'w'), delimiter=',')
# Start Training
for i in range(args.epochs):
#train data
train(netG, netD, gan_loss, recon_loss, dis_loss, optG, optD, train_loader, i+1, args.img_shape, loss_writer)
# validate
output_dir = os.path.join(args.result_dir,str(i+1))
mse, ssim = validate(netG, val_loader, args.img_shape, output_dir, args.gt_dir)
score = 1 - mse/100 + ssim
print('MSE: ', mse, ' SSIM:', ssim, ' SCORE:', score)
acc_writer.writerow([i+1,mse,ssim,score])
saved_model = {
'epoch': i + 1,
'netG_state_dict': netG.state_dict(),
'netD_state_dict': netD.state_dict(),
# 'optG' : optG.state_dict(),
# 'optD' : optD.state_dict()
}
torch.save(saved_model, '{}/epoch_{}_ckpt.pth.tar'.format(args.logdir, i+1))
if score > best_score:
            torch.save(saved_model, '{}/best_ckpt.pth.tar'.format(args.logdir))
best_score = score
print('New best score at epoch', i+1)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--logdir', type=str, default='logs', help='')
parser.add_argument('--gt_dir', type=str, default='InpaintBenchmark/dlcv_gt_srgb', help='')
parser.add_argument('--result_dir', type=str, default='results', help='')
parser.add_argument('--dataset', type=str, default='places2', help='')
parser.add_argument('--train_image_list', type=str, default='TrainImgs/gt_srgb.txt', help='')
parser.add_argument('--train_mask_list', type=str, default='TrainImgs/masks.txt', help='')
parser.add_argument('--val_image_list', type=str, default='InpaintBenchmark/dlcv_list.txt', help='')
parser.add_argument('--val_mask_list', type=str, default='InpaintBenchmark/dlcv_mask.txt', help='')
# parser.add_argument('--train_image_list', type=str, default='TrainImgs/gt_small.txt', help='')
# parser.add_argument('--train_mask_list', type=str, default='TrainImgs/masks_small.txt', help='')
# parser.add_argument('--val_image_list', type=str, default='InpaintBenchmark/dlcv_list_small.txt', help='')
# parser.add_argument('--val_mask_list', type=str, default='InpaintBenchmark/dlcv_mask_small.txt', help='')
parser.add_argument('--img_shape', type=int, default=256, help='')
parser.add_argument('--model_path', type=str, default='model_logs/pretrained.pth.tar', help='')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='')
parser.add_argument('--weight_decay', type=float, default=0.0, help='')
parser.add_argument('--batch_size', type=int, default=4, help='')
parser.add_argument('--epochs', type=int, default=300, help='')
# parser.add_argument('--load_weights', type=str, default='', help='')
parser.add_argument('--load_weights', type=str, default='model_logs/pretrained.pth.tar', help='')
# parser.add_argument('--', type=str, default='', help='')
# parser.add_argument('--', type=int, default=, help='')
# parser.add_argument('--', type=float, default=, help='')
args = parser.parse_args()
main(args)
|
python
|
from flask import Flask,jsonify,request
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column,Integer,Float,String
import os
from flask_marshmallow import Marshmallow
from flask_jwt_extended import JWTManager,jwt_required,create_access_token
from flask_mail import Mail,Message
app=Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///'+os.path.join(basedir,"planets.db")
app.config['JWT_SECRET_KEY']='super secret'  # change this in production
app.config['MAIL_SERVER']='smtp.mailtrap.io'
app.config['MAIL_PORT'] = 2525
app.config['MAIL_USERNAME'] = ''
# app.config['MAIL_USERNAME'] = os.environ['MAIL_USERNAME']
app.config['MAIL_PASSWORD'] = ''
# app.config['MAIL_PASSWORD'] = os.environ['MAIL_PASSWORD']
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USE_SSL'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
jwt = JWTManager(app)
mail=Mail(app)
# Start
@app.cli.command("db_create")
def db_create():
db.create_all()
print("databse created")
@app.cli.command("db_drop")
def db_drop():
db.drop_all()
print("databse droped")
'''
seeding the database
'''
@app.cli.command("db_seed")
def db_seed():
mercury = Planet(
planet_name='Mercury',
planet_type='Class D',
home_star='Sol',
mass=2.258e23,
radius=1516,
distance=35.98e6)
venus = Planet(
planet_name='Venus',
planet_type='Class K',
home_star='Sol',
mass=4.867e24,
radius=3760,
distance=67.24e6)
earth = Planet(
planet_name='Earth',
planet_type='Class M',
home_star='Sol',
mass=5.972e24,
radius=3959,
distance=92.96e6)
db.session.add(mercury)
db.session.add(venus)
db.session.add(earth)
test_user = User(first_name='William',
last_name='Herschel',
email='[email protected]',
password='P@ssw0rd')
db.session.add(test_user)
db.session.commit()
print("databse seeded!")
# End
@app.route("/",methods=["GET"])
def hello_world():
return jsonify( message ="hi this is looking awesome"),200
@app.route("/<int:id>",methods=["GET"])
def hello(id):
return jsonify(message=f"hi you entered the number is {id}"),400
@app.route("/parameters")
def parameters():
name=request.args.get("name")
age=int(request.args.get("age"))
if age<18:
return jsonify(message="hi "+name+ ', you are not old,hence you are not able to accesss this page'),401
# by default status code is 200 or ok
return jsonify(message="hello "+name+" you are old.")
@app.route("/url_parameter/<string:name>/<int:age>")
# def url_parameter(name,age): same work as below
def url_parameter(name:str,age:int):
if age<18:
return jsonify(message="hi "+name+ ', you are not old,hence you are not able to accesss this page'),401
# by default status code is 200 or ok
return jsonify(message="hello "+name+" you are old.")
##
@app.route("/planets",methods=["GET"])
def planets():
planet_list=Planet.query.all()
result = planets_schema.dump(planet_list)
return jsonify(result)
# return jsonify(data=planet_list)
@app.route("/register",methods=["POST"])
def register():
email=request.form['email']
test=User.query.filter_by(email=email).first()
if test:
return jsonify(message="email is already exist!"),409
first_name=request.form['first_name']
last_name=request.form['last_name']
password=request.form['password']
db.session.add(User(first_name=first_name,last_name=last_name,email=email,password=password))
db.session.commit()
return jsonify(message="user Created Successfully!"),201
@app.route("/login",methods=["POST"])
def login():
# api==json, or form
if request.is_json:
# if user is trying to login via api
email=request.json['email']
password=request.json['password']
else:
# if user is logging via form
email=request.form['email']
password=request.form['password']
test=User.query.filter_by(email=email,password=password).first()
if test:
# create token
access_token=create_access_token(identity=email)
return jsonify(message="login Successfully!",access_token=access_token)
else:
return jsonify(message="bad password or email"),401
@app.route("/retrieve_password/<string:email>",methods=["GET"])
def retrieve_password(email:str):
user = User.query.filter_by(email=email).first()
if user:
msg = Message(
"Your planetry api password is "+user.password,
sender="[email protected]",
recipients=[email])
mail.send(msg)
return jsonify(message="password sent to "+email)
else:
return jsonify(message=email+" email doesn't exist!"),401
@app.route("/planet_details/<int:planet_id>",methods=["GET"])
def planet_details(planet_id:int):
planet = Planet.query.filter_by(planet_id=planet_id).first()
if planet:
result = planet_schema.dump(planet)
return jsonify(result)
else:
return jsonify(message="that planet does not exist!"),404
@app.route("/add_planet",methods=["POST"])
@jwt_required()
def add_planet():
planet_name= request.form['planet_name']
planet = Planet.query.filter_by(planet_name=planet_name).first()
if planet:
return jsonify(message="the planet is exists already!"),409
else:
planet_type=request.form['planet_type']
home_star=request.form['home_star']
mass=request.form['mass']
radius=request.form["radius"]
distance=request.form["distance"]
db.session.add(Planet(planet_name=planet_name,planet_type=planet_type,home_star=home_star,mass=mass,radius=radius,distance=distance))
db.session.commit()
return jsonify(message="You added a planet Successfully!"),201
@app.route("/update_planet",methods=["PUT"])
@jwt_required()
def update_planet():
planet_id= request.form['planet_id']
planet = Planet.query.filter_by(planet_id=planet_id).first()
if planet:
planet.planet_name=request.form['planet_name']
planet.planet_type=request.form['planet_type']
planet.home_star=request.form['home_star']
planet.mass=request.form['mass']
planet.radius=request.form["radius"]
planet.distance=request.form["distance"]
db.session.commit()
return jsonify(message="You updated a planet Successfully!"),202
else:
return jsonify(message="the planet does not exists!"),404
@app.route("/remove_planet/<int:planet_id>",methods=["DELETE"])
@jwt_required()
def remove_planet(planet_id:int):
planet = Planet.query.filter_by(planet_id=planet_id).first()
if planet:
db.session.delete(planet)
db.session.commit()
return jsonify(message="You deleted a planet Successfully!"),202
else:
return jsonify(message="the planet does not exists!"),404
# Databse Models
class User(db.Model):
__tablename__="User"
id=Column(Integer,primary_key=True)
first_name=Column(String)
last_name=Column(String)
email=Column(String,unique=True)
password=Column(String)
class Planet(db.Model):
planet_id = Column(Integer,primary_key=True)
planet_type=Column(String)
planet_name=Column(String)
home_star=Column(String)
mass=Column(Float)
radius=Column(Float)
distance=Column(Float)
class UserSchema(ma.Schema):
class Meta:
fields=("id","first_name","last_name","email","password")
class PlanetSchema(ma.Schema):
class Meta:
fields=("planet_id","planet_type","planet_name","home_star","mass","radius","distance")
user_schema = UserSchema()
users_schema = UserSchema(many=True)
planet_schema = PlanetSchema()
planets_schema = PlanetSchema(many=True)
if __name__=="__main__":
app.run(debug=True)
|
python
|
# -*- coding: utf-8 -*-
import numpy as np
from math import factorial, exp, trunc
from linear_congruential_generator import LinearCongruentialGenerator
lcg = LinearCongruentialGenerator()
def poisson(lambda_value, random_number):
return round(((lambda_value ** random_number) * (np.e ** (-lambda_value))) / factorial(random_number), 3)
def poisson_random_value(lambda_value):
    # algorithm based on Knuth's algorithm
    # generating the non-stationary Poisson process (thinning algorithm)
L = exp(-lambda_value)
p = 1.0
k = 0
r1 = lcg.generate_random_numbers(1).pop()
while (p>L):
r2 = lcg.generate_random_numbers(1, initial_seed=r1).pop()
r1 = r2
k+=1
p *= r2
return k - 1
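# A small sanity check (an assumption, not part of the original script): the empirical mean
# of many draws from poisson_random_value should approach lambda.
def check_poisson_sampler(lambda_value=5, n=1000):
    draws = [poisson_random_value(lambda_value) for _ in range(n)]
    print('empirical mean = {:.2f}, expected about {}'.format(sum(draws) / float(n), lambda_value))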
def table_poisson():
for i in range(15):
aux = []
for j in drange(1, 5, 0.5):
aux.append(poisson(j, i))
print(aux)
def drange(start, stop, step):
r = start
while r <= stop:
yield r
r += step
def problema3():
    primeiros_30 = 0
    for i in drange(30, 60, 10.0):
        probabilidade_dez_pessoas_pico = poisson_random_value(15)
        primeiros_30 += probabilidade_dez_pessoas_pico
        print('[11:{}] : {} people'.format(i, probabilidade_dez_pessoas_pico))
    print('Total number of people = {}'.format(primeiros_30))
    horario_de_pico = 0
    for i in drange(10, 60, 10.0):
        probabilidade_dez_pessoas_pico = poisson_random_value(25)
        horario_de_pico += probabilidade_dez_pessoas_pico
        print('[13:{}] : {} people'.format(i, probabilidade_dez_pessoas_pico))
    print('Total number of people = {}'.format(horario_de_pico))
    fim_espediente = 0
    for i in drange(10, 60, 10.0):
        probabilidade_dez_pessoas_pico = poisson_random_value(5)
        fim_espediente += probabilidade_dez_pessoas_pico
        print('[13:{}] : {} people'.format(i, probabilidade_dez_pessoas_pico))
    print('Total number of people = {}'.format(fim_espediente))
    print('Next Monday there will be approximately: {}'.format(primeiros_30 + horario_de_pico + fim_espediente))
problema3()
#####################################################
# Sample output:
# [11:30] : 6 people
# [11:40.0] : 8 people
# [11:50.0] : 8 people
# [11:60.0] : 8 people
# Total number of people = 30
# [13:10] : 15 people
# [13:20.0] : 10 people
# [13:30.0] : 15 people
# [13:40.0] : 15 people
# [13:50.0] : 13 people
# [13:60.0] : 9 people
# Total number of people = 77
# [13:10] : 3 people
# [13:20.0] : 2 people
# [13:30.0] : 2 people
# [13:40.0] : 3 people
# [13:50.0] : 3 people
# [13:60.0] : 3 people
# Total number of people = 16
# Next Monday there will be approximately: 123
#####################################################
|
python
|
import rclpy
from rclpy.node import Node
import numpy as np
from .handeye_4dof import Calibrator4DOF, robot_pose_selector
from .handeye_4dof import transformations as tf
from geometry_msgs.msg import PoseArray
from geometry_msgs.msg import Pose
from std_srvs.srv import Empty
from ament_index_python.packages import get_package_share_directory
HANDEYE_4DOF_SHARE = get_package_share_directory('handeye_4dof_ros2')
class Handeye4Dof(Node):
def __init__(self):
super().__init__('handeye_4dof_node')
self.declare_parameters(
namespace='',
parameters=[
('base_to_hand_topic', 'base_to_robot'),
('robot_to_marker_topic', 'robot_to_marker'),
('calculate_nonlinear', False),
('antiparallel_screw_axes', True),
('calibrated_pose_topic', 'calibrated_pose')
]
)
base_to_hand_topic = self.get_parameter('base_to_hand_topic').value
robot_to_marker_topic = self.get_parameter('robot_to_marker_topic').value
self.calculate_nonlinear = self.get_parameter('calculate_nonlinear').value
self.antiparallel_screw_axes = self.get_parameter('antiparallel_screw_axes').value
calibrated_pose_topic = self.get_parameter('calibrated_pose_topic').value
self.base_to_hand = self.create_subscription(
PoseArray,
base_to_hand_topic,
self.base_to_hand_callback,
10)
self.robot_to_marker = self.create_subscription(
PoseArray,
robot_to_marker_topic,
self.camera_to_marker_callback,
10)
self.calibrated_pose = self.create_publisher(
Pose,
calibrated_pose_topic, 10)
self.calculate_calib_srv = self.create_service(Empty, 'calculate_calibration', self.calculate_calib_callback)
self.calibration_pose = Pose()
self.base_to_hand_list = []
self.camera_to_marker_list = []
# this is connected to an empty service to keep message types generic
def calculate_calib_callback(self, req, res):
if len(self.base_to_hand_list) > 0 and len(self.camera_to_marker_list) > 0:
self.calculate_handeye_calibration()
self.calibrated_pose.publish(self.calibration_pose)
else:
self.get_logger().info("Lists are empty!")
return res
# Takes in a pose array and converts it to a list of numpy arrays
def base_to_hand_callback(self, msg):
self.base_to_hand_list = []
for i in range(0, len(msg.poses)):
self.base_to_hand_list.append(self.convert_pose_to_transform_matrix(msg.poses[i]))
# Takes in a pose array and converts it to a list of numpy arrays
def camera_to_marker_callback(self, msg):
self.camera_to_marker_list = []
for i in range(0, len(msg.poses)):
self.camera_to_marker_list.append(self.convert_pose_to_transform_matrix(msg.poses[i]))
#convert ROS2 pose message to 4x4 transform matrix
def convert_pose_to_transform_matrix(self, pose):
quat_matrix = tf.quaternion_matrix([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])
matrix4x4 = np.array([[quat_matrix[0][0], quat_matrix[0][1], quat_matrix[0][2], pose.position.x],
[quat_matrix[1][0], quat_matrix[1][1], quat_matrix[1][2], pose.position.y],
[quat_matrix[2][0], quat_matrix[2][1], quat_matrix[2][2], pose.position.z],
[0, 0, 0, 1]])
return matrix4x4
# Obtain optimal motions as dual quaternions.
def calculate_handeye_calibration(self):
motions = robot_pose_selector(self.camera_to_marker_list, self.base_to_hand_list)
# Initialize calibrator with precomputed motions.
cb = Calibrator4DOF(motions)
dq_x = cb.calibrate(antiparallel_screw_axes=self.antiparallel_screw_axes)
# Hand to Camera TF obtained from handeye calibration.
hand_to_camera = np.linalg.inv(dq_x.as_transform())
quat = tf.quaternion_from_matrix(hand_to_camera)
# Hand to Camera TF obtained from post nonlinear refinement.
if self.calculate_nonlinear:
hand_to_camera = cb.nonlinear_refinement(self.base_to_hand_list, self.camera_to_marker_list, hand_to_camera)
quat = tf.quaternion_from_matrix(hand_to_camera)
        # Note: with a 4-DOF calibration the z-translation is unobservable, so its value is not meaningful.
        # tf transformations follow the xyzw quaternion convention
calibration_pose = Pose()
calibration_pose.position.x = hand_to_camera[0][3]
calibration_pose.position.y = hand_to_camera[1][3]
calibration_pose.position.z = hand_to_camera[2][3]
calibration_pose.orientation.x = quat[0]
calibration_pose.orientation.y = quat[1]
calibration_pose.orientation.z = quat[2]
calibration_pose.orientation.w = quat[3]
self.calibration_pose = calibration_pose
print(calibration_pose)
def main(args=None):
rclpy.init(args=args)
handeye_4dof_node = Handeye4Dof()
rclpy.spin(handeye_4dof_node)
handeye_4dof_node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()
|
python
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
import logging
import os
import shutil
def serialize_hyperparameters(hp):
return {str(k): json.dumps(v) for (k, v) in hp.items()}
def save_as_json(data, filename):
with open(filename, "wt") as f:
json.dump(data, f)
def file_exists(resource_folder, file_name):
return os.path.exists(os.path.join(resource_folder, file_name))
def create_config_files(program, s3_source_archive, path, additional_hp=None):  # avoid a mutable default argument
rc = {
"current_host": "algo-1",
"hosts": ["algo-1"]
}
hp = {'sagemaker_region': 'us-west-2',
'sagemaker_program': program,
'sagemaker_submit_directory': s3_source_archive,
'sagemaker_container_log_level': logging.INFO}
    hp.update(additional_hp or {})
ic = {
"training": {"ContentType": "trainingContentType"},
"evaluation": {"ContentType": "evalContentType"},
"Validation": {}
}
write_conf_files(rc, hp, ic, path)
def write_conf_files(rc, hp, ic, path):
os.makedirs('{}/input/config'.format(path))
rc_file = os.path.join(path, 'input/config/resourceconfig.json')
hp_file = os.path.join(path, 'input/config/hyperparameters.json')
ic_file = os.path.join(path, 'input/config/inputdataconfig.json')
hp = serialize_hyperparameters(hp)
save_as_json(rc, rc_file)
save_as_json(hp, hp_file)
save_as_json(ic, ic_file)
def copy_resource(resource_path, opt_ml_path, relative_src_path, relative_dst_path=None):
if not relative_dst_path:
relative_dst_path = relative_src_path
shutil.copytree(os.path.join(resource_path, relative_src_path),
os.path.join(opt_ml_path, relative_dst_path))
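# A minimal usage sketch (an assumption, not from the original module): write the three
# SageMaker config files under a scratch directory.
#   create_config_files('train.py', 's3://my-bucket/source.tar.gz', '/tmp/opt_ml')
#   -> /tmp/opt_ml/input/config/{resourceconfig,hyperparameters,inputdataconfig}.json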
|
python
|
from __future__ import unicode_literals
import os
import json
from functools import wraps
from datetime import datetime, date
from contextlib import contextmanager
from threading import RLock, Condition, current_thread
from collections import defaultdict
try:
    from collections.abc import Sized, Iterable, Mapping  # Python 3.3+
except ImportError:  # Python 2 fallback
    from collections import Sized, Iterable, Mapping
def is_listy(x):
"""
returns a boolean indicating whether the passed object is "listy",
which we define as a sized iterable which is not a map or string
"""
return isinstance(x, Sized) and isinstance(x, Iterable) and not isinstance(x, (Mapping, type(b''), type('')))
def listify(x):
"""
returns a list version of x if x is a non-string iterable, otherwise
returns a list with x as its only element
"""
return list(x) if is_listy(x) else [x]
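# Illustrative checks (not part of the original module): lists and tuples are "listy",
# while strings, bytes, and mappings are not.
#   is_listy([1, 2]) and is_listy((1, 2))            # -> True
#   is_listy('abc') or is_listy({'a': 1})            # -> False
#   listify('abc') == ['abc']; listify([1, 2]) == [1, 2]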
class serializer(json.JSONEncoder):
"""
JSONEncoder subclass for plugins to register serializers for types.
Plugins should not need to instantiate this class directly, but
they are expected to call serializer.register() for new data types.
"""
_registry = {}
_datetime_format = '%Y-%m-%d %H:%M:%S.%f'
def default(self, o):
if type(o) in self._registry:
preprocessor = self._registry[type(o)]
else:
for klass, preprocessor in self._registry.items():
if isinstance(o, klass):
break
else:
                return json.JSONEncoder.default(self, o)  # raises TypeError for unregistered types
return preprocessor(o)
@classmethod
def register(cls, type, preprocessor):
"""
Associates a type with a preprocessor so that RPC handlers may
pass non-builtin JSON types. For example, Sideboard already
does the equivalent of
>>> serializer.register(datetime, lambda dt: dt.strftime('%Y-%m-%d %H:%M:%S.%f'))
This method raises an exception if you try to register a
preprocessor for a type which already has one.
:param type: the type you are registering
:param preprocessor: function which takes one argument which is
the value to serialize and returns a json-
serializable value
"""
assert type not in cls._registry, '{} already has a preprocessor defined'.format(type)
cls._registry[type] = preprocessor
serializer.register(date, lambda d: d.strftime('%Y-%m-%d'))
serializer.register(datetime, lambda dt: dt.strftime(serializer._datetime_format))
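# A minimal usage sketch (an assumption, not from the original module): pass this encoder
# class to json.dumps so registered types such as datetime serialize cleanly.
#   json.dumps({'when': datetime(2020, 1, 1, 12, 0)}, cls=serializer)
#   -> '{"when": "2020-01-01 12:00:00.000000"}'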
def cached_property(func):
"""decorator for making readonly, memoized properties"""
pname = "_" + func.__name__
@property
@wraps(func)
def caching(self, *args, **kwargs):
if not hasattr(self, pname):
setattr(self, pname, func(self, *args, **kwargs))
return getattr(self, pname)
return caching
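# Usage sketch (an assumption, not from the original module): the wrapped method runs once
# per instance, and later accesses return the memoized value.
#   class Report(object):
#       @cached_property
#       def data(self):
#           return expensive_query()   # hypothetical helper; called only on first access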
def request_cached_property(func):
"""
Sometimes we want a property to be cached for the duration of a request,
with concurrent requests each having their own cached version. This does
that via the threadlocal class, such that each HTTP request CherryPy serves
and each RPC request served via websocket or JSON-RPC will have its own
cached value, which is cleared and then re-generated on later requests.
"""
from sideboard.lib import threadlocal
name = func.__module__ + '.' + func.__name__
@property
@wraps(func)
def with_caching(self):
val = threadlocal.get(name)
if val is None:
val = func(self)
threadlocal.set(name, val)
return val
return with_caching
class _class_property(property):
def __get__(self, cls, owner):
return self.fget.__get__(None, owner)()
def class_property(cls):
"""
For whatever reason, the @property decorator isn't smart enough to recognize
classmethods and behave differently on them than on instance methods. This
property may be used to create a class-level property, useful for singletons
and other one-per-class properties. Class properties are read-only.
"""
return _class_property(classmethod(cls))
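# Usage sketch (an assumption, not from the original module): the decorated method is
# readable directly on the class, no instance required.
#   class Widget(object):
#       @class_property
#       def label(cls):
#           return cls.__name__.lower()
#   Widget.label   # -> 'widget'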
def entry_point(func):
"""
Decorator used to define entry points for command-line scripts. Sideboard
ships with a "sep" (Sideboard Entry Point) command line script which can be
used to call into any plugin-defined entry point after deleting sys.argv[0]
so that the entry point name will be the first argument. For example, if a
plugin had this entry point:
@entry_point
def some_action():
print(sys.argv)
Then someone in a shell ran the command:
sep some_action foo bar
It would print:
['some_action', 'foo', 'bar']
:param func: a function which takes no arguments; its name will be the name
of the command, and an exception is raised if a function with
the same name has already been registered as an entry point
"""
assert func.__name__ not in _entry_points, 'An entry point named {} has already been implemented'.format(func.__name__)
_entry_points[func.__name__] = func
return func
_entry_points = {}
class RWGuard(object):
"""
This utility class provides the ability to perform read/write locking, such
that we can have any number of readers OR a single writer. We give priority
to writers, who will get the lock before any readers.
These locks are reentrant, meaning that the same thread can acquire a read
or write lock multiple times, and will then need to release the lock the
same number of times it was acquired. A thread with an acquired read lock
cannot acquire a write lock, or vice versa. Locks can only be released by
the threads which acquired them.
This class is named RWGuard rather than RWLock because it is not itself a
lock, e.g. it doesn't have an acquire method, it cannot be directly used as
a context manager, etc.
"""
def __init__(self):
self.lock = RLock()
self.waiting_writer_count = 0
self.acquired_writer = defaultdict(int)
self.acquired_readers = defaultdict(int)
self.ready_for_reads = Condition(self.lock)
self.ready_for_writes = Condition(self.lock)
@property
@contextmanager
def read_locked(self):
"""
Context manager which acquires a read lock on entrance and releases it
on exit. Any number of threads may acquire a read lock.
"""
self.acquire_for_read()
try:
yield
finally:
self.release()
@property
@contextmanager
def write_locked(self):
"""
Context manager which acquires a write lock on entrance and releases it
on exit. Only one thread may acquire a write lock at a time.
"""
self.acquire_for_write()
try:
yield
finally:
self.release()
def acquire_for_read(self):
"""
NOTE: consumers are encouraged to use the "read_locked" context manager
instead of this method where possible.
This method acquires the read lock for the current thread, blocking if
necessary until there are no other threads with the write lock acquired
or waiting for the write lock to be available.
"""
tid = current_thread().ident
assert tid not in self.acquired_writer, 'Threads which have already acquired a write lock may not lock for reading'
with self.lock:
while self.acquired_writer or (self.waiting_writer_count and tid not in self.acquired_readers):
self.ready_for_reads.wait()
self.acquired_readers[tid] += 1
def acquire_for_write(self):
"""
NOTE: consumers are encouraged to use the "write_locked" context manager
instead of this method where possible.
This method acquires the write lock for the current thread, blocking if
necessary until no other threads have the write lock acquired and no
thread has the read lock acquired.
"""
tid = current_thread().ident
assert tid not in self.acquired_readers, 'Threads which have already acquired a read lock may not lock for writing'
with self.lock:
while self.acquired_readers or (self.acquired_writer and tid not in self.acquired_writer):
self.waiting_writer_count += 1
self.ready_for_writes.wait()
self.waiting_writer_count -= 1
self.acquired_writer[tid] += 1
def release(self):
"""
Release the read or write lock held by the current thread. Since these
locks are reentrant, this method must be called once for each time the
lock was acquired. This method raises an exception if called by a
thread with no read or write lock acquired.
"""
tid = current_thread().ident
assert tid in self.acquired_readers or tid in self.acquired_writer, 'this thread does not hold a read or write lock'
with self.lock:
for counts in [self.acquired_readers, self.acquired_writer]:
counts[tid] -= 1
if counts[tid] <= 0:
del counts[tid]
wake_readers = not self.waiting_writer_count
wake_writers = self.waiting_writer_count and not self.acquired_readers
if wake_writers:
with self.ready_for_writes:
self.ready_for_writes.notify()
elif wake_readers:
with self.ready_for_reads:
self.ready_for_reads.notify_all()
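# A minimal usage sketch (an assumption, not part of the original module): any number of
# readers may hold the guard at once, while a writer gets exclusive access.
if __name__ == '__main__':
    guard = RWGuard()

    def read_shared_state():
        with guard.read_locked:
            pass  # any number of threads may be in here together

    def write_shared_state():
        with guard.write_locked:
            pass  # only one thread at a time, and never concurrently with readers

    read_shared_state()
    write_shared_state()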
|
python
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
from programy.security.authenticate.clientidauth import ClientIdAuthenticationService
from programy.config.brain.security import BrainSecurityAuthenticationConfiguration
from programy.bot import Bot
from programy.config.bot.bot import BotConfiguration
from programy.context import ClientContext
from programytest.client import TestClient
class MockClientIdAuthenticationService(ClientIdAuthenticationService):
def __init__(self, brain_config):
ClientIdAuthenticationService.__init__(self, brain_config)
self.should_authorised = False
self.raise_exception = False
def user_auth_service(self, context):
if self.raise_exception is True:
raise Exception("Bad thing happen!")
return self.should_authorised
class ClientIdAuthenticationServiceTests(unittest.TestCase):
def setUp(self):
client = TestClient()
self._client_context = ClientContext(client, "unknown")
self._client_context.bot = Bot(BotConfiguration(), client)
self._client_context.bot.configuration.conversations._max_histories = 3
self._client_context.brain = self._client_context.bot.brain
def test_init(self):
service = ClientIdAuthenticationService(BrainSecurityAuthenticationConfiguration())
self.assertIsNotNone(service)
self._client_context._userid = "console"
self.assertTrue(service.authenticate(self._client_context))
self._client_context._userid = "anyone"
self.assertFalse(service.authenticate(self._client_context))
def test_authorise_success(self):
service = MockClientIdAuthenticationService(BrainSecurityAuthenticationConfiguration())
service.should_authorised = True
self.assertTrue("console" in service.authorised)
self._client_context._userid = "console"
self.assertTrue(service.authenticate(self._client_context))
self.assertFalse("unknown" in service.authorised)
self._client_context._userid = "unknown"
self.assertTrue(service.authenticate(self._client_context))
self.assertTrue("unknown" in service.authorised)
def test_authorise_failure(self):
service = MockClientIdAuthenticationService(BrainSecurityAuthenticationConfiguration())
service.should_authorised = False
self.assertFalse("unknown" in service.authorised)
self.assertFalse(service.authenticate(self._client_context))
def test_authorise_exception(self):
service = MockClientIdAuthenticationService(BrainSecurityAuthenticationConfiguration())
service.should_authorised = True
service.raise_exception = True
        self.assertFalse(service.authenticate(self._client_context))
|
python
|
from dataclasses import dataclass, field
import backtrader as bt
from models.Analyzers.CustomReturns import CustomReturns_analyzer
from models.Analyzers.ReturnsVolatility import ReturnsVolatility_analyzer
from models.Analyzers.TradesInfo import TradesInfo_analyzer
from models.Analyzers.CalmarRatio import CalmarRatio_analyzer
from backtrader.analyzers import SharpeRatio_A
from backtrader.analyzers import TradeAnalyzer
from backtrader.analyzers import DrawDown
class FullMetrics_analyzer(bt.Analyzer):
def __init__(self):
self.custom_returns_analyzer = CustomReturns_analyzer()
self.ret_vol_analyzer = ReturnsVolatility_analyzer()
self.sharpe_ratio_analyzer = SharpeRatio_A()
self.trade_analyzer = TradeAnalyzer()
self.dd_analyzer = DrawDown()
self.trades_info_analyzer = TradesInfo_analyzer()
self.calmar_analyzer = CalmarRatio_analyzer()
def get_analysis(self):
ann_ret = self.custom_returns_analyzer.get_analysis()["ann_ret"]
ret_vol = self.ret_vol_analyzer.get_analysis()["volatility"]
sharpe_ratio = self.sharpe_ratio_analyzer.get_analysis()["sharperatio"]
calmar_ratio = self.calmar_analyzer.get_analysis()["calmar_ratio"]
trade_analysis = self.trade_analyzer.get_analysis()
pnlcomm = trade_analysis["pnl"]["net"]["total"]
pnl = trade_analysis["pnl"]["gross"]["total"]
fees = pnl - pnlcomm
open_trades_nb = trade_analysis.total.open
close_trades_nb = trade_analysis.total.closed
close_shorts_nb = trade_analysis.short.total
close_longs_nb = trade_analysis.long.total
avg_return = self.trades_info_analyzer.get_analysis()["avg_return"]
avg_return_short = self.trades_info_analyzer.get_analysis()["avg_return_short"]
avg_return_long = self.trades_info_analyzer.get_analysis()["avg_return_long"]
winrate = trade_analysis.won.total / close_trades_nb
len_in_market = trade_analysis.len.total
average_trade_len = trade_analysis.len.average
longest_trade_len = trade_analysis.len.max
average_won_len = trade_analysis.len.won.average
average_lost_len = trade_analysis.len.lost.average
drawdown_analysis = self.dd_analyzer.get_analysis()
average_drawdown = drawdown_analysis["drawdown"]
average_drawdown_length = drawdown_analysis["len"]
max_drawdown = drawdown_analysis["max"]["drawdown"]
max_drawdown_length = drawdown_analysis["max"]["len"]
return {
"Annual returns": ann_ret,
"PNL net": pnlcomm,
"Fees": fees,
"Winrate": winrate,
"Total trades": close_trades_nb,
"Total long": close_longs_nb,
"Total short": close_shorts_nb,
"Open trades": open_trades_nb,
"Average return per trade": avg_return,
"Average return per long": avg_return_long,
"Average return per short": avg_return_short,
"Time in market": len_in_market,
"Average trade len": average_trade_len,
"Max trade len": longest_trade_len,
"Average won len": average_won_len,
"Average lost len": average_lost_len,
"Average drawdown": average_drawdown,
"Average drawdown length": average_drawdown_length,
"Max drawdown": max_drawdown,
"Max drawdown length": max_drawdown_length,
"Annualized Sharpe ratio": sharpe_ratio,
"Calmar ratio": calmar_ratio,
"Returns volatility": ret_vol,
}
@dataclass
class FullMetrics:
"""
FullMetrics analyzer.
    Default name is "full_metrics"
"""
analyzer: FullMetrics_analyzer = FullMetrics_analyzer
parameters: dict = field(default_factory=lambda: {'_name': "full_metrics"})
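# A minimal usage sketch (an assumption, not from the original module): register the
# analyzer on a backtrader Cerebro instance and read the combined metrics after the run.
#   fm = FullMetrics()
#   cerebro.addanalyzer(fm.analyzer, **fm.parameters)
#   results = cerebro.run()
#   print(results[0].analyzers.full_metrics.get_analysis())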
|
python
|
def unorder_search(item, items: list):
if not isinstance(items, list):
print(f'The items {items} MUST be of type list')
return
    for i, value in enumerate(items):
        if item == value:
            return i
return None
data = [87, 47, 23, 53, 20, 56, 6, 19, 8, 41]
print(unorder_search(20, data))
|
python
|
# -*- coding: utf-8 -*-
import re
from xkeysnail.transform import *
define_keymap(lambda wm_class: wm_class != 'Gnome-terminal', {
K('LSuper-V'): K('C-V'),
K('LSuper-X'): K('C-X'),
K('LSuper-C'): K('C-C'),
K('LSuper-A'): K('C-A')
}, 'non-terminal')
define_keymap(re.compile('Gnome-terminal'), {
K('LSuper-V'): K('C-Shift-V'),
K('LSuper-X'): K('C-Shift-X'),
K('LSuper-C'): K('C-Shift-C'),
K('LSuper-A'): K('C-A')
}, 'terminal')
define_multipurpose_modmap({
Key.LEFT_ALT: [Key.MUHENKAN, Key.LEFT_ALT],
Key.RIGHT_ALT: [Key.HENKAN, Key.RIGHT_ALT]
})
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 2 14:24:56 2020
@author: rucha
"""
import pandas as pd
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
# Read dataset from csv
dataset = pd.read_csv("merged.csv")
dataset= dataset[dataset.sign != 2]
label = dataset['sign']
data = dataset.drop(['Cell', 'sign'], axis=1)
print ("Total number of rows in dataset: {}\n".format(len(dataset)))
print(dataset.head())
# Features
#features = ['Day','Month','Year','Humidity','Max Temperature','Min Temperature',
# 'Rainfall','Sea Level Pressure','Sunshine','Wind Speed']
#target = 'Cloud'
#select features from csv file to remove redunant features
#can be commented if you want to use all features
feature_list = pd.read_csv('selected_features.csv', index_col=0)
feature_list = feature_list.values.tolist()
feature_list = [k[0] for k in feature_list]
data = data[feature_list]
x_train, x_test, y_train, y_test = train_test_split(data, label,
train_size=0.7, test_size=0.3, shuffle=False)
# Print samples after running train_test_split
print("X_train: {}, Y_train: {}".format(len(x_train), len(x_test)))
print("X_train: {}, Y_train: {}".format(len(y_train), len(y_test)))
print("\n")
# Multinomial Naive Bayes Model setup after parameter tuning
model = MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
model.fit(x_train, y_train)
# Print results to evaluate model
print("Showing Performance Metrics for Naive Bayes Multinomial\n")
print ("Training Accuracy: {}".format(model.score(x_train, y_train)))
predicted = model.predict(x_test)
print ("Testing Accuracy: {}".format(accuracy_score(y_test, predicted)))
print("\n")
print("Cross Validation Accuracy: \n")
cv_accuracy = cross_val_score(estimator=model, X=x_train, y=y_train, cv=10)
print("Accuracy using 10 folds: ")
print(cv_accuracy)
print("\n")
print("Mean accuracy: {}".format(cv_accuracy.mean()))
print("Standard Deviation: {}".format(cv_accuracy.std()))
print("\n")
print("Confusion Matrix for Naive Bayes Multinomial\n")
labels = [0, 1, 2]
cm = confusion_matrix(y_test, predicted, labels=labels)
print(cm)
print("\n")
print('Precision, Recall and f-1 Scores for Naive Bayes Multinomial\n')
print(classification_report(y_test, predicted))
|
python
|
from django.contrib import admin
from django import forms
from django.db import models
from django.utils.translation import ugettext as _
from medicament.models import *
class GroupAdmin(admin.ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'adverse_reaction']})]
list_display = ('name', 'adverse_reaction')
search_fields = ('name', 'adverse_reaction')
ordering = ('id',)
admin.site.register(Group, GroupAdmin)
class ComponentAdmin(admin.ModelAdmin):
fieldsets = [(None, {'fields':
['name', 'kind_component', 'groups']})]
list_display = ('name', 'kind_component', )
search_fields = ('name', 'kind_component',)
list_filter = ['kind_component']
ordering = ('kind_component', 'name',)
formfield_overrides = {
models.ManyToManyField: {'widget': forms.CheckboxSelectMultiple},
}
admin.site.register(Component, ComponentAdmin)
|
python
|