content
stringlengths 0
894k
| type
stringclasses 2
values |
---|---|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
class dictionary:
    """Wordlist of common phpMyAdmin / SQL-admin panel web paths.

    Exposes a single attribute, ``paths``: a list of URL path prefixes
    used to probe a web server for known admin-panel locations.

    Fix: removed the duplicate entries present in the original list
    ('/phpMyAdmin-2.6.2-rc1/' and '/phpMyAdmin-2.6.3/' each appeared
    twice), so every path is only probed once.
    """

    def __init__(self):
        self.paths = ['/phpMyAdmin/',
                      '/phpmyadmin/',
                      '/PMA/',
                      '/admin/',
                      '/dbadmin/',
                      '/mysql/',
                      '/myadmin/',
                      '/phpmyadmin2/',
                      '/phpMyAdmin2/',
                      '/phpMyAdmin-2/',
                      '/php-my-admin/',
                      '/phpMyAdmin-2.2.3/',
                      '/phpMyAdmin-2.2.6/',
                      '/phpMyAdmin-2.5.1/',
                      '/phpMyAdmin-2.5.4/',
                      '/phpMyAdmin-2.5.5-rc1/',
                      '/phpMyAdmin-2.5.5-rc2/',
                      '/phpMyAdmin-2.5.5/',
                      '/phpMyAdmin-2.5.5-pl1/',
                      '/phpMyAdmin-2.5.6-rc1/',
                      '/phpMyAdmin-2.5.6-rc2/',
                      '/phpMyAdmin-2.5.6/',
                      '/phpMyAdmin-2.5.7/',
                      '/phpMyAdmin-2.5.7-pl1/',
                      '/phpMyAdmin-2.6.0-alpha/',
                      '/phpMyAdmin-2.6.0-alpha2/',
                      '/phpMyAdmin-2.6.0-beta1/',
                      '/phpMyAdmin-2.6.0-beta2/',
                      '/phpMyAdmin-2.6.0-rc1/',
                      '/phpMyAdmin-2.6.0-rc2/',
                      '/phpMyAdmin-2.6.0-rc3/',
                      '/phpMyAdmin-2.6.0/',
                      '/phpMyAdmin-2.6.0-pl1/',
                      '/phpMyAdmin-2.6.0-pl2/',
                      '/phpMyAdmin-2.6.0-pl3/',
                      '/phpMyAdmin-2.6.1-rc1/',
                      '/phpMyAdmin-2.6.1-rc2/',
                      '/phpMyAdmin-2.6.1/',
                      '/phpMyAdmin-2.6.1-pl1/',
                      '/phpMyAdmin-2.6.1-pl2/',
                      '/phpMyAdmin-2.6.1-pl3/',
                      '/phpMyAdmin-2.6.2-rc1/',
                      '/phpMyAdmin-2.6.2-beta1/',
                      '/phpMyAdmin-2.6.2/',
                      '/phpMyAdmin-2.6.2-pl1/',
                      '/phpMyAdmin-2.6.3/',
                      '/phpMyAdmin-2.6.3-rc1/',
                      '/phpMyAdmin-2.6.3-pl1/',
                      '/phpMyAdmin-2.6.4-rc1/',
                      '/phpMyAdmin-2.6.4-pl1/',
                      '/phpMyAdmin-2.6.4-pl2/',
                      '/phpMyAdmin-2.6.4-pl3/',
                      '/phpMyAdmin-2.6.4-pl4/',
                      '/phpMyAdmin-2.6.4/',
                      '/phpMyAdmin-2.7.0-beta1/',
                      '/phpMyAdmin-2.7.0-rc1/',
                      '/phpMyAdmin-2.7.0-pl1/',
                      '/phpMyAdmin-2.7.0-pl2/',
                      '/phpMyAdmin-2.7.0/',
                      '/phpMyAdmin-2.8.0-beta1/',
                      '/phpMyAdmin-2.8.0-rc1/',
                      '/phpMyAdmin-2.8.0-rc2/',
                      '/phpMyAdmin-2.8.0/',
                      '/phpMyAdmin-2.8.0.1/',
                      '/phpMyAdmin-2.8.0.2/',
                      '/phpMyAdmin-2.8.0.3/',
                      '/phpMyAdmin-2.8.0.4/',
                      '/phpMyAdmin-2.8.1-rc1/',
                      '/phpMyAdmin-2.8.1/',
                      '/phpMyAdmin-2.8.2/',
                      '/sqlmanager/',
                      '/mysqlmanager/',
                      '/p/m/a/',
                      '/PMA2005/',
                      '/pma2005/',
                      '/phpmanager/',
                      '/php-myadmin/',
                      '/phpmy-admin/',
                      '/webadmin/',
                      '/sqlweb/',
                      '/websql/',
                      '/webdb/',
                      '/mysqladmin/',
                      '/mysql-admin/']
|
python
|
#!/usr/bin/env python
#############################################################
# ubi_reader/ubi_io
# (c) 2013 Jason Pruitt ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################
from ubi.block import sort
class ubi_file(object):
    """UBI image file object.

    Wraps the raw image file and handles all actual file interaction:
    seeking, reading, iterating erase-block-sized chunks, and pulling
    whole PEBs or just their LEB payload.

    Arguments:
    Str:path          -- Path to file to parse.
    Int:block_size    -- Erase block size of NAND in bytes.
    Int:start_offset  -- (optional) Where UBI data starts in the file.
    Int:end_offset    -- (optional) Where to stop looking in the file.
    """

    def __init__(self, path, block_size, start_offset=0, end_offset=None):
        self._fhandle = open(path, 'rb')
        self._start_offset = start_offset
        if end_offset:
            self._end_offset = end_offset
        else:
            # No explicit end given: use the physical end of the file.
            self._fhandle.seek(0, 2)
            self._end_offset = self.tell()
        self._block_size = block_size
        if start_offset >= self._end_offset:
            raise Exception('Start offset larger than file size!')
        self._fhandle.seek(self._start_offset)

    @property
    def start_offset(self):
        # Byte offset where UBI data begins.
        return self._start_offset

    @start_offset.setter
    def start_offset(self, i):
        self._start_offset = i

    @property
    def end_offset(self):
        # Byte offset where UBI data ends.
        return self._end_offset

    @property
    def block_size(self):
        # Erase block (PEB) size in bytes.
        return self._block_size

    def seek(self, offset):
        """Put file head to specified byte offset."""
        self._fhandle.seek(offset)

    def read(self, size):
        """Read `size` bytes from the file handle."""
        return self._fhandle.read(size)

    def tell(self):
        """Return byte offset of current file location."""
        return self._fhandle.tell()

    def reset(self):
        """Reset file position to start_offset."""
        self._fhandle.seek(self.start_offset)

    def reader(self):
        """Generator yielding block_size chunks from start to end offset."""
        self.reset()
        while True:
            pos = self._fhandle.tell()
            if self.end_offset and pos > self.end_offset:
                break
            chunk_size = self.block_size
            # Last chunk may be shorter than a full erase block.
            if self.end_offset and self.end_offset - pos < self.block_size:
                chunk_size = self.end_offset - pos
            chunk = self.read(chunk_size)
            if not chunk:
                break
            yield chunk

    def read_block(self, block):
        """Read complete PEB data from file.

        Argument:
        Obj:block -- Block data is desired for.
        """
        self.seek(block.file_offset)
        return self._fhandle.read(block.size)

    def read_block_data(self, block):
        """Read LEB data (payload only, headers/padding excluded) from file.

        Argument:
        Obj:block -- Block data is desired for.
        """
        self.seek(block.file_offset + block.ec_hdr.data_offset)
        payload_len = block.size - block.ec_hdr.data_offset - block.vid_hdr.data_pad
        return self._fhandle.read(payload_len)
class leb_virtual_file():
    """Present a volume's LEBs as one contiguous, seekable byte stream.

    Reads go through a one-entry cache of the most recently fetched LEB,
    since sequential consumers usually read the same LEB repeatedly.
    """

    def __init__(self, ubi, volume):
        self._ubi = ubi
        self._volume = volume
        self._seek = 0
        self._blocks = sort.by_leb(self._volume.get_blocks(self._ubi.blocks))
        self.leb_data_size = len(self._blocks) * self._ubi.leb_size
        # Cache of the last LEB fetched (-1 means nothing cached yet).
        self._last_leb = -1
        self._last_buf = ''

    def read(self, i):
        """Return `i` bytes from the current position, advancing it."""
        leb_size = self._ubi.leb_size
        leb = int(self.tell() / leb_size)
        offset = self.tell() % leb_size
        if leb != self._last_leb:
            # Cache miss: fetch this LEB's payload and remember it.
            self._last_buf = self._ubi.file.read_block_data(
                self._ubi.blocks[self._blocks[leb]])
            self._last_leb = leb
        self.seek(self.tell() + i)
        return self._last_buf[offset:offset + i]

    def reset(self):
        """Rewind the virtual position to the start."""
        self.seek(0)

    def seek(self, offset):
        """Set the virtual position to `offset`."""
        self._seek = offset

    def tell(self):
        """Return the current virtual position."""
        return self._seek

    def reader(self):
        """Yield each LEB's data in order, padding gaps with 0xFF bytes."""
        expected_leb = 0
        for block in self._blocks:
            # Missing LEB numbers are emitted as erased (0xFF) blocks.
            while self._ubi.blocks[block].leb_num != expected_leb:
                expected_leb += 1
                yield '\xff' * self._ubi.leb_size
            expected_leb += 1
            yield self._ubi.file.read_block_data(self._ubi.blocks[block])
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from codegen.spec import *
from codegen.mir_emitter import *
from codegen.isel import *
from codegen.x64_def import *
from codegen.matcher import *
class X64OperandFlag(IntFlag):
    """Target flags attached to x64 machine operands.

    Each flag describes how a symbol operand's immediate must be
    relocated or interpreted (GOT/PLT access, the various TLS access
    models, Darwin stubs, COFF stubs, ...).
    """
    # NOTE(review): IntFlag's auto() assigns 1 to the first member, so
    # NO_FLAG has value 1 rather than 0 here — confirm this is intended.
    NO_FLAG = auto()
    # GOT_ABSOLUTE_ADDRESS - On a symbol operand, this represents a
    # relocation of:
    # SYMBOL_LABEL + [. - PICBASELABEL]
    GOT_ABSOLUTE_ADDRESS = auto()
    # PIC_BASE_OFFSET - On a symbol operand this indicates that the
    # immediate should get the value of the symbol minus the PIC base label:
    # SYMBOL_LABEL - PICBASELABEL
    PIC_BASE_OFFSET = auto()
    # GOT - On a symbol operand this indicates that the immediate is the
    # offset to the GOT entry for the symbol name from the base of the GOT.
    #
    # See the X86-64 ELF ABI supplement for more details.
    # SYMBOL_LABEL @GOT
    GOT = auto()
    # GOTOFF - On a symbol operand this indicates that the immediate is
    # the offset to the location of the symbol name from the base of the GOT.
    #
    # See the X86-64 ELF ABI supplement for more details.
    # SYMBOL_LABEL @GOTOFF
    GOTOFF = auto()
    # GOTPCREL - On a symbol operand this indicates that the immediate is
    # offset to the GOT entry for the symbol name from the current code
    # location.
    #
    # See the X86-64 ELF ABI supplement for more details.
    # SYMBOL_LABEL @GOTPCREL
    GOTPCREL = auto()
    # PLT - On a symbol operand this indicates that the immediate is
    # offset to the PLT entry of symbol name from the current code location.
    #
    # See the X86-64 ELF ABI supplement for more details.
    # SYMBOL_LABEL @PLT
    PLT = auto()
    # TLSGD - On a symbol operand this indicates that the immediate is
    # the offset of the GOT entry with the TLS index structure that contains
    # the module number and variable offset for the symbol. Used in the
    # general dynamic TLS access model.
    #
    # See 'ELF Handling for Thread-Local Storage' for more details.
    # SYMBOL_LABEL @TLSGD
    TLSGD = auto()
    # TLSLD - On a symbol operand this indicates that the immediate is
    # the offset of the GOT entry with the TLS index for the module that
    # contains the symbol. When this index is passed to a call to
    # __tls_get_addr, the function will return the base address of the TLS
    # block for the symbol. Used in the x86-64 local dynamic TLS access model.
    #
    # See 'ELF Handling for Thread-Local Storage' for more details.
    # SYMBOL_LABEL @TLSLD
    TLSLD = auto()
    # TLSLDM - On a symbol operand this indicates that the immediate is
    # the offset of the GOT entry with the TLS index for the module that
    # contains the symbol. When this index is passed to a call to
    # ___tls_get_addr, the function will return the base address of the TLS
    # block for the symbol. Used in the IA32 local dynamic TLS access model.
    #
    # See 'ELF Handling for Thread-Local Storage' for more details.
    # SYMBOL_LABEL @TLSLDM
    TLSLDM = auto()
    # GOTTPOFF - On a symbol operand this indicates that the immediate is
    # the offset of the GOT entry with the thread-pointer offset for the
    # symbol. Used in the x86-64 initial exec TLS access model.
    #
    # See 'ELF Handling for Thread-Local Storage' for more details.
    # SYMBOL_LABEL @GOTTPOFF
    GOTTPOFF = auto()
    # INDNTPOFF - On a symbol operand this indicates that the immediate is
    # the absolute address of the GOT entry with the negative thread-pointer
    # offset for the symbol. Used in the non-PIC IA32 initial exec TLS access
    # model.
    #
    # See 'ELF Handling for Thread-Local Storage' for more details.
    # SYMBOL_LABEL @INDNTPOFF
    INDNTPOFF = auto()
    # TPOFF - On a symbol operand this indicates that the immediate is
    # the thread-pointer offset for the symbol. Used in the x86-64 local
    # exec TLS access model.
    #
    # See 'ELF Handling for Thread-Local Storage' for more details.
    # SYMBOL_LABEL @TPOFF
    TPOFF = auto()
    # DTPOFF - On a symbol operand this indicates that the immediate is
    # the offset of the GOT entry with the TLS offset of the symbol. Used
    # in the local dynamic TLS access model.
    #
    # See 'ELF Handling for Thread-Local Storage' for more details.
    # SYMBOL_LABEL @DTPOFF
    DTPOFF = auto()
    # NTPOFF - On a symbol operand this indicates that the immediate is
    # the negative thread-pointer offset for the symbol. Used in the IA32
    # local exec TLS access model.
    #
    # See 'ELF Handling for Thread-Local Storage' for more details.
    # SYMBOL_LABEL @NTPOFF
    NTPOFF = auto()
    # GOTNTPOFF - On a symbol operand this indicates that the immediate is
    # the offset of the GOT entry with the negative thread-pointer offset for
    # the symbol. Used in the PIC IA32 initial exec TLS access model.
    #
    # See 'ELF Handling for Thread-Local Storage' for more details.
    # SYMBOL_LABEL @GOTNTPOFF
    GOTNTPOFF = auto()
    # DLLIMPORT - On a symbol operand "FOO", this indicates that the
    # reference is actually to the "__imp_FOO" symbol. This is used for
    # dllimport linkage on windows.
    DLLIMPORT = auto()
    # DARWIN_NONLAZY - On a symbol operand "FOO", this indicates that the
    # reference is actually to the "FOO$non_lazy_ptr" symbol, which is a
    # non-PIC-base-relative reference to a non-hidden dyld lazy pointer stub.
    DARWIN_NONLAZY = auto()
    # DARWIN_NONLAZY_PIC_BASE - On a symbol operand "FOO", this indicates
    # that the reference is actually to "FOO$non_lazy_ptr - PICBASE", which is
    # a PIC-base-relative reference to a non-hidden dyld lazy pointer stub.
    DARWIN_NONLAZY_PIC_BASE = auto()
    # TLVP - On a symbol operand this indicates that the immediate is
    # some TLS offset.
    #
    # This is the TLS offset for the Darwin TLS mechanism.
    TLVP = auto()
    # TLVP_PIC_BASE - On a symbol operand this indicates that the immediate
    # is some TLS offset from the picbase.
    #
    # This is the 32-bit TLS offset for Darwin TLS in PIC mode.
    TLVP_PIC_BASE = auto()
    # SECREL - On a symbol operand this indicates that the immediate is
    # the offset from beginning of section.
    #
    # This is the TLS offset for the COFF/Windows TLS mechanism.
    SECREL = auto()
    # ABS8 - On a symbol operand this indicates that the symbol is known
    # to be an absolute symbol in range [0,128), so we can use the @ABS8
    # symbol modifier.
    ABS8 = auto()
    # COFFSTUB - On a symbol operand "FOO", this indicates that the
    # reference is actually to the ".refptr.FOO" symbol. This is used for
    # stub symbols on windows.
    COFFSTUB = auto()
def is_null_constant(value):
    """Return True if *value* wraps an integer constant node equal to zero."""
    node = value.node
    return isinstance(node, ConstantDagNode) and node.is_zero
def is_null_fp_constant(value):
    """Return True if *value* wraps a floating-point constant node equal to zero."""
    node = value.node
    return isinstance(node, ConstantFPDagNode) and node.is_zero
def is_x86_zero(value):
    """Return True if *value* is an all-zero integer or FP constant."""
    if is_null_constant(value):
        return True
    return is_null_fp_constant(value)
class X64InstructionSelector(InstructionSelector):
def __init__(self):
    """Create an x64 instruction selector; no extra state beyond the base class."""
    super().__init__()
def lower_wrapper_rip(self, node, dag):
    """Lower a WRAPPER_RIP node into a RIP-relative LEA.

    Builds the five-operand x64 address (base=RIP, scale=1, index=none,
    disp=wrapped symbol, segment=none) and emits LEA32r or LEA64r
    depending on the node's result type.
    """
    MVT = MachineValueType
    noreg = MachineRegister(NOREG)
    result_ty = node.value_types[0]
    rip = DagValue(dag.add_register_node(
        MVT(ValueType.I64), MachineRegister(RIP)), 0)
    one = DagValue(dag.add_target_constant_node(MVT(ValueType.I8), 1), 0)
    no_index = DagValue(dag.add_register_node(MVT(ValueType.I32), noreg), 0)
    displacement = node.operands[0]
    no_segment = DagValue(dag.add_register_node(MVT(ValueType.I16), noreg), 0)
    if result_ty == MVT(ValueType.I64):
        lea_opcode = X64MachineOps.LEA64r
    elif result_ty == MVT(ValueType.I32):
        lea_opcode = X64MachineOps.LEA32r
    else:
        raise ValueError()
    address = (rip, one, no_index, displacement, no_segment)
    return dag.add_machine_dag_node(lea_opcode, node.value_types, *address)
def get_memory_operands(self, dag: Dag, operand: DagValue):
    """Decompose `operand` into the x64 addressing-mode 5-tuple
    (base, scale, index, disp, segment).

    Handles ADD/SUB of a base and a constant, RIP-relative wrappers,
    plain wrappers, and frame indices.

    Fixes:
    - SUB branch: `sub_op1.node.value_ty[0]` was a typo for
      `value_types[0]` (would raise AttributeError).
    - SUB branch: the third case was a duplicated, unreachable
      `elif sub_op1 ... CONSTANT`; it is now the `else` fallback, so
      `base`/`disp` can no longer be unbound (NameError).
    """
    assert(isinstance(operand, DagValue))
    noreg = MachineRegister(NOREG)
    MVT = MachineValueType
    if operand.node.opcode == VirtualDagOps.ADD:
        sub_op1 = operand.node.operands[0]
        sub_op2 = operand.node.operands[1]
        if sub_op2.node.opcode in [VirtualDagOps.CONSTANT, VirtualDagOps.TARGET_CONSTANT]:
            if sub_op1.node.opcode == X64DagOps.WRAPPER_RIP:
                # RIP-relative symbol plus (asserted-zero) constant.
                base = DagValue(dag.add_target_register_node(
                    MVT(ValueType.I64), RIP), 0)
                assert(sub_op2.node.value == 0)
                disp = sub_op1.node.operands[0]
            else:
                base = sub_op1
                disp = sub_op2
        elif sub_op1.node.opcode in [VirtualDagOps.CONSTANT, VirtualDagOps.TARGET_CONSTANT]:
            # Mirror of the case above with the operands swapped.
            if sub_op2.node.opcode == X64DagOps.WRAPPER_RIP:
                base = DagValue(dag.add_target_register_node(
                    MVT(ValueType.I64), RIP), 0)
                assert(sub_op1.node.value == 0)
                disp = sub_op2.node.operands[0]
            else:
                base = sub_op2
                disp = sub_op1
        else:
            raise ValueError()
        scale = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I8), 1), 0)
        index = DagValue(dag.add_register_node(
            MVT(ValueType.I32), noreg), 0)
        segment = DagValue(dag.add_register_node(
            MVT(ValueType.I16), noreg), 0)
        assert(base.node.opcode != X64DagOps.WRAPPER_RIP)
        return (base, scale, index, disp, segment)
    elif operand.node.opcode == VirtualDagOps.SUB:
        sub_op1 = operand.node.operands[0]
        sub_op2 = operand.node.operands[1]
        if sub_op2.node.opcode == VirtualDagOps.CONSTANT:
            # base - const  ->  base + (-const)
            base = sub_op1
            disp = DagValue(dag.add_target_constant_node(
                sub_op2.node.value_types[0], -sub_op2.node.value), 0)
        elif sub_op1.node.opcode == VirtualDagOps.CONSTANT:
            # NOTE(review): const - x is not commutative; folding it as
            # x + (-const) looks wrong — confirm intended semantics.
            base = sub_op2
            disp = DagValue(dag.add_target_constant_node(
                sub_op1.node.value_types[0], -sub_op1.node.value), 0)
        else:
            # Neither operand constant: use the whole SUB as the base.
            base = operand
            disp = DagValue(dag.add_target_constant_node(
                MVT(ValueType.I32), 0), 0)
        scale = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I8), 1), 0)
        index = DagValue(dag.add_register_node(
            MVT(ValueType.I32), noreg), 0)
        segment = DagValue(dag.add_register_node(
            MVT(ValueType.I16), noreg), 0)
        assert(base.node.opcode != X64DagOps.WRAPPER_RIP)
        return (base, scale, index, disp, segment)
    elif operand.node.opcode == X64DagOps.WRAPPER_RIP:
        base = DagValue(dag.add_register_node(
            MVT(ValueType.I64), MachineRegister(RIP)), 0)
        scale = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I8), 1), 0)
        index = DagValue(dag.add_register_node(
            MVT(ValueType.I32), noreg), 0)
        disp = operand.node.operands[0]
        segment = DagValue(dag.add_register_node(
            MVT(ValueType.I16), noreg), 0)
        assert(base.node.opcode != X64DagOps.WRAPPER_RIP)
        return (base, scale, index, disp, segment)
    elif operand.node.opcode == X64DagOps.WRAPPER:
        base = DagValue(dag.add_register_node(
            MVT(ValueType.I64), MachineRegister(RIP)), 0)
        scale = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I8), 1), 0)
        index = DagValue(dag.add_register_node(
            MVT(ValueType.I32), noreg), 0)
        disp = operand.node.operands[0]
        segment = DagValue(dag.add_register_node(
            MVT(ValueType.I16), noreg), 0)
        return (base, scale, index, disp, segment)
    elif operand.node.opcode == VirtualDagOps.FRAME_INDEX:
        base = DagValue(dag.add_frame_index_node(
            operand.ty, operand.node.index, True), 0)
        scale = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I8), 1), 0)
        index = DagValue(dag.add_register_node(
            MVT(ValueType.I32), noreg), 0)
        disp = DagValue(dag.add_target_constant_node(
            MVT(ValueType.I32), 0), 0)
        segment = DagValue(dag.add_register_node(
            MVT(ValueType.I16), noreg), 0)
        assert(base.node.opcode != X64DagOps.WRAPPER_RIP)
        return (base, scale, index, disp, segment)
    raise ValueError()
def select_srl(self, node: DagNode, dag: Dag, new_ops):
    """Select a logical right shift (SRL) node.

    Immediate counts use SHR32ri; variable counts are moved into CL
    (via an 8-bit subregister extract) and use SHRxxrCL.

    Fix: the fallthrough debug label said "select_and" (copy-paste
    error); it now reports "select_srl".
    """
    op1 = new_ops[0]
    op2 = new_ops[1]
    if isinstance(op1.node, DagNode) and isinstance(op2.node, ConstantDagNode):
        # NOTE(review): SHR32ri is used regardless of operand width —
        # 64-bit operands presumably need SHR64ri; confirm.
        return dag.add_machine_dag_node(X64MachineOps.SHR32ri, node.value_types, op1, op2)
    elif isinstance(op1.node, DagNode) and isinstance(op2.node, DagNode):
        # Extract the low 8 bits of the count and copy them into CL.
        subreg_idx_node = dag.add_target_constant_node(
            MachineValueType(ValueType.I32), subregs.index(sub_8bit))
        extract_subreg_node = DagValue(dag.add_node(TargetDagOps.EXTRACT_SUBREG, [MachineValueType(ValueType.I8)],
                                                    op2, DagValue(subreg_idx_node, 0)), 0)
        cl = DagValue(dag.add_target_register_node(
            MachineValueType(ValueType.I8), CL), 0)
        if op1.ty == MachineValueType(ValueType.I32):
            opcode = X64MachineOps.SHR32rCL
        elif op1.ty == MachineValueType(ValueType.I64):
            opcode = X64MachineOps.SHR64rCL
        else:
            raise ValueError()
        copy_to_cl_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
                                                dag.entry, cl, extract_subreg_node), 0)
        # The glue result (value 1) orders the shift after the CL copy.
        return dag.add_machine_dag_node(opcode, node.value_types, op1, copy_to_cl_node.get_value(1))
    print("select_srl")
    print([edge.node for edge in new_ops])
    raise NotImplementedError()
def select_sra(self, node: DagNode, dag: Dag, new_ops):
    """Select an arithmetic right shift (SRA) node.

    Immediate counts use SAR32ri; variable counts are moved into CL
    (via an 8-bit subregister extract) and use SARxxrCL.

    Fix: the fallthrough debug label said "select_and" (copy-paste
    error); it now reports "select_sra".
    """
    op1 = new_ops[0]
    op2 = new_ops[1]
    if isinstance(op1.node, DagNode) and isinstance(op2.node, ConstantDagNode):
        # NOTE(review): SAR32ri is used regardless of operand width —
        # 64-bit operands presumably need SAR64ri; confirm.
        return dag.add_machine_dag_node(X64MachineOps.SAR32ri, node.value_types, op1, op2)
    elif isinstance(op1.node, DagNode) and isinstance(op2.node, DagNode):
        # Extract the low 8 bits of the count and copy them into CL.
        subreg_idx_node = dag.add_target_constant_node(
            MachineValueType(ValueType.I32), subregs.index(sub_8bit))
        extract_subreg_node = DagValue(dag.add_node(TargetDagOps.EXTRACT_SUBREG, [MachineValueType(ValueType.I8)],
                                                    op2, DagValue(subreg_idx_node, 0)), 0)
        cl = DagValue(dag.add_target_register_node(
            MachineValueType(ValueType.I8), CL), 0)
        if op1.ty == MachineValueType(ValueType.I32):
            opcode = X64MachineOps.SAR32rCL
        elif op1.ty == MachineValueType(ValueType.I64):
            opcode = X64MachineOps.SAR64rCL
        else:
            raise ValueError()
        copy_to_cl_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
                                                dag.entry, cl, extract_subreg_node), 0)
        # The glue result (value 1) orders the shift after the CL copy.
        return dag.add_machine_dag_node(opcode, node.value_types, op1, copy_to_cl_node.get_value(1))
    print("select_sra")
    print([edge.node for edge in new_ops])
    raise NotImplementedError()
def select_shl(self, node: DagNode, dag: Dag, new_ops):
    """Select a left shift (SHL) node.

    Immediate counts use SHL32ri; variable counts are moved into CL
    (via an 8-bit subregister extract) and use SHLxxrCL.

    Fix: the fallthrough debug label said "select_and" (copy-paste
    error); it now reports "select_shl".
    """
    op1 = new_ops[0]
    op2 = new_ops[1]
    if isinstance(op1.node, DagNode) and isinstance(op2.node, ConstantDagNode):
        # NOTE(review): SHL32ri is used regardless of operand width —
        # 64-bit operands presumably need SHL64ri; confirm.
        return dag.add_machine_dag_node(X64MachineOps.SHL32ri, node.value_types, op1, op2)
    elif isinstance(op1.node, DagNode) and isinstance(op2.node, DagNode):
        # Extract the low 8 bits of the count and copy them into CL.
        subreg_idx_node = DagValue(dag.add_target_constant_node(
            MachineValueType(ValueType.I32), subregs.index(sub_8bit)), 0)
        extract_subreg_node = DagValue(dag.add_node(TargetDagOps.EXTRACT_SUBREG, [MachineValueType(ValueType.I8)],
                                                    op2, subreg_idx_node), 0)
        cl = DagValue(dag.add_target_register_node(
            MachineValueType(ValueType.I8), CL), 0)
        if op1.ty == MachineValueType(ValueType.I32):
            opcode = X64MachineOps.SHL32rCL
        elif op1.ty == MachineValueType(ValueType.I64):
            opcode = X64MachineOps.SHL64rCL
        else:
            raise ValueError()
        copy_to_cl_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
                                                dag.entry, cl, extract_subreg_node), 0)
        # The glue result (value 1) orders the shift after the CL copy.
        return dag.add_machine_dag_node(opcode, node.value_types, op1, copy_to_cl_node.get_value(1))
    print("select_shl")
    print([edge.node for edge in new_ops])
    raise NotImplementedError()
def select_bitcast(self, node: DagNode, dag: Dag, new_ops):
    """Select a BITCAST node. Not implemented yet — always raises."""
    src = new_ops[0]
    raise NotImplementedError()
def select_trunc(self, node: DagNode, dag: Dag, new_ops):
    """Select TRUNCATE as an EXTRACT_SUBREG of the source register.

    Fix: an unsupported destination width previously fell through with
    `subreg_idx` unbound and crashed with a confusing NameError; it now
    raises NotImplementedError explicitly.
    """
    src = new_ops[0]
    dst_ty = node.value_types[0]
    if isinstance(src.node, DagNode):
        # Pick the subregister index matching the destination width.
        if dst_ty.value_type == ValueType.I8:
            subreg_idx = subregs.index(sub_8bit)
        elif dst_ty.value_type == ValueType.I16:
            subreg_idx = subregs.index(sub_16bit)
        elif dst_ty.value_type == ValueType.I32:
            subreg_idx = subregs.index(sub_32bit)
        else:
            # Truncation to this width is not supported yet.
            raise NotImplementedError()
        subreg_idx_node = DagValue(dag.add_target_constant_node(
            MachineValueType(ValueType.I32), subreg_idx), 0)
        extract_subreg_node = dag.add_node(TargetDagOps.EXTRACT_SUBREG, node.value_types,
                                           src, subreg_idx_node)
        return extract_subreg_node
    raise NotImplementedError()
def select_callseq_start(self, node: DagNode, dag: Dag, new_ops):
    """Select CALLSEQ_START into ADJCALLSTACKDOWN32 (chain goes last)."""
    chain, in_bytes, out_bytes = new_ops[0], new_ops[1], new_ops[2]
    zero = DagValue(dag.add_target_constant_node(
        MachineValueType(ValueType.I32), 0), 0)
    return dag.add_machine_dag_node(
        X64MachineOps.ADJCALLSTACKDOWN32, node.value_types,
        in_bytes, out_bytes, zero, chain)
def select_callseq_end(self, node: DagNode, dag: Dag, new_ops):
    """Select CALLSEQ_END into ADJCALLSTACKUP32, forwarding glue if present."""
    chain, in_bytes, out_bytes = new_ops[0], new_ops[1], new_ops[2]
    operands = [in_bytes, out_bytes, chain]
    glue = self.get_glue(new_ops)
    if glue:
        operands.append(glue)
    return dag.add_machine_dag_node(
        X64MachineOps.ADJCALLSTACKUP32, node.value_types, *operands)
def get_glue(self, operands):
    """Return the first glue-typed operand, or None if there is none."""
    glue_ty = MachineValueType(ValueType.GLUE)
    return next((op for op in operands if op.ty == glue_ty), None)
def select_call(self, node: DagNode, dag: Dag, new_ops):
    """Select a call node into CALLpcrel32, forwarding glue if present."""
    chain = new_ops[0]
    callee = new_ops[1]
    operands = [callee, chain]
    glue = self.get_glue(new_ops)
    if glue:
        operands.append(glue)
    return dag.add_machine_dag_node(
        X64MachineOps.CALLpcrel32, node.value_types, *operands)
def select_return(self, node: DagNode, dag: Dag, new_ops):
    """Select a return node into RET; the chain operand is moved last."""
    chain, *value_ops = new_ops
    return dag.add_machine_dag_node(
        X64MachineOps.RET, node.value_types, *value_ops, chain)
def select_divrem(self, node: DagNode, dag: Dag, new_ops):
    """Select SDIVREM/UDIVREM into the x64 DIV/IDIV sequence.

    x64 division is fixed-register: the dividend lives in AX/DX:AX/
    EDX:EAX/RDX:RAX, and DIV/IDIV produce quotient in the low register
    and remainder in the high one. Signed division sign-extends the
    dividend (CDQ); unsigned division zeroes the high register.
    Only the quotient node is returned.
    """
    op1 = new_ops[0]
    op2 = new_ops[1]
    is_signed = node.opcode == VirtualDagOps.SDIVREM
    if isinstance(op1.node, FrameIndexDagNode) and isinstance(op2.node, ConstantDagNode):
        raise NotImplementedError()
    elif isinstance(op1.node, DagNode):
        # Normalize the divisor: constants become target constants.
        if isinstance(op2.node, DagNode):
            pass
        elif isinstance(op2.node, ConstantDagNode):
            op2 = DagValue(dag.add_target_constant_node(
                op2.ty, op2.node.value), 0)
        else:
            raise NotImplementedError()
        ty = op1.ty
        # Pick the DIV/IDIV opcode matching signedness and width.
        if is_signed:
            if ty == MachineValueType(ValueType.I8):
                opcode = X64MachineOps.IDIV8r
            elif ty == MachineValueType(ValueType.I16):
                opcode = X64MachineOps.IDIV16r
            elif ty == MachineValueType(ValueType.I32):
                opcode = X64MachineOps.IDIV32r
            elif ty == MachineValueType(ValueType.I64):
                opcode = X64MachineOps.IDIV64r
            else:
                raise NotImplementedError()
        else:
            if ty == MachineValueType(ValueType.I8):
                opcode = X64MachineOps.DIV8r
            elif ty == MachineValueType(ValueType.I16):
                opcode = X64MachineOps.DIV16r
            elif ty == MachineValueType(ValueType.I32):
                opcode = X64MachineOps.DIV32r
            elif ty == MachineValueType(ValueType.I64):
                opcode = X64MachineOps.DIV64r
            else:
                raise NotImplementedError()
        # Fixed quotient (lo) / remainder (hi) registers per width.
        if ty == MachineValueType(ValueType.I8):
            lo_reg = DagValue(dag.add_target_register_node(ty, AL), 0)
            hi_reg = DagValue(dag.add_target_register_node(ty, AH), 0)
        elif ty == MachineValueType(ValueType.I16):
            lo_reg = DagValue(dag.add_target_register_node(ty, AX), 0)
            hi_reg = DagValue(dag.add_target_register_node(ty, DX), 0)
        elif ty == MachineValueType(ValueType.I32):
            lo_reg = DagValue(dag.add_target_register_node(ty, EAX), 0)
            hi_reg = DagValue(dag.add_target_register_node(ty, EDX), 0)
        elif ty == MachineValueType(ValueType.I64):
            lo_reg = DagValue(dag.add_target_register_node(ty, RAX), 0)
            hi_reg = DagValue(dag.add_target_register_node(ty, RDX), 0)
        else:
            raise NotImplementedError()
        if is_signed:
            # Copy dividend into the low register (glue result, index 1),
            # sign-extend into the high register with CDQ, then divide.
            # NOTE(review): CDQ is used for every width — 8/16/64-bit
            # presumably need CBW/CWD/CQO; confirm.
            copy_to_lo_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
                                                    dag.entry, lo_reg, op1), 1)
            copy_to_hi_node = DagValue(dag.add_machine_dag_node(X64MachineOps.CDQ, [MachineValueType(ValueType.GLUE)],
                                                                copy_to_lo_node), 0)
            divrem_node = DagValue(dag.add_machine_dag_node(
                opcode, [MachineValueType(ValueType.GLUE)], op2, copy_to_hi_node), 0)
            q_node = DagValue(dag.add_node(VirtualDagOps.COPY_FROM_REG, [lo_reg.ty],
                                           dag.entry, lo_reg, divrem_node), 0)
            r_node = DagValue(dag.add_node(VirtualDagOps.COPY_FROM_REG, [hi_reg.ty],
                                           dag.entry, hi_reg, divrem_node), 0)
        else:
            # Unsigned: materialize zero and copy it into the high
            # register instead of sign-extending.
            zero_value = DagValue(dag.add_target_constant_node(ty, 0), 0)
            if ty == MachineValueType(ValueType.I8):
                mov_ri_opcode = X64MachineOps.MOV8ri
            elif ty == MachineValueType(ValueType.I16):
                mov_ri_opcode = X64MachineOps.MOV16ri
            elif ty == MachineValueType(ValueType.I32):
                mov_ri_opcode = X64MachineOps.MOV32ri
            elif ty == MachineValueType(ValueType.I64):
                mov_ri_opcode = X64MachineOps.MOV64ri
            else:
                raise NotImplementedError()
            zero_value = DagValue(
                dag.add_machine_dag_node(mov_ri_opcode, [ty], zero_value), 0)
            copy_to_lo_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
                                                    dag.entry, lo_reg, op1), 1)
            copy_to_hi_node = DagValue(dag.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)],
                                                    dag.entry, hi_reg, zero_value, copy_to_lo_node), 1)
            divrem_node = DagValue(dag.add_machine_dag_node(
                opcode, [MachineValueType(ValueType.GLUE)], op2, copy_to_hi_node), 0)
            q_node = DagValue(dag.add_node(VirtualDagOps.COPY_FROM_REG, [lo_reg.ty],
                                           dag.entry, lo_reg, divrem_node), 0)
            r_node = DagValue(dag.add_node(VirtualDagOps.COPY_FROM_REG, [hi_reg.ty],
                                           dag.entry, hi_reg, divrem_node), 0)
        # Only the quotient is returned; r_node is built but unused here.
        return q_node.node
    print("select_divrem")
    print([edge.node for edge in new_ops])
    raise NotImplementedError()
def select_copy_from_reg(self, node: DagNode, dag: Dag, new_ops):
    """Select COPY_FROM_REG: kept as-is (identity selection).

    Fix: removed the statements that followed the unconditional
    `return node` — they were unreachable dead code left over from an
    earlier implementation (including a commented-out variant).
    """
    return node
def select_copy_to_reg(self, node: DagNode, dag: Dag, new_ops):
    """Select COPY_TO_REG, first materializing non-register sources.

    Constants are lowered through select_constant; frame indices are
    turned into an LEA of the slot address. Any glue operand found in
    new_ops is forwarded.
    """
    # Note: operands are read from `node`, not `new_ops` (except glue).
    chain = node.operands[0]
    dest = node.operands[1]
    src = node.operands[2]
    if src.node.opcode == VirtualDagOps.CONSTANT:
        src = DagValue(self.select_constant(src.node, dag, []), 0)
    elif src.node.opcode == VirtualDagOps.FRAME_INDEX:
        # Frame index: compute the slot address with LEA.
        lea_ops = self.get_memory_operands(dag, src)
        if src.ty == MachineValueType(ValueType.I64):
            lea_operand = X64MachineOps.LEA64r
        elif src.ty == MachineValueType(ValueType.I32):
            lea_operand = X64MachineOps.LEA32r
        else:
            raise ValueError()
        src = DagValue(dag.add_machine_dag_node(
            lea_operand, [src.ty], *lea_ops), 0)
    glue = self.get_glue(new_ops)
    ops = [chain, dest, src]
    if glue:
        ops.append(glue)
    return dag.add_node(VirtualDagOps.COPY_TO_REG, node.value_types, *ops)
def select_code(self, node: DagNode, dag: Dag):
    """Try pattern-based selection for `node`.

    First attempts every instruction definition's patterns, then the
    standalone x64 pattern list. Returns the constructed node, or None
    if nothing matched.
    """
    value = DagValue(node, 0)
    # Instruction-definition patterns.
    for inst in X64MachineOps.insts():
        for pattern in inst.patterns:
            _, res = pattern.match(None, [value], 0, dag)
            if res:
                return construct(inst, node, dag, res)
    # Standalone x64 patterns.
    for pattern in x64_patterns:
        _, res = pattern.match(node, dag)
        if res:
            return pattern.construct(node, dag, res).node
    return None
def select_constant(self, node: DagNode, dag: Dag, new_ops):
    """Select a CONSTANT node into the MOVri of matching width.

    Fix: added the missing I16 case (MOV16ri), consistent with the
    width handling in select_divrem; previously 16-bit constants hit
    the ValueError fallthrough.
    """
    value = DagValue(dag.add_target_constant_node(
        node.value_types[0], node.value), 0)
    ops = [value]
    if node.value_types[0] == MachineValueType(ValueType.I64):
        operand = X64MachineOps.MOV64ri
    elif node.value_types[0] == MachineValueType(ValueType.I32):
        operand = X64MachineOps.MOV32ri
    elif node.value_types[0] == MachineValueType(ValueType.I16):
        operand = X64MachineOps.MOV16ri
    elif node.value_types[0] == MachineValueType(ValueType.I8):
        operand = X64MachineOps.MOV8ri
    else:
        raise ValueError()
    return dag.add_machine_dag_node(operand, node.value_types, *ops)
def select_scalar_to_vector(self, node: DagNode, dag: Dag, new_ops):
    """Select SCALAR_TO_VECTOR; only F32 -> V4F32 is supported."""
    src_ty = node.operands[0].ty
    dst_ty = node.value_types[0]
    # Guard clause: reject any combination other than F32 -> V4F32.
    if src_ty != MachineValueType(ValueType.F32) or dst_ty != MachineValueType(ValueType.V4F32):
        raise ValueError()
    regclass_id_val = DagValue(dag.add_target_constant_node(
        MachineValueType(ValueType.I32), regclasses.index(VR128)), 0)
    return dag.add_node(TargetDagOps.COPY_TO_REGCLASS, node.value_types,
                        node.operands[0], regclass_id_val)
    def select(self, node: DagNode, dag: Dag):
        """Select one DAG node into machine form.

        Order matters: already-selected target nodes pass through untouched,
        then the generated pattern tables are tried, then the hand-written
        selectors and pass-through cases below.  Unsupported opcodes raise
        NotImplementedError.
        """
        new_ops = node.operands

        # Already a target instruction: nothing to do.
        if isinstance(node.opcode, TargetDagOps):
            return node

        # Table-driven pattern matching first.
        matched = self.select_code(node, dag)

        if matched:
            return matched

        # NOTE(review): reg_info appears unused in this method.
        reg_info = dag.mfunc.target_info.get_register_info()

        # Hand-written selectors for opcodes the pattern tables miss.
        SELECT_TABLE = {
            VirtualDagOps.COPY_FROM_REG: self.select_copy_from_reg,
            VirtualDagOps.COPY_TO_REG: self.select_copy_to_reg,
            VirtualDagOps.SRL: self.select_srl,
            VirtualDagOps.SHL: self.select_shl,
            VirtualDagOps.SRA: self.select_sra,
            VirtualDagOps.SDIVREM: self.select_divrem,
            VirtualDagOps.UDIVREM: self.select_divrem,
            VirtualDagOps.BITCAST: self.select_bitcast,
            VirtualDagOps.TRUNCATE: self.select_trunc,
            VirtualDagOps.CALLSEQ_START: self.select_callseq_start,
            VirtualDagOps.CALLSEQ_END: self.select_callseq_end,
            VirtualDagOps.SCALAR_TO_VECTOR: self.select_scalar_to_vector,
            X64DagOps.CALL: self.select_call,
            X64DagOps.RETURN: self.select_return,
        }

        if node.opcode == VirtualDagOps.ZERO_EXTEND:
            src_ty = node.operands[0].ty
            dst_ty = node.value_types[0]

            # i32 -> i64 zero-extension: zero a 64-bit register, then insert
            # the narrow value into its low subregister (SUBREG_TO_REG).
            if src_ty == MachineValueType(ValueType.I32) and dst_ty == MachineValueType(ValueType.I64):
                # NOTE(review): this inner test is redundant with the outer
                # condition above.
                if dst_ty == MachineValueType(ValueType.I64):
                    zero_val = DagValue(dag.add_machine_dag_node(
                        X64MachineOps.MOV64r0, [dst_ty]), 0)

                    if src_ty.value_type == ValueType.I8:
                        subreg_idx = subregs.index(sub_8bit)
                    elif src_ty.value_type == ValueType.I16:
                        subreg_idx = subregs.index(sub_16bit)
                    elif src_ty.value_type == ValueType.I32:
                        subreg_idx = subregs.index(sub_32bit)

                    subreg_idx_node = DagValue(dag.add_target_constant_node(
                        MachineValueType(ValueType.I32), subreg_idx), 0)

                    # NOTE(review): regclass_id_val is computed but never
                    # passed to the SUBREG_TO_REG node below -- confirm.
                    regclass_id = x64_regclasses.index(GR64)
                    regclass_id_val = DagValue(
                        dag.add_target_constant_node(MachineValueType(ValueType.I32), regclass_id), 0)

                    return dag.add_node(TargetDagOps.SUBREG_TO_REG, [dst_ty], zero_val, node.operands[0], subreg_idx_node)

        # Pass-through and trivially-rewritten opcodes.
        if node.opcode == VirtualDagOps.ENTRY:
            return dag.entry.node
        elif node.opcode == VirtualDagOps.UNDEF:
            return node
        elif node.opcode == VirtualDagOps.CONDCODE:
            return node
        elif node.opcode == VirtualDagOps.BASIC_BLOCK:
            return node
        elif node.opcode == VirtualDagOps.REGISTER:
            return node
        elif node.opcode == VirtualDagOps.TARGET_CONSTANT:
            return node
        elif node.opcode == VirtualDagOps.TARGET_CONSTANT_POOL:
            return node
        elif node.opcode == VirtualDagOps.TARGET_FRAME_INDEX:
            return node
        elif node.opcode == VirtualDagOps.TARGET_REGISTER:
            return node
        elif node.opcode == VirtualDagOps.TARGET_GLOBAL_ADDRESS:
            return node
        elif node.opcode == VirtualDagOps.TARGET_GLOBAL_TLS_ADDRESS:
            return node
        elif node.opcode == VirtualDagOps.TARGET_EXTERNAL_SYMBOL:
            return node
        elif node.opcode == VirtualDagOps.INLINEASM:
            return node
        elif node.opcode == VirtualDagOps.EXTERNAL_SYMBOL:
            return dag.add_external_symbol_node(node.value_types[0], node.symbol, True)
        elif node.opcode == VirtualDagOps.MERGE_VALUES:
            return dag.add_node(node.opcode, node.value_types, *new_ops)
        elif node.opcode == VirtualDagOps.TOKEN_FACTOR:
            return dag.add_node(node.opcode, node.value_types, *new_ops)
        elif node.opcode == X64DagOps.WRAPPER_RIP:
            return self.lower_wrapper_rip(node, dag)
        elif node.opcode == X64DagOps.WRAPPER:
            return node
        elif node.opcode == VirtualDagOps.TARGET_CONSTANT_FP:
            return node
        elif node.opcode in SELECT_TABLE:
            select_func = SELECT_TABLE[node.opcode]
            minst = select_func(node, dag, new_ops)
        else:
            raise NotImplementedError(
                "Can't select the instruction: {}".format(node.opcode))

        return minst
class X86CallingConv(CallingConv):
    """C calling convention lowering for x86-64 (SysV and Win64)."""

    def __init__(self):
        pass

    @property
    def id(self):
        # This class implements the plain C calling convention.
        return CallingConvID.C

    def can_lower_return(self, func: Function):
        """Return True when the return value fits in registers (<= 16 bytes)."""
        return_size, align = func.module.data_layout.get_type_size_in_bits(
            func.vty.return_ty)
        return return_size / 8 <= 16
    def lower_return(self, builder: DagBuilder, inst: ReturnInst, g: Dag):
        """Lower an IR return instruction into an X64 RETURN DAG node.

        Splits the returned value per the calling convention, copies each
        part into its return register, and (for sret functions) returns the
        demoted sret pointer in EAX/RAX instead.
        """
        mfunc = builder.mfunc
        calling_conv = mfunc.target_info.get_calling_conv()
        reg_info = mfunc.target_info.get_register_info()
        data_layout = builder.data_layout

        # sret demotion: the struct-return pointer argument, if any.
        demote_reg = builder.func_info.sret_reg
        has_demote_arg = demote_reg is not None

        stack_pop_bytes = builder.get_value(ConstantInt(0, i32))

        if len(inst.operands) > 0:
            return_offsets = []
            return_vts = compute_value_types(
                inst.block.func.return_ty, data_layout, return_offsets)

            returns = []
            offset_in_arg = 0

            # Analyze return value: one CallingConvReturn per register part.
            for val_idx, vt in enumerate(return_vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)

                for reg_idx in range(reg_count):
                    flags = CCArgFlags()

                    returns.append(CallingConvReturn(
                        vt, reg_vt, 0, offset_in_arg, flags))

                    offset_in_arg += reg_vt.get_size_in_byte()

            # Apply calling convention
            ccstate = CallingConvState(calling_conv, mfunc)
            ccstate.compute_returns_layout(returns)

            # Handle return values: split the returned SSA value into parts.
            ret_parts = []
            ret_value = builder.get_value(inst.rs)

            idx = 0
            for val_idx, vt in enumerate(return_vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)

                if reg_count > 1:
                    raise NotImplementedError()

                ret_parts.append(ret_value.get_value(idx))

                idx += reg_count

            # Copy each part into its assigned physical return register.
            reg_vals = []
            for idx, ret_val in enumerate(ccstate.values):
                assert(isinstance(ret_val, CCArgReg))
                ret_vt = ret_val.loc_vt

                reg_val = DagValue(
                    g.add_target_register_node(ret_vt, ret_val.reg), 0)
                copy_val = ret_parts[idx]

                builder.root = get_copy_to_parts(
                    copy_val, [reg_val], ret_vt, builder.root, builder.g)

                reg_vals.append(reg_val)

            ops = [builder.root, stack_pop_bytes, *reg_vals]
        else:
            ops = [builder.root, stack_pop_bytes]

        if has_demote_arg:
            # Struct-return: hand the sret pointer back in EAX/RAX.
            return_ty = inst.block.func.ty
            vts = compute_value_types(
                return_ty, inst.block.func.module.data_layout)

            assert(len(vts) == 1)
            assert(len(demote_reg) == 1)

            ret_val = DagValue(
                builder.g.add_register_node(vts[0], demote_reg[0]), 0)

            if ret_val.ty == MachineValueType(ValueType.I32):
                ret_reg = EAX
            elif ret_val.ty == MachineValueType(ValueType.I64):
                ret_reg = RAX
            else:
                raise NotImplementedError()

            reg_node = DagValue(
                g.add_target_register_node(ret_val.ty, ret_reg), 0)

            node = g.add_copy_to_reg_node(reg_node, ret_val)
            builder.root = DagValue(node, 0)

            # NOTE(review): this replaces any return-register operands built
            # above -- confirm sret functions never also return values in
            # registers.
            ops = [builder.root, stack_pop_bytes, reg_node]

        node = g.add_node(X64DagOps.RETURN, [
            MachineValueType(ValueType.OTHER)], *ops)

        builder.root = DagValue(node, 0)
        return node
    def compute_type_size_aligned(self, ty, data_layout: DataLayout):
        """Return the (size_in_bits, alignment_in_bits) pair for *ty*.

        Delegates to the data layout; callers unpack a (size, align) tuple.
        """
        return data_layout.get_type_size_in_bits(ty)
def lower_call(self, builder: DagBuilder, inst: CallInst, g: Dag):
mfunc = builder.mfunc
func = inst.callee
calling_conv = mfunc.target_info.get_calling_conv()
reg_info = mfunc.target_info.get_register_info()
data_layout = builder.data_layout
target_lowering = mfunc.target_info.get_lowering()
ptr_ty = target_lowering.get_frame_index_type(data_layout)
is_vararg = func.is_variadic
is_win64 = mfunc.target_info.triple.os == OS.Windows and mfunc.target_info.triple.arch == ArchType.X86_64
# Handle arguments
args = []
for i, arg in enumerate(inst.args):
vts = compute_value_types(arg.ty, data_layout)
offset_in_arg = 0
for val_idx, vt in enumerate(vts):
reg_vt = reg_info.get_register_type(vt)
reg_count = reg_info.get_register_count(vt)
for reg_idx in range(reg_count):
flags = CCArgFlags()
args.append(CallingConvArg(
vt, reg_vt, i, offset_in_arg, flags))
offset_in_arg += reg_vt.get_size_in_byte()
ccstate = CallingConvState(calling_conv, mfunc)
ccstate.compute_arguments_layout(args)
stack_offset = align_to(ccstate.stack_offset, ccstate.stack_maxalign)
# Estimate stack size to call function
data_layout = builder.data_layout
stack_bytes = 32
for arg in inst.args:
size, align = self.compute_type_size_aligned(arg.ty, data_layout)
arg_size = int(size / 8)
arg_align = int(align / 8)
stack_bytes += arg_size
in_bytes = g.add_target_constant_node(
MachineValueType(ValueType.I32), stack_bytes)
out_bytes = g.add_target_constant_node(
MachineValueType(ValueType.I32), 0)
callseq_start_node = g.add_node(VirtualDagOps.CALLSEQ_START, [
MachineValueType(ValueType.OTHER)], builder.root, DagValue(in_bytes, 0), DagValue(out_bytes, 0))
builder.root = DagValue(callseq_start_node, 0)
stack_ptr_type = MachineValueType(ValueType.I64)
esp_reg_node = g.add_target_register_node(stack_ptr_type, RSP)
esp = g.add_copy_from_reg_node(
stack_ptr_type, DagValue(esp_reg_node, 0))
##
arg_parts = []
for arg in inst.args:
idx = 0
arg_value = builder.get_value(arg)
vts = compute_value_types(arg.ty, data_layout)
for val_idx, vt in enumerate(vts):
reg_vt = reg_info.get_register_type(vt)
reg_count = reg_info.get_register_count(vt)
if reg_count > 1:
raise NotImplementedError()
arg_parts.append(arg_value.get_value(idx))
idx += reg_count
chain = g.root
reg_vals = []
arg_vals = []
regs_to_pass = []
for idx, arg_val in enumerate(ccstate.values):
if isinstance(arg_val, CCArgReg):
reg_val = DagValue(g.add_target_register_node(
arg_val.vt, arg_val.reg), 0)
copy_val = arg_parts[idx]
if arg_val.loc_info == CCArgLocInfo.Full:
pass
elif arg_val.loc_info == CCArgLocInfo.Indirect:
arg_mem_size = arg_val.vt.get_size_in_byte()
arg_mem_align = int(data_layout.get_pref_type_alignment(
arg_val.vt.get_ir_type()) / 8)
arg_mem_frame_idx = mfunc.frame.create_stack_object(
arg_mem_size, arg_mem_align)
arg_mem_val = DagValue(builder.g.add_frame_index_node(
ptr_ty, arg_mem_frame_idx), 0)
chain = DagValue(g.add_store_node(
chain, arg_mem_val, copy_val), 0)
copy_val = arg_mem_val
else:
raise ValueError()
arg_vals.append(copy_val)
reg_vals.append(reg_val)
regs_to_pass.append((reg_val, copy_val))
if is_vararg and is_win64:
shadow_reg = None
if arg_val.reg == XMM0:
shadow_reg = RCX
elif arg_val.reg == XMM1:
shadow_reg = RDX
elif arg_val.reg == XMM2:
shadow_reg = R8
elif arg_val.reg == XMM3:
shadow_reg = R9
if shadow_reg:
reg_val = DagValue(g.add_target_register_node(
arg_val.vt, shadow_reg), 0)
regs_to_pass.append((reg_val, copy_val))
else:
assert(isinstance(arg_val, CCArgMem))
copy_val = arg_parts[idx]
ptr_val = DagValue(g.add_target_register_node(
ptr_ty, RSP), 0)
ptr_offset_val = DagValue(
g.add_constant_node(ptr_ty, (32 + arg_val.offset)), 0)
ptr_val = DagValue(
g.add_node(VirtualDagOps.ADD, [ptr_ty], ptr_val, ptr_offset_val), 0)
chain = DagValue(g.add_store_node(
chain, ptr_val, copy_val), 0)
copy_to_reg_chain = None
for reg_val, copy_val in regs_to_pass:
operands = [chain, reg_val, copy_val]
if copy_to_reg_chain:
operands.append(copy_to_reg_chain.get_value(1))
copy_to_reg_chain = DagValue(builder.g.add_node(VirtualDagOps.COPY_TO_REG, [MachineValueType(
ValueType.OTHER), MachineValueType(ValueType.GLUE)], *operands), 0)
func_address = builder.get_or_create_global_address(inst.callee, True)
ops = [chain, func_address]
if len(ccstate.values) > 0:
ops.append(copy_to_reg_chain.get_value(1))
call_node = DagValue(g.add_node(
X64DagOps.CALL, [MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)], *ops), 0)
ops = [call_node.get_value(0), DagValue(in_bytes, 0), DagValue(
out_bytes, 0), call_node.get_value(1)]
callseq_end_node = DagValue(g.add_node(VirtualDagOps.CALLSEQ_END, [
MachineValueType(ValueType.OTHER), MachineValueType(ValueType.GLUE)], *ops), 0)
chain = callseq_end_node.get_value(0)
builder.root = chain
# Handle returns
return_offsets = []
return_vts = compute_value_types(inst.ty, data_layout, return_offsets)
returns = []
if not self.can_lower_return(func):
raise NotImplementedError()
else:
offset_in_arg = 0
for val_idx, vt in enumerate(return_vts):
reg_vt = reg_info.get_register_type(vt)
reg_count = reg_info.get_register_count(vt)
for reg_idx in range(reg_count):
flags = CCArgFlags()
returns.append(CallingConvReturn(
vt, reg_vt, 0, offset_in_arg, flags))
offset_in_arg += reg_vt.get_size_in_byte()
ccstate = CallingConvState(calling_conv, mfunc)
ccstate.compute_returns_layout(returns)
glue_val = callseq_end_node.get_value(1)
# Handle return values
ret_vals = []
for idx, ret_val in enumerate(ccstate.values):
assert(isinstance(ret_val, CCArgReg))
reg = MachineRegister(ret_val.reg)
reg_node = DagValue(
builder.g.add_register_node(ret_val.loc_vt, reg), 0)
ret_val_node = DagValue(builder.g.add_node(VirtualDagOps.COPY_FROM_REG, [
ret_val.loc_vt, MachineValueType(ValueType.GLUE)], chain, reg_node, glue_val), 0)
glue_val = ret_val_node.get_value(1)
ret_vals.append(ret_val_node)
ret_parts = []
idx = 0
for val_idx, vt in enumerate(return_vts):
reg_vt = reg_info.get_register_type(vt)
reg_count = reg_info.get_register_count(vt)
if reg_count > 1:
raise NotImplementedError()
ret_parts.append(ret_vals[idx])
idx += reg_count
if len(ret_parts) == 0:
return None
return builder.g.add_merge_values(ret_parts)
def allocate_return_x64_cdecl(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
if loc_vt.value_type == ValueType.I1:
loc_vt = MachineValueType(ValueType.I8)
if loc_vt.value_type == ValueType.I8:
regs = [AL, DL, CL]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
if loc_vt.value_type == ValueType.I16:
regs = [AX, CX, DX]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
if loc_vt.value_type == ValueType.I32:
regs = [EAX, ECX, EDX]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
if loc_vt.value_type == ValueType.I64:
regs = [RAX, RCX, RDX]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
if loc_vt.value_type in [ValueType.F32, ValueType.F64]:
regs = [XMM0, XMM1, XMM2]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
if loc_vt.value_type in [ValueType.V4F32]:
regs = [XMM0, XMM1, XMM2]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
raise NotImplementedError("The type is unsupporting.")
def allocate_return_win64_cdecl(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
if loc_vt.value_type == ValueType.I1:
loc_vt = MachineValueType(ValueType.I8)
if loc_vt.value_type == ValueType.I8:
regs = [AL, DL, CL]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
if loc_vt.value_type == ValueType.I16:
regs = [AX, CX, DX]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
if loc_vt.value_type == ValueType.I32:
regs = [EAX, ECX, EDX]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
if loc_vt.value_type == ValueType.I64:
regs = [RAX, RCX, RDX]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
if loc_vt.value_type in [ValueType.F32, ValueType.F64]:
regs = [XMM0, XMM1, XMM2]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
if loc_vt.value_type in [ValueType.V4F32]:
regs = [XMM0, XMM1, XMM2]
reg = ccstate.alloc_reg_from_list(regs)
if reg is not None:
ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
return False
raise NotImplementedError("The type is unsupporting.")
def allocate_return(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
target_info = ccstate.mfunc.target_info
if target_info.triple.os == OS.Windows and target_info.is_64bit_mode:
self.allocate_return_win64_cdecl(
idx, vt, loc_vt, loc_info, flags, ccstate)
return
self.allocate_return_x64_cdecl(
idx, vt, loc_vt, loc_info, flags, ccstate)
    def allocate_argument_x64_cdecl(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
        """Assign a location to one argument under the SysV x64 C ABI.

        Integers use the six GPR argument registers, floats/vectors the
        first eight XMM registers; integers that do not fit overflow to
        8-byte stack slots.  Returns False once the argument has a location.
        """
        # i1/i8/i16 are promoted and passed as i32.
        if loc_vt.value_type in [ValueType.I1, ValueType.I8, ValueType.I16]:
            loc_vt = MachineValueType(ValueType.I32)

        if loc_vt.value_type == ValueType.I32:
            regs = [EDI, ESI, EDX, ECX, R8D, R9D]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False

        if loc_vt.value_type == ValueType.I64:
            regs = [RDI, RSI, RDX, RCX, R8, R9]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False

        if loc_vt.value_type in [ValueType.F32, ValueType.F64, ValueType.F128]:
            regs = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False

        if loc_vt.value_type in [ValueType.V4F32]:
            regs = [XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]
            reg = ccstate.alloc_reg_from_list(regs)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False

        # Register file exhausted: spill integers to the stack.
        # NOTE(review): FP/vector arguments that run out of XMM registers
        # fall through to the error below instead of the stack -- confirm
        # against the SysV ABI.
        if loc_vt.value_type in [ValueType.I32, ValueType.I64]:
            stack_offset = ccstate.alloc_stack(8, 8)
            ccstate.assign_stack_value(
                idx, vt, loc_vt, loc_info, stack_offset, flags)
            return False

        raise NotImplementedError("The type is unsupporting.")
    def allocate_argument_win64_cdecl(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
        """Assign a location to one argument under the Win64 C ABI.

        GPR and XMM argument registers are paired: allocating from one list
        also consumes the matching slot of the shadow list.  Vector values
        are passed indirectly through a pointer.  Overflowing arguments use
        8-byte stack slots.  Returns False once the argument has a location.
        """
        # Vectors are not passed by value in registers: pass a pointer.
        if loc_vt.value_type in [ValueType.V4F32]:
            loc_vt = MachineValueType(ValueType.I64)
            loc_info = CCArgLocInfo.Indirect

        if loc_vt.value_type == ValueType.I8:
            regs1 = [CL, DL, R8B, R9B]
            regs2 = [XMM0, XMM1, XMM2, XMM3]
            reg = ccstate.alloc_reg_from_list(regs1, regs2)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False

        if loc_vt.value_type == ValueType.I16:
            regs1 = [CX, DX, R8W, R9W]
            regs2 = [XMM0, XMM1, XMM2, XMM3]
            reg = ccstate.alloc_reg_from_list(regs1, regs2)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False

        if loc_vt.value_type == ValueType.I32:
            regs1 = [ECX, EDX, R8D, R9D]
            regs2 = [XMM0, XMM1, XMM2, XMM3]
            reg = ccstate.alloc_reg_from_list(regs1, regs2)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False

        if loc_vt.value_type == ValueType.I64:
            regs1 = [RCX, RDX, R8, R9]
            regs2 = [XMM0, XMM1, XMM2, XMM3]
            reg = ccstate.alloc_reg_from_list(regs1, regs2)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False

        if loc_vt.value_type in [ValueType.F32, ValueType.F64]:
            regs1 = [XMM0, XMM1, XMM2, XMM3]
            regs2 = [RCX, RDX, R8, R9]
            reg = ccstate.alloc_reg_from_list(regs1, regs2)
            if reg is not None:
                ccstate.assign_reg_value(idx, vt, loc_vt, loc_info, reg, flags)
                return False

        # Register file exhausted: pass on the stack.
        if loc_vt.value_type in [ValueType.I8, ValueType.I16, ValueType.I32, ValueType.I64, ValueType.F32, ValueType.F64]:
            stack_offset = ccstate.alloc_stack(8, 8)
            ccstate.assign_stack_value(
                idx, vt, loc_vt, loc_info, stack_offset, flags)
            return False

        raise NotImplementedError("The type is unsupporting.")
def allocate_argument(self, idx, vt: MachineValueType, loc_vt, loc_info, flags: CCArgFlags, ccstate: CallingConvState):
target_info = ccstate.mfunc.target_info
if target_info.triple.os == OS.Windows and target_info.is_64bit_mode:
self.allocate_argument_win64_cdecl(
idx, vt, loc_vt, loc_info, flags, ccstate)
return
self.allocate_argument_x64_cdecl(
idx, vt, loc_vt, loc_info, flags, ccstate)
class X64TargetInstInfo(TargetInstInfo):
    """Target instruction hooks for x64: copies, spills, reloads and
    frame-index elimination."""

    def __init__(self):
        super().__init__()
    def copy_phys_reg(self, src_reg, dst_reg, kill_src, inst: MachineInstruction):
        """Insert a physical register-to-register copy after *inst*.

        The move opcode is chosen from the register classes of source and
        destination; unsupported pairs raise NotImplementedError.
        """
        assert(isinstance(src_reg, MachineRegister))
        assert(isinstance(dst_reg, MachineRegister))

        def is_hreg(reg):
            # Legacy high-byte registers (AH/BH/CH/DH).
            return reg in [AH, BH, CH, DH]

        opcode = None
        if src_reg.spec in GR64.regs and dst_reg.spec in GR64.regs:
            opcode = X64MachineOps.MOV64rr
        elif src_reg.spec in GR32.regs and dst_reg.spec in GR32.regs:
            opcode = X64MachineOps.MOV32rr
        elif src_reg.spec in GR16.regs and dst_reg.spec in GR16.regs:
            opcode = X64MachineOps.MOV16rr
        elif src_reg.spec in GR8.regs and dst_reg.spec in GR8.regs:
            # NOTE(review): both branches pick MOV8rr; the high-byte case
            # presumably should use a NOREX variant -- confirm.
            if is_hreg(src_reg.spec) or is_hreg(dst_reg.spec):
                opcode = X64MachineOps.MOV8rr
            else:
                opcode = X64MachineOps.MOV8rr
        elif src_reg.spec in VR128.regs and dst_reg.spec in VR128.regs:
            opcode = X64MachineOps.MOVAPSrr
        elif src_reg.spec in FR64.regs and dst_reg.spec in FR64.regs:
            opcode = X64MachineOps.MOVSDrr
        elif src_reg.spec in FR32.regs and dst_reg.spec in FR32.regs:
            opcode = X64MachineOps.MOVSSrr
        elif src_reg.spec in VR128.regs:
            if dst_reg.spec in GR64.regs:
                opcode = X64MachineOps.MOVPQIto64rr

        if not opcode:
            raise NotImplementedError(
                "Move instructions support GR64 or GR32 at the present time.")

        copy_inst = MachineInstruction(opcode)
        copy_inst.add_reg(dst_reg, RegState.Define)
        if opcode in [X64MachineOps.MOVSSrr, X64MachineOps.MOVSDrr]:
            # MOVSS/MOVSD merge into the destination, so it is also a use.
            copy_inst.add_reg(dst_reg, RegState.Non)
        copy_inst.add_reg(src_reg, RegState.Kill if kill_src else RegState.Non)

        copy_inst.insert_after(inst)

        return copy_inst
def copy_reg_to_stack(self, reg, stack_slot, regclass, inst: MachineInstruction):
hwmode = inst.mbb.func.target_info.hwmode
tys = regclass.get_types(hwmode)
align = int(regclass.align / 8)
size = tys[0].get_size_in_bits()
size = int(int((size + 7) / 8))
def has_reg_regclass(reg, regclass):
if isinstance(reg, MachineVirtualRegister):
return reg.regclass == regclass
else:
return reg.spec in regclass.regs
if size == 1:
if has_reg_regclass(reg, GR8):
opcode = X64MachineOps.MOV8mr
elif size == 2:
if has_reg_regclass(reg, GR16):
opcode = X64MachineOps.MOV16mr
elif size == 4:
if has_reg_regclass(reg, GR32):
opcode = X64MachineOps.MOV32mr
elif has_reg_regclass(reg, FR32):
opcode = X64MachineOps.MOVSSmr
elif size == 8:
if has_reg_regclass(reg, GR64):
opcode = X64MachineOps.MOV64mr
elif has_reg_regclass(reg, FR64):
opcode = X64MachineOps.MOVSDmr
elif size == 16:
if has_reg_regclass(reg, VR128):
opcode = X64MachineOps.MOVAPSmr
else:
raise NotImplementedError(
"Move instructions support GR64 or GR32 at the present time.")
copy_inst = MachineInstruction(opcode)
noreg = MachineRegister(NOREG)
copy_inst.add_frame_index(stack_slot) # base
copy_inst.add_imm(1) # scale
copy_inst.add_reg(noreg, RegState.Non) # index
copy_inst.add_imm(0) # disp
copy_inst.add_reg(noreg, RegState.Non) # segment
copy_inst.add_reg(reg, RegState.Non)
copy_inst.insert_before(inst)
return copy_inst
def copy_reg_from_stack(self, reg, stack_slot, regclass, inst: MachineInstruction):
hwmode = inst.mbb.func.target_info.hwmode
tys = regclass.get_types(hwmode)
align = int(regclass.align / 8)
size = tys[0].get_size_in_bits()
size = int(int((size + 7) / 8))
def has_reg_regclass(reg, regclass):
if isinstance(reg, MachineVirtualRegister):
return reg.regclass == regclass
else:
return reg.spec in regclass.regs
if size == 1:
if has_reg_regclass(reg, GR8):
opcode = X64MachineOps.MOV8rm
elif size == 2:
if has_reg_regclass(reg, GR16):
opcode = X64MachineOps.MOV16rm
elif size == 4:
if has_reg_regclass(reg, GR32):
opcode = X64MachineOps.MOV32rm
elif has_reg_regclass(reg, FR32):
opcode = X64MachineOps.MOVSSrm
elif size == 8:
if has_reg_regclass(reg, GR64):
opcode = X64MachineOps.MOV64rm
elif has_reg_regclass(reg, FR64):
opcode = X64MachineOps.MOVSDrm
elif size == 16:
if has_reg_regclass(reg, VR128):
opcode = X64MachineOps.MOVAPSrm
else:
raise NotImplementedError(
"Move instructions support GR64 or GR32 at the present time.")
copy_inst = MachineInstruction(opcode)
noreg = MachineRegister(NOREG)
copy_inst.add_reg(reg, RegState.Define)
copy_inst.add_frame_index(stack_slot) # base
copy_inst.add_imm(1) # scale
copy_inst.add_reg(noreg, RegState.Non) # index
copy_inst.add_imm(0) # disp
copy_inst.add_reg(noreg, RegState.Non) # segment
copy_inst.insert_before(inst)
return copy_inst
def calculate_frame_offset(self, func: MachineFunction, idx):
slot_size = 8
frame = func.frame
stack_obj = func.frame.get_stack_object(idx)
frame_lowering = func.target_info.get_frame_lowering()
if idx < 0:
return stack_obj.offset + frame_lowering.frame_spill_size
return stack_obj.offset
    def eliminate_frame_index(self, func: MachineFunction, inst: MachineInstruction, idx):
        """Rewrite the frame-index operand at *idx* into RBP + displacement.

        The operand layout is the x86 memory form (base, scale, index, disp,
        segment), so the displacement lives 3 slots after the base.
        """
        # Analyze the frame index into a base register and a displacement.
        operand = inst.operands[idx]

        if isinstance(operand, MOFrameIndex):
            # Frame-pointer-relative addressing; assumes RBP is set up.
            base_reg = MachineRegister(RBP)

            # NOTE(review): stack_obj is fetched but unused here.
            stack_obj = func.frame.get_stack_object(operand.index)
            offset = self.calculate_frame_offset(func, operand.index)

            inst.operands[idx] = MOReg(base_reg, RegState.Non)
            inst.operands[idx + 3] = MOImm(inst.operands[idx + 3].val + offset)
def optimize_compare_inst(self, func: MachineFunction, inst: MachineInstruction):
# Eliminate destination register.
reginfo = func.reg_info
if reginfo.is_use_empty(inst.operands[0].reg):
if inst.opcode == X64MachineOps.SUB8ri:
inst.opcode = X64MachineOps.CMP8ri
elif inst.opcode == X64MachineOps.SUB32ri:
inst.opcode = X64MachineOps.CMP32ri
elif inst.opcode == X64MachineOps.SUB32rm:
inst.opcode = X64MachineOps.CMP32rm
elif inst.opcode == X64MachineOps.SUB32rr:
inst.opcode = X64MachineOps.CMP32rr
elif inst.opcode == X64MachineOps.SUB64rr:
inst.opcode = X64MachineOps.CMP64rr
else:
raise ValueError("Not supporting instruction.")
remove_op = inst.operands[0]
if remove_op.tied_to >= 0:
tied = inst.operands[remove_op.tied_to]
assert(tied.tied_to == 0)
tied.tied_to = -1
inst.remove_operand(0)
def expand_post_ra_pseudo(self, inst: MachineInstruction):
if inst.opcode == X64MachineOps.V_SET0:
inst.opcode = X64MachineOps.XORPSrr
reg_operand = inst.operands[0]
inst.add_reg(reg_operand.reg, RegState.Undef)
inst.add_reg(reg_operand.reg, RegState.Undef)
if inst.opcode == X64MachineOps.MOV32r0:
inst.opcode = X64MachineOps.XOR32rr
reg_operand = inst.operands[0]
inst.add_reg(reg_operand.reg, RegState.Undef)
inst.add_reg(reg_operand.reg, RegState.Undef)
if inst.opcode == X64MachineOps.MOV64r0:
inst.opcode = X64MachineOps.XOR64rr
reg_operand = inst.operands[0]
inst.add_reg(reg_operand.reg, RegState.Undef)
inst.add_reg(reg_operand.reg, RegState.Undef)
def get_super_regs(reg):
    """Return the set of all registers that (transitively) contain *reg*."""
    assert(isinstance(reg, MachineRegisterDef))

    all_regs = MachineRegisterDef.regs

    # Invert the subregister relation: register -> direct super registers.
    direct_supers = {r: set() for r in all_regs}
    for candidate in all_regs:
        for sub in candidate.subregs:
            direct_supers[sub].add(candidate)

    # Transitive closure via depth-first traversal.
    supers = set()
    worklist = list(direct_supers[reg])
    while worklist:
        current = worklist.pop()
        if current in supers:
            continue
        supers.add(current)
        worklist.extend(direct_supers[current])

    return supers
def get_sub_regs(reg):
stk = list(reg.subregs)
subregs = set()
while len(stk) > 0:
poped = stk.pop()
if poped in subregs:
continue
subregs.add(poped)
for subreg in poped.subregs:
stk.append(subreg)
return subregs
def count_if(values, pred):
    """Count the elements of *values* that satisfy *pred*."""
    return sum(1 for value in values if pred(value))
def find_if(values, pred):
    """Return the index of the first element satisfying *pred*, or -1."""
    return next((i for i, value in enumerate(values) if pred(value)), -1)
class X64TargetLowering(TargetLowering):
    """DAG lowering rules specific to the x64 target."""

    def __init__(self):
        super().__init__()

        # Register type used to hold each value type; i1 is widened to i8
        # since there is no 1-bit register.
        self.reg_type_for_vt = {MachineValueType(
            e): MachineValueType(e) for e in ValueType}
        self.reg_type_for_vt[MachineValueType(
            ValueType.I1)] = MachineValueType(ValueType.I8)

        # Every value type currently occupies exactly one register.
        self.reg_count_for_vt = {MachineValueType(e): 1 for e in ValueType}
    def get_reg_for_inline_asm_constraint(self, reg_info, code, vt):
        """Resolve an inline-asm register constraint such as ``{dx}``.

        Looks the constraint up in a small table and resizes the register
        to the width of *vt*.  Returns the physical register, or None when
        the constraint is unknown.
        """
        reg, regclass = None, None

        def is_gr_class(regclass):
            return regclass in [GR8, GR16, GR32, GR64]

        def is_fr_class(regclass):
            return regclass in [FR32, FR64, VR128]

        def get_sub_or_super_reg_for_size(reg, size_in_bits, high=False):
            # NOTE(review): only the CX/DX (32-bit) and DI (64-bit) families
            # are handled; every other request raises.
            if size_in_bits == 8:
                raise NotImplementedError()
            elif size_in_bits == 16:
                raise NotImplementedError()
            elif size_in_bits == 32:
                if reg in [CL, CX, ECX, RCX]:
                    return ECX
                elif reg in [DL, DX, EDX, RDX]:
                    return EDX
                raise NotImplementedError()
            elif size_in_bits == 64:
                if reg in [DIL, DI, EDI, RDI]:
                    return RDI
                raise NotImplementedError()

            raise ValueError("Can't found the suitable register")

        # Known constraints and their canonical 16-bit register.
        TABLE = {
            "{di}": (DI, GR16),
            "{cx}": (CX, GR16),
            "{dx}": (DX, GR16)
        }

        if code in TABLE:
            reg, regclass = TABLE[code]

        if not reg:
            return None

        if is_gr_class(regclass):
            size = vt.get_size_in_bits()
            # NOTE(review): rc is computed but never used.
            if size == 8:
                rc = GR8
            elif size == 16:
                rc = GR16
            elif size == 32:
                rc = GR32
            elif size == 64:
                rc = GR64

            reg = get_sub_or_super_reg_for_size(reg, size)
            return reg
        else:
            raise NotImplementedError()

        raise NotImplementedError()
def get_register_type(self, vt):
if vt in self.reg_type_for_vt:
return self.reg_type_for_vt[vt]
raise NotImplementedError()
def get_register_count(self, vt):
if vt in self.reg_count_for_vt:
return self.reg_count_for_vt[vt]
raise NotImplementedError()
    def lower_setcc(self, node: DagNode, dag: Dag):
        """Lower SETCC into an x64 compare + SETCC pair.

        The comparison is lowered to (U)COMI for floats or SUB for
        integers, and the condition is translated into the x86
        condition-code immediate consumed by X64DagOps.SETCC.
        """
        op1 = node.operands[0]
        op2 = node.operands[1]
        cond = node.operands[2]

        is_fcmp = op1.node.value_types[0].value_type in [
            ValueType.F32, ValueType.F64]

        def compute_condcode(cond):
            # Translate the CondCode into the x86 condition-code immediate
            # (e=4, ne=5, b=2, ae=3, be=6, a=7, l=12, ge=13, le=14, g=15).
            ty = MachineValueType(ValueType.I8)
            swap = False
            cond = cond.node.cond

            # (Disabled operand-swap logic for ordered/unordered float
            # compares was removed here; `swap` therefore stays False.)

            if cond == CondCode.SETEQ:
                node = dag.add_target_constant_node(ty, 4)
            elif cond == CondCode.SETNE:
                node = dag.add_target_constant_node(ty, 5)
            elif cond == CondCode.SETLT:
                node = dag.add_target_constant_node(ty, 12)
            elif cond == CondCode.SETGT:
                node = dag.add_target_constant_node(ty, 15)
            elif cond == CondCode.SETLE:
                node = dag.add_target_constant_node(ty, 14)
            elif cond == CondCode.SETGE:
                node = dag.add_target_constant_node(ty, 13)
            elif cond in [CondCode.SETULT, CondCode.SETOLT]:
                node = dag.add_target_constant_node(ty, 2)
            elif cond in [CondCode.SETUGT, CondCode.SETOGT]:
                node = dag.add_target_constant_node(ty, 7)
            elif cond in [CondCode.SETULE, CondCode.SETOLE]:
                node = dag.add_target_constant_node(ty, 6)
            elif cond in [CondCode.SETUGE, CondCode.SETOGE]:
                node = dag.add_target_constant_node(ty, 3)
            else:
                raise NotImplementedError()

            return node, swap

        condcode, swap = compute_condcode(cond)
        if swap:
            op1, op2 = op2, op1

        if is_fcmp:
            # NOTE(review): `cond` here is still the DagValue operand, not
            # the CondCode enum, so this membership test looks always-false
            # and COMI is always chosen -- probably should test
            # cond.node.cond; confirm.
            if cond in [CondCode.SETULT, CondCode.SETUGT, CondCode.SETULE, CondCode.SETUGE]:
                op = X64DagOps.UCOMI
            else:
                op = X64DagOps.COMI

            cmp_node = DagValue(dag.add_node(op,
                                             [MachineValueType(ValueType.I32), MachineValueType(ValueType.GLUE)], op1, op2), 0)
        else:
            op = X64DagOps.SUB

            cmp_node = DagValue(dag.add_node(op,
                                             [op1.ty, MachineValueType(ValueType.I32), MachineValueType(ValueType.GLUE)], op1, op2), 1)

        # operand 1 is eflags.
        setcc_node = dag.add_node(X64DagOps.SETCC, node.value_types,
                                  DagValue(condcode, 0), cmp_node, cmp_node.get_value(cmp_node.index + 1))

        return setcc_node
    def lower_brcond(self, node: DagNode, dag: Dag):
        """Lower BRCOND into an X64 BRCOND with an explicit condition code.

        A SETCC condition is lowered directly; any other boolean value is
        first compared against zero with SETNE.
        """
        chain = node.operands[0]
        cond = node.operands[1]
        dest = node.operands[2]

        if cond.node.opcode == VirtualDagOps.SETCC:
            cond = DagValue(self.lower_setcc(cond.node, dag), 0)

        if cond.node.opcode == X64DagOps.SETCC:
            # Reuse the condition code and EFLAGS producer from the SETCC.
            condcode = cond.node.operands[0]
            cond = cond.node.operands[1]
        else:
            # Not a compare: materialize (cond != 0) and lower that.
            if cond.ty == MachineValueType(ValueType.I1):
                cond = DagValue(dag.add_node(VirtualDagOps.ZERO_EXTEND, [
                    MachineValueType(ValueType.I32)], cond), 0)

            zero = DagValue(dag.add_constant_node(cond.ty, 0), 0)
            condcode = DagValue(dag.add_condition_code_node(CondCode.SETNE), 0)
            cond = DagValue(dag.add_node(VirtualDagOps.SETCC, [
                MachineValueType(ValueType.I1)], cond, zero, condcode), 0)
            cond = DagValue(self.lower_setcc(cond.node, dag), 0)
            condcode = cond.node.operands[0]
            cond = cond.node.operands[1]

        return dag.add_node(X64DagOps.BRCOND, node.value_types, chain, dest, condcode, cond)
    def lower_global_address(self, node: DagNode, dag: Dag):
        """Wrap a global address for x64 addressing.

        Thread-local globals use the plain WRAPPER; everything else is
        addressed RIP-relative via WRAPPER_RIP.
        """
        target_address = dag.add_global_address_node(
            node.value_types[0], node.value, True)
        wrapper_opc = X64DagOps.WRAPPER if node.value.is_thread_local else X64DagOps.WRAPPER_RIP
        return dag.add_node(wrapper_opc, node.value_types, DagValue(target_address, 0))
    def lower_global_tls_address(self, node: DagNode, dag: Dag):
        """Lower GLOBAL_TLS_ADDRESS for Linux (general-dynamic) and Windows.

        Linux: emits a TLSADDR pseudo and reads the resulting address out of
        RAX. Windows: walks the TEB's ThreadLocalStoragePointer (TEB+0x58),
        indexes it with _tls_index, and adds the variable's .tls section
        offset (SECREL relocation).
        """
        data_layout = dag.mfunc.func_info.func.module.data_layout
        ptr_ty = self.get_pointer_type(data_layout)
        global_value = node.value
        if dag.mfunc.target_info.machine.options.emulated_tls:
            raise NotImplementedError()
        if dag.mfunc.target_info.triple.os == OS.Linux:
            if global_value.thread_local == ThreadLocalMode.GeneralDynamicTLSModel:
                ga = DagValue(dag.add_global_address_node(
                    ptr_ty, global_value, True), 0)
                ops = [dag.entry, ga]
                # TLSADDR clobbers/produces the address in RAX by convention.
                chain = DagValue(dag.add_node(X64DagOps.TLSADDR, [
                    MachineValueType(ValueType.OTHER)], *ops), 0)
                reg_node = DagValue(dag.add_register_node(
                    ptr_ty, MachineRegister(RAX)), 0)
                return dag.add_node(VirtualDagOps.COPY_FROM_REG, [ptr_ty, MachineValueType(
                    ValueType.OTHER)], chain, reg_node)
            raise ValueError("Not supporing TLS model.")
        if dag.mfunc.target_info.triple.os == OS.Windows:
            # 0x58 is the ThreadLocalStoragePointer slot in the x64 TEB,
            # accessed through the GS segment (address space 256).
            ptr = get_constant_null_value(PointerType(i8, 256))
            tls_array = DagValue(dag.add_constant_node(ptr_ty, 0x58), 0)
            tls_array = DagValue(dag.add_node(
                X64DagOps.WRAPPER, node.value_types, tls_array), 0)
            thread_ptr = DagValue(dag.add_load_node(
                ptr_ty, dag.entry, tls_array, False, ptr_info=MachinePointerInfo(ptr)), 0)
            if global_value.thread_local == ThreadLocalMode.LocalExecTLSModel:
                raise NotImplementedError()
            else:
                # Load the module's TLS slot index from _tls_index.
                idx = DagValue(dag.add_external_symbol_node(
                    ptr_ty, "_tls_index", False), 0)
                idx = DagValue(dag.add_node(
                    X64DagOps.WRAPPER_RIP, node.value_types, idx), 0)
                idx = DagValue(dag.add_load_node(
                    ptr_ty, dag.entry, idx, False), 0)
            # NOTE(review): despite the name, this does NOT compute
            # ceil(log2(value)); it checks bit 64 while counting i down from
            # 62, which happens to yield log2(bytes) when passed a
            # power-of-two bit width (64 -> 3, 32 -> 2). The scale below
            # relies on that coincidence -- confirm before touching.
            def log2_uint64_cail(value):
                if value == 0:
                    return 0
                value = value - 1
                for i in reversed(range(63)):
                    if (value & (1 << 64)) != 0:
                        return i
                    value = value << 1
                return 0
            # Scale the slot index by the pointer size (idx << 3 on x64).
            scale = DagValue(dag.add_constant_node(
                MachineValueType(ValueType.I8), log2_uint64_cail(data_layout.get_pointer_size_in_bits())), 0)
            idx = DagValue(dag.add_node(
                VirtualDagOps.SHL, [ptr_ty], idx, scale), 0)
            thread_ptr = DagValue(dag.add_node(
                VirtualDagOps.ADD, [ptr_ty], thread_ptr, idx), 0)
            tls_ptr = DagValue(dag.add_load_node(
                ptr_ty, dag.entry, thread_ptr, False), 0)
            # This value is the offset from the .tls section
            target_address = DagValue(dag.add_global_address_node(
                node.value_types[0], node.value, True, target_flags=X64OperandFlag.SECREL), 0)
            offset = DagValue(dag.add_node(
                X64DagOps.WRAPPER, node.value_types, target_address), 0)
            return dag.add_node(VirtualDagOps.ADD, [ptr_ty], tls_ptr, offset)
        raise NotImplementedError()
def get_pointer_type(self, data_layout, addr_space=0):
return get_int_value_type(data_layout.get_pointer_size_in_bits(addr_space))
def get_frame_index_type(self, data_layout):
return get_int_value_type(data_layout.get_pointer_size_in_bits(0))
def lower_constant_fp(self, node: DagNode, dag: Dag):
assert(isinstance(node, ConstantFPDagNode))
data_layout = dag.mfunc.func_info.func.module.data_layout
ptr_ty = self.get_pointer_type(data_layout)
constant_pool = dag.add_constant_pool_node(ptr_ty, node.value, False)
return dag.add_load_node(node.value_types[0], dag.entry, DagValue(constant_pool, 0), False)
def lower_constant_pool(self, node: DagNode, dag: Dag):
assert(isinstance(node, ConstantPoolDagNode))
target_constant_pool = dag.add_constant_pool_node(
node.value_types[0], node.value, True)
return dag.add_node(X64DagOps.WRAPPER_RIP, node.value_types, DagValue(target_constant_pool, 0))
    def lower_build_vector(self, node: DagNode, dag: Dag):
        """Lower BUILD_VECTOR when all elements are the same value.

        If every operand is additionally a ConstantFP node, the vector is
        rebuilt from target-constant FP operands; otherwise the common scalar
        is splat into all lanes via _mm_set_ps1. Non-uniform vectors are not
        handled yet.
        """
        assert(node.opcode == VirtualDagOps.BUILD_VECTOR)
        elm = node.operands[0]
        all_eq = True
        all_constant_fp = True
        # Compare each operand against the previous one (elm trails by one),
        # so all_eq means all operands are the identical DagValue.
        # NOTE(review): the ConstantFP check also runs on the trailing elm, so
        # the LAST operand's node type is never inspected -- harmless when
        # all_eq holds, but verify if this is ever relaxed.
        for operand in node.operands:
            if elm.node != operand.node or elm.index != operand.index:
                all_eq = False
            if not isinstance(elm.node, ConstantFPDagNode):
                all_constant_fp = False
            elm = operand
        operands = []
        if all_eq:
            if all_constant_fp:
                # Re-emit with target-constant FP operands.
                for operand in node.operands:
                    target_constant_fp = dag.add_target_constant_fp_node(
                        operand.node.value_types[0], operand.node.value)
                    operands.append(DagValue(target_constant_fp, 0))
                return dag.add_node(VirtualDagOps.BUILD_VECTOR, node.value_types, *operands)
            # Non-constant uniform vector: broadcast the scalar.
            result = self._mm_set_ps1(node.value_types[0], elm, dag)
            return result.node
        else:
            raise NotImplementedError()
def shuffle_param(self, fp3, fp2, fp1, fp0):
return (fp3 << 6) | (fp2 << 4) | (fp1 << 2) | fp0
def get_x86_shuffle_mask_v4(self, mask, dag):
mask_val = self.shuffle_param(mask[3], mask[2], mask[1], mask[0])
return DagValue(dag.add_target_constant_node(MachineValueType(ValueType.I8), mask_val), 0)
def _mm_set_ps1(self, vec_ty, val, dag):
vec = DagValue(dag.add_node(
VirtualDagOps.SCALAR_TO_VECTOR, [vec_ty], val), 0)
param = DagValue(dag.add_target_constant_node(MachineValueType(
ValueType.I8), self.shuffle_param(0, 0, 0, 0)), 0)
return DagValue(dag.add_node(X64DagOps.SHUFP, vec.node.value_types, vec, vec, param), 0)
    def lower_insert_vector_elt(self, node: DagNode, dag: Dag):
        """Lower INSERT_VECTOR_ELT with a constant index into a shuffle.

        The scalar is first placed in lane 0 of a temporary vector; a shuffle
        then selects the original vector's lanes everywhere except the target
        lane, which takes index num_elems (lane 0 of the second vector).
        """
        assert(node.opcode == VirtualDagOps.INSERT_VECTOR_ELT)
        vec = node.operands[0]
        elem = node.operands[1]
        idx = node.operands[2]
        if isinstance(idx.node, ConstantDagNode):
            elem_vec = DagValue(dag.add_node(
                VirtualDagOps.SCALAR_TO_VECTOR, [vec.ty], elem), 0)
            num_elems = vec.ty.get_num_vector_elems()
            idx_val = idx.node.value
            shuffle_idx = []
            for i in range(num_elems):
                if i == idx_val.value:
                    # Indices >= num_elems refer to the second shuffle input.
                    shuffle_idx.append(num_elems)
                else:
                    shuffle_idx.append(i)
            return dag.add_shuffle_vector(vec.ty, vec, elem_vec, shuffle_idx)
        # Dynamic insertion indices are not supported.
        raise ValueError()
def get_scalar_value_for_vec_elem(self, vec, idx, dag: Dag):
if vec.node.opcode == VirtualDagOps.SCALAR_TO_VECTOR and idx == 0:
scalar_val = vec.node.operands[idx]
return scalar_val
raise ValueError()
    def lower_shuffle_as_elem_insertion(self, vt, vec1, vec2, mask, dag: Dag):
        """Lower a shuffle taking exactly one element from vec2 as MOVSS.

        The vec2 element is re-broadcast into lane 0 of a fresh vector and
        merged into vec1 with a scalar move.
        """
        # Position in the mask that selects from vec2 (mask values >= len).
        vec2_idx = find_if(mask, lambda m: m >= len(mask))
        elem_vt = vt.get_vector_elem_type()
        # NOTE(review): this assert restricts the routine to f32 elements,
        # which makes the MOVSD branch below unreachable -- presumably kept
        # for a future f64 path; confirm before removing either.
        assert(elem_vt.value_type == ValueType.F32)
        vec2_elem = self.get_scalar_value_for_vec_elem(vec2, vec2_idx, dag)
        vec2 = DagValue(dag.add_node(
            VirtualDagOps.SCALAR_TO_VECTOR, [vt], vec2_elem), 0)
        if elem_vt.value_type == ValueType.F32:
            opcode = X64DagOps.MOVSS
        else:
            opcode = X64DagOps.MOVSD
        return dag.add_node(opcode, [vt], vec1, vec2)
    def lower_shuffle_shufps(self, vt, vec1, vec2, mask, dag: Dag):
        """Lower a 4-element shuffle into one or two SHUFPS operations.

        SHUFPS picks its low two output lanes from the first input and the
        high two from the second, so a mask mixing single elements from vec2
        first blends the needed vec2 element next to its vec1 partner, then
        does a final SHUFP with an adjusted mask.
        """
        assert(len(mask) == 4)
        num_vec2_elems = count_if(mask, lambda m: m >= 4)
        new_mask = list(mask)
        lo_vec, hi_vec = vec1, vec2
        if num_vec2_elems == 1:
            vec2_idx = find_if(mask, lambda m: m >= 4)
            # Each element of the vector is divided into groups of two elements.
            # If the index is odd, the index of the other element is even.
            vec2_idx_adj = vec2_idx ^ 1
            # Merge the vectors.
            # blend_mask places the vec2 element (index rebased by -4) and its
            # neighboring vec1 element into the blend result's lanes 0 and 2.
            blend_mask = [mask[vec2_idx] - 4, 0, mask[vec2_idx_adj], 0]
            vec2 = DagValue(dag.add_node(X64DagOps.SHUFP, [
                vt], vec2, vec1, self.get_x86_shuffle_mask_v4(blend_mask, dag)), 0)
            # Which half of the output the blended pair lands in decides the
            # operand order of the final SHUFP.
            if vec2_idx < 2:
                lo_vec = vec2
                hi_vec = vec1
            else:
                lo_vec = vec1
                hi_vec = vec2
            # The blended pair now lives in lanes 0 and 2 of the blend result.
            new_mask[vec2_idx] = 0
            new_mask[vec2_idx_adj] = 2
        elif num_vec2_elems == 2:
            raise NotImplementedError()
        return dag.add_node(X64DagOps.SHUFP, [vt], lo_vec, hi_vec, self.get_x86_shuffle_mask_v4(new_mask, dag))
def lower_v4f32_shuffle(self, node: DagNode, dag: Dag):
vec1 = node.operands[0]
vec2 = node.operands[1]
mask = node.mask
num_vec2_elems = count_if(mask, lambda m: m >= 4)
if num_vec2_elems == 1 and mask[0] >= 4:
return self.lower_shuffle_as_elem_insertion(MachineValueType(ValueType.V4F32), vec1, vec2, mask, dag)
return self.lower_shuffle_shufps(MachineValueType(ValueType.V4F32), vec1, vec2, mask, dag)
def lower_shuffle_vector(self, node: DagNode, dag: Dag):
if node.value_types[0] == MachineValueType(ValueType.V4F32):
return self.lower_v4f32_shuffle(node, dag)
raise ValueError()
def lower_sub(self, node: DagNode, dag: Dag):
return dag.add_node(X64DagOps.SUB, node.value_types, *node.operands)
def lower_atomic_fence(self, node: DagNode, dag: Dag):
ordering = node.operands[1].node.value.value
if ordering == AtomicOrdering.SequentiallyConsistent.value:
raise NotImplementedError()
return dag.add_node(X64DagOps.MEMBARRIER, node.value_types, node.operands[0])
def lower_div(self, node: DagNode, dag: Dag):
is_signed = node.opcode == VirtualDagOps.SDIV
divrem_opc = VirtualDagOps.SDIVREM if is_signed else VirtualDagOps.UDIVREM
value_ty = node.value_types[0]
return dag.add_node(divrem_opc, [value_ty, value_ty], *node.operands)
    def lower_fp_to_int(self, node: DagNode, dag: Dag):
        """Lower FP_TO_SINT/FP_TO_UINT by converting to a wide int + truncate.

        f64 sources convert through i64, f32 sources through i32; if the
        requested result type is narrower, the value is truncated.

        NOTE(review): is_signed is computed but never used -- FP_TO_UINT is
        lowered via the signed conversion. Going through the wider i64 keeps
        this correct for unsigned i32 results from f64, but the f32 path
        (i32 intermediate) looks wrong for unsigned values >= 2**31; confirm.
        """
        is_signed = node.opcode == VirtualDagOps.FP_TO_SINT
        src = node.operands[0]
        value_ty = node.value_types[0]
        if src.ty == MachineValueType(ValueType.F64):
            value = DagValue(dag.add_node(VirtualDagOps.FP_TO_SINT, [
                MachineValueType(ValueType.I64)], *node.operands), 0)
            if value.ty == value_ty:
                return value.node
            return dag.add_node(VirtualDagOps.TRUNCATE, [value_ty], value)
        elif src.ty == MachineValueType(ValueType.F32):
            value = DagValue(dag.add_node(VirtualDagOps.FP_TO_SINT, [
                MachineValueType(ValueType.I32)], *node.operands), 0)
            if value.ty == value_ty:
                return value.node
            return dag.add_node(VirtualDagOps.TRUNCATE, [value_ty], value)
        raise NotImplementedError()
def get_unpackl(self, value_ty: MachineValueType, v1: DagValue, v2: DagValue, dag: Dag):
def get_unpack_shuffle_mask(value_ty, lo, unary):
num_elem = value_ty.get_num_vector_elems()
num_elem_in_lane = 128 / value_ty.get_vector_elem_size_in_bits()
mask = []
for i in range(num_elem):
lane_start = int(int(i / num_elem_in_lane) * num_elem_in_lane)
pos = (i % num_elem_in_lane) >> 2 + lane_start
pos += (0 if unary else (num_elem * (i % 2)))
pos += (0 if lo else (num_elem_in_lane >> 1))
mask.append(pos)
return mask
shuffle_idx = get_unpack_shuffle_mask(value_ty, True, False)
return dag.add_shuffle_vector(value_ty, v1, v2, shuffle_idx)
    def lower_uint_to_fp(self, node: DagNode, dag: Dag):
        """Lower UINT_TO_FP for i32 and i64 sources.

        i32 is widened to i64 and converted as signed (safe: the value fits).
        i64 -> f64 uses the classic magic-constant trick: split the integer
        into two halves, pair each with an exponent constant (0x433...
        = 2**52, 0x453... = 2**84), subtract the biases and add the two
        partial doubles. Anything else is returned unchanged.
        """
        data_layout = dag.mfunc.func_info.func.module.data_layout
        target_lowering = dag.mfunc.target_info.get_lowering()
        ptr_ty = target_lowering.get_frame_index_type(data_layout)
        # NOTE(review): is_signed compares against FP_TO_SINT (a conversion in
        # the opposite direction) and is unused -- looks like a copy/paste
        # leftover.
        is_signed = node.opcode == VirtualDagOps.FP_TO_SINT
        src = node.operands[0]
        value_ty = node.value_types[0]
        def int_to_double(value):
            # Reinterpret a 64-bit integer bit pattern as a float64.
            from struct import unpack, pack
            bys = pack("q", value)
            return unpack('d', bys)[0]
        if src.ty == MachineValueType(ValueType.I32):
            # Zero-extend to i64; the signed conversion is then exact.
            src = DagValue(dag.add_node(VirtualDagOps.ZERO_EXTEND, [
                MachineValueType(ValueType.I64)], src), 0)
            return dag.add_node(VirtualDagOps.SINT_TO_FP, [value_ty], src)
        if src.ty == MachineValueType(ValueType.I64) and value_ty == MachineValueType(ValueType.F64):
            # Exponent words interleaved with the integer halves below.
            cv0 = [0x43300000, 0x45300000, 0, 0]
            c0 = ConstantVector(cv0, VectorType("v4i32", i32, 4))
            cp0 = DagValue(dag.add_constant_pool_node(ptr_ty, c0, align=16), 0)
            # Bias doubles to subtract: 2**52 and 2**84 (as raw bit patterns).
            cv2 = [int_to_double(0x4330000000000000),
                   int_to_double(0x4530000000000000)]
            c2 = ConstantVector(cv2, VectorType("v2f64", f64, 2))
            cp2 = DagValue(dag.add_constant_pool_node(ptr_ty, c2, align=16), 0)
            src_vec = DagValue(dag.add_node(VirtualDagOps.SCALAR_TO_VECTOR, [
                MachineValueType(ValueType.V2I64)], src), 0)
            exp_part_vec = DagValue(dag.add_load_node(
                MachineValueType(ValueType.V4I32), dag.entry, cp0), 0)
            # Interleave the low/high 32-bit halves of src with the exponent
            # words, forming two doubles: lo + 2**52 and hi + 2**84.
            unpack1 = DagValue(dag.add_node(VirtualDagOps.BITCAST, [
                MachineValueType(ValueType.V4I32)], src_vec), 0)
            unpack1 = DagValue(self.get_unpackl(
                unpack1.ty, unpack1, exp_part_vec, dag), 0)
            cst_val2 = DagValue(dag.add_load_node(
                MachineValueType(ValueType.V2F64), dag.entry, cp2), 0)
            unpack1 = DagValue(dag.add_node(VirtualDagOps.BITCAST, [
                MachineValueType(ValueType.V2F64)], unpack1), 0)
            # Remove the exponent biases, then sum the two partial results.
            sub_val = DagValue(dag.add_node(VirtualDagOps.FSUB, [
                MachineValueType(ValueType.V2F64)], unpack1, cst_val2), 0)
            shuffle_val = DagValue(dag.add_shuffle_vector(
                MachineValueType(ValueType.V2F64), unpack1, unpack1, [1, -1]), 0)
            add_val = DagValue(dag.add_node(VirtualDagOps.FADD, [
                MachineValueType(ValueType.V2F64)], sub_val, shuffle_val), 0)
            zero_val = DagValue(dag.add_target_constant_node(
                MachineValueType(ValueType.I32), 0), 0)
            return dag.add_node(VirtualDagOps.EXTRACT_VECTOR_ELT, [MachineValueType(ValueType.F64)], add_val, zero_val)
        return node
def lower(self, node: DagNode, dag: Dag):
if node.opcode == VirtualDagOps.ENTRY:
return dag.entry.node
if node.opcode == VirtualDagOps.BRCOND:
return self.lower_brcond(node, dag)
elif node.opcode == VirtualDagOps.SETCC:
return self.lower_setcc(node, dag)
elif node.opcode == VirtualDagOps.SUB:
return self.lower_sub(node, dag)
elif node.opcode in [VirtualDagOps.SDIV, VirtualDagOps.UDIV]:
return self.lower_div(node, dag)
elif node.opcode in [VirtualDagOps.FP_TO_SINT, VirtualDagOps.FP_TO_UINT]:
return self.lower_fp_to_int(node, dag)
elif node.opcode == VirtualDagOps.UINT_TO_FP:
return self.lower_uint_to_fp(node, dag)
elif node.opcode == VirtualDagOps.GLOBAL_ADDRESS:
return self.lower_global_address(node, dag)
elif node.opcode == VirtualDagOps.GLOBAL_TLS_ADDRESS:
return self.lower_global_tls_address(node, dag)
elif node.opcode == VirtualDagOps.CONSTANT_FP:
return self.lower_constant_fp(node, dag)
elif node.opcode == VirtualDagOps.CONSTANT_POOL:
return self.lower_constant_pool(node, dag)
elif node.opcode == VirtualDagOps.BUILD_VECTOR:
return self.lower_build_vector(node, dag)
elif node.opcode == VirtualDagOps.SHUFFLE_VECTOR:
return self.lower_shuffle_vector(node, dag)
elif node.opcode == VirtualDagOps.INSERT_VECTOR_ELT:
return self.lower_insert_vector_elt(node, dag)
elif node.opcode == VirtualDagOps.ATOMIC_FENCE:
return self.lower_atomic_fence(node, dag)
else:
return node
    def lower_arguments(self, func: Function, builder: DagBuilder):
        """Materialize the incoming arguments of *func* as DAG values.

        Phase 1 flattens each IR argument into register-sized pieces and runs
        the calling convention over them. Phase 2 creates a CopyFromReg (for
        register args) or a fixed-stack load (for memory args) per piece.
        Phase 3 reassembles the pieces per IR argument, records a vreg mapping
        for values the function body will read, and handles the sret demotion.
        """
        arg_load_chains = []
        chain = builder.root
        mfunc = builder.mfunc
        calling_conv = mfunc.target_info.get_calling_conv()
        reg_info = mfunc.target_info.get_register_info()
        data_layout = func.module.data_layout
        target_lowering = mfunc.target_info.get_lowering()
        ptr_ty = target_lowering.get_frame_index_type(data_layout)
        # Phase 1: flatten every argument into register-sized pieces.
        args = []
        for i, arg in enumerate(func.args):
            vts = compute_value_types(arg.ty, data_layout)
            offset_in_arg = 0
            for val_idx, vt in enumerate(vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)
                for reg_idx in range(reg_count):
                    flags = CCArgFlags()
                    args.append(CallingConvArg(
                        vt, reg_vt, i, offset_in_arg, flags))
                    offset_in_arg += reg_vt.get_size_in_byte()
        ccstate = CallingConvState(calling_conv, mfunc)
        ccstate.compute_arguments_layout(args)
        # Phase 2: one DAG value per assigned piece.
        arg_vals = []
        for arg_val in ccstate.values:
            arg_vt = arg_val.loc_vt
            if isinstance(arg_val, CCArgReg):
                # Pick the register class matching the piece's type.
                if arg_vt.value_type == ValueType.I8:
                    regclass = GR8
                elif arg_vt.value_type == ValueType.I16:
                    regclass = GR16
                elif arg_vt.value_type == ValueType.I32:
                    regclass = GR32
                elif arg_vt.value_type == ValueType.I64:
                    regclass = GR64
                elif arg_vt.value_type == ValueType.F32:
                    regclass = FR32
                elif arg_vt.value_type == ValueType.F64:
                    regclass = FR64
                elif arg_vt.value_type == ValueType.V4F32:
                    regclass = VR128
                else:
                    raise ValueError()
                # Copy the physical argument register into a fresh vreg that
                # is live-in to the function.
                reg = mfunc.reg_info.create_virtual_register(regclass)
                mfunc.reg_info.add_live_in(MachineRegister(arg_val.reg), reg)
                reg_node = DagValue(
                    builder.g.add_register_node(arg_vt, reg), 0)
                arg_val_node = DagValue(
                    builder.g.add_copy_from_reg_node(arg_vt, reg_node), 0)
            else:
                assert(isinstance(arg_val, CCArgMem))
                size = arg_vt.get_size_in_byte()
                offset = arg_val.offset
                # +32: skips the spill area above the incoming frame --
                # presumably the Win64 shadow space plus saved RBP/RA; confirm
                # against create_fixed_stack_object's offset convention.
                frame_idx = builder.mfunc.frame.create_fixed_stack_object(
                    size, offset + 32)
                frame_idx_node = DagValue(
                    builder.g.add_frame_index_node(ptr_ty, frame_idx), 0)
                arg_val_node = DagValue(builder.g.add_load_node(
                    arg_vt, builder.root, frame_idx_node, False), 0)
            if arg_val.loc_info == CCArgLocInfo.Indirect:
                # The piece is a pointer to the real value: load through it.
                arg_val_node = DagValue(builder.g.add_load_node(
                    arg_val.vt, builder.root, arg_val_node, False), 0)
            arg_vals.append(arg_val_node)
        # Phase 3: reassemble pieces back into per-argument values.
        arg_idx = 0
        for i, arg in enumerate(func.args):
            vts = compute_value_types(arg.ty, data_layout)
            offset_in_arg = 0
            arg_parts = []
            for val_idx, vt in enumerate(vts):
                reg_vt = reg_info.get_register_type(vt)
                reg_count = reg_info.get_register_count(vt)
                if reg_count > 1:
                    raise NotImplementedError()
                arg_parts.append(arg_vals[arg_idx])
                arg_idx += reg_count
            val = builder.g.add_merge_values(arg_parts)
            if val.node.opcode == VirtualDagOps.COPY_FROM_REG:
                # Single register piece: remember its vreg directly.
                reg = val.node.operands[1].node.reg
                if isinstance(reg, MachineVirtualRegister):
                    builder.func_info.reg_value_map[arg] = [reg]
            else:
                # Multi-piece / memory argument: copy each part into fresh
                # vregs so later uses read from registers.
                # NOTE(review): this rebinds the outer reg_info name.
                reg_info = builder.reg_info
                for ty, arg_part in zip(vts, arg_parts):
                    reg_vt = reg_info.get_register_type(ty)
                    reg_count = reg_info.get_register_count(ty)
                    regs = []
                    reg_vals = []
                    for idx in range(reg_count):
                        vreg = target_lowering.get_machine_vreg(
                            reg_vt)
                        reg = builder.mfunc.reg_info.create_virtual_register(
                            vreg)
                        regs.append(reg)
                        reg_vals.append(
                            DagValue(builder.g.add_register_node(reg_vt, reg), 0))
                    chain = get_copy_to_parts(
                        arg_part, reg_vals, reg_vt, chain, builder.g)
                builder.func_info.reg_value_map[arg] = regs
            builder.set_inst_value(arg, val)
        builder.root = chain
        # Struct-return demotion: the sret pointer is the (hidden) first arg.
        has_demote_arg = len(func.args) > 0 and func.args[0].has_attribute(
            AttributeKind.StructRet)
        if has_demote_arg:
            demote_arg = func.args[0]
            builder.func_info.sret_reg = builder.func_info.reg_value_map[demote_arg]
        else:
            builder.func_info.sret_reg = None
        # builder.root = DagValue(DagNode(VirtualDagOps.TOKEN_FACTOR, [
        #     MachineValueType(ValueType.OTHER)], arg_load_chains), 0)
def is_frame_op(self, inst):
if inst.opcode == X64MachineOps.ADJCALLSTACKDOWN32:
return True
if inst.opcode == X64MachineOps.ADJCALLSTACKUP32:
return True
return False
    def lower_prolog(self, func: MachineFunction, bb: MachineBasicBlock):
        """Emit the function prolog at the top of *bb*.

        Sequence: push RBP, set RBP = RSP, spill the callee-saved registers
        to their frame slots, then drop RSP by the aligned local-stack size.
        """
        inst_info = func.target_info.get_inst_info()
        frame_info = func.target_info.get_frame_lowering()
        reg_info = func.target_info.get_register_info()
        data_layout = func.func_info.func.module.data_layout
        front_inst = bb.insts[0]
        # push rbp
        push_rbp_inst = MachineInstruction(X64MachineOps.PUSH64r)
        push_rbp_inst.add_reg(MachineRegister(RBP), RegState.Non)
        push_rbp_inst.add_reg(MachineRegister(RSP), RegState.ImplicitDefine)
        push_rbp_inst.insert_before(front_inst)
        # mov rbp, rsp
        mov_esp_inst = MachineInstruction(X64MachineOps.MOV64rr)
        mov_esp_inst.add_reg(MachineRegister(RBP), RegState.Define)  # To
        mov_esp_inst.add_reg(MachineRegister(RSP), RegState.Non)  # From
        mov_esp_inst.insert_before(front_inst)
        # The stack and base pointer is aligned by 16 bytes here.
        # Spill callee-saved registers into their reserved frame slots.
        for cs_info in func.frame.calee_save_info:
            reg = cs_info.reg
            regclass = reg_info.get_regclass_from_reg(reg)
            frame_idx = cs_info.frame_idx
            inst_info.copy_reg_to_stack(MachineRegister(
                reg), frame_idx, regclass, front_inst)
        stack_size = func.frame.estimate_stack_size(
            X64MachineOps.ADJCALLSTACKDOWN32, X64MachineOps.ADJCALLSTACKUP32)
        # Round the frame size up to the strictest required alignment.
        max_align = max(func.frame.max_alignment, func.frame.stack_alignment)
        stack_size = int(
            int((stack_size + max_align - 1) / max_align) * max_align)
        # sub rsp, stack_size
        sub_esp_inst = MachineInstruction(X64MachineOps.SUB64ri)
        sub_esp_inst.add_reg(MachineRegister(RSP), RegState.Define)
        sub_esp_inst.add_reg(MachineRegister(RSP), RegState.Non)
        sub_esp_inst.add_imm(stack_size)
        sub_esp_inst.insert_before(front_inst)
    def lower_epilog(self, func: MachineFunction, bb: MachineBasicBlock):
        """Emit the function epilog just before the terminator of *bb*.

        Mirrors lower_prolog: reload the callee-saved registers, restore
        RSP from RBP, then pop RBP.
        """
        inst_info = func.target_info.get_inst_info()
        reg_info = func.target_info.get_register_info()
        data_layout = func.func_info.func.module.data_layout
        # Insert before the block's last instruction (the return).
        front_inst = bb.insts[-1]
        # Reload callee-saved registers from their frame slots.
        for cs_info in func.frame.calee_save_info:
            reg = cs_info.reg
            regclass = reg_info.get_regclass_from_reg(reg)
            frame_idx = cs_info.frame_idx
            inst_info.copy_reg_from_stack(MachineRegister(
                reg), frame_idx, regclass, front_inst)
        # mov rsp, rbp  -- frees the whole local frame at once.
        restore_esp_inst = MachineInstruction(X64MachineOps.MOV64rr)
        restore_esp_inst.add_reg(MachineRegister(RSP), RegState.Define)  # To
        restore_esp_inst.add_reg(MachineRegister(RBP), RegState.Non)  # From
        restore_esp_inst.insert_before(front_inst)
        # pop rbp
        pop_rbp_inst = MachineInstruction(X64MachineOps.POP64r)
        pop_rbp_inst.add_reg(MachineRegister(RBP), RegState.Non)
        pop_rbp_inst.add_reg(MachineRegister(RSP), RegState.ImplicitDefine)
        pop_rbp_inst.insert_before(front_inst)
def eliminate_call_frame_pseudo_inst(self, func, inst: MachineInstruction):
inst.remove()
def get_machine_vreg(self, ty: MachineValueType):
if ty.value_type == ValueType.I1:
return GR8
elif ty.value_type == ValueType.I8:
return GR8
elif ty.value_type == ValueType.I16:
return GR16
elif ty.value_type == ValueType.I32:
return GR32
elif ty.value_type == ValueType.I64:
return GR64
elif ty.value_type == ValueType.F32:
return FR32
elif ty.value_type == ValueType.F64:
return FR64
elif ty.value_type == ValueType.V4F32:
return VR128
raise NotImplementedError()
    def lower_optimal_memory_op(self, size, src_op, dst_op, src_align, dst_align, builder: DagBuilder):
        """Expand a memory copy of *size* bytes into i32 load/store pairs.

        All copies are chained off the current root and joined with a
        TOKEN_FACTOR that becomes the new root.

        NOTE(review): each iteration loads and stores a full i32 regardless
        of copy_size, so a tail of 1-3 bytes reads/writes past the requested
        range; src_align/dst_align are also unused. Confirm whether callers
        guarantee 4-byte-multiple sizes.
        """
        chain = builder.root
        is_volatile = False
        offset = 0
        chains = []
        while offset < size:
            # copy_size only advances the offset; the access width is i32.
            copy_size = min(4, size - offset)
            copy_ty = MachineValueType(ValueType.I32)
            # Source address: src_op + offset (skip the add for offset 0).
            if offset != 0:
                src_ty = src_op.ty
                size_node = DagValue(
                    builder.g.add_target_constant_node(src_ty, offset), 0)
                src_ptr = DagValue(builder.g.add_node(
                    VirtualDagOps.ADD, [src_ty], src_op, size_node), 0)
            else:
                src_ptr = src_op
            # Destination address: dst_op + offset.
            if offset != 0:
                dst_ty = dst_op.ty
                size_node = DagValue(
                    builder.g.add_target_constant_node(dst_ty, offset), 0)
                dst_ptr = DagValue(builder.g.add_node(
                    VirtualDagOps.ADD, [dst_ty], dst_op, size_node), 0)
            else:
                dst_ptr = dst_op
            load_op = builder.g.add_load_node(
                copy_ty, chain, src_ptr, is_volatile)
            store_op = builder.g.add_store_node(
                chain, dst_ptr, DagValue(load_op, 0))
            chains.extend([DagValue(store_op, 0)])
            offset += copy_size
        # Join all stores so later operations order after the whole copy.
        builder.root = DagValue(builder.g.add_node(VirtualDagOps.TOKEN_FACTOR, [
            MachineValueType(ValueType.OTHER)], *chains), 0)
class X64TargetRegisterInfo(TargetRegisterInfo):
    """x64 register-file queries: reserved, allocatable and ABI register sets."""
    def __init__(self, target_info):
        super().__init__()
        self.target_info = target_info
        self.triple = target_info.triple
    def get_reserved_regs(self):
        """Stack and base pointers, in every width, are never allocatable."""
        return [SPL, BPL, SP, BP, ESP, EBP, RSP, RBP]
    @property
    def allocatable_regs(self):
        """Union of all registers in the general, FP and vector classes."""
        regs = set()
        for regclass in (GR64, GR32, GR16, GR8, FR32, FR64, VR128):
            regs |= set(regclass.regs)
        return regs
    def get_callee_saved_regs(self):
        """Registers the callee must preserve, per the platform ABI."""
        if self.triple.arch == ArchType.X86_64:
            if self.triple.os == OS.Windows:
                return [RBX, RDI, RSI, R12, R13, R14, R15, XMM6,
                        XMM7, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15]
            elif self.triple.os == OS.Linux:
                return [RBX, R12, R13, R14, R15, XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15]
        raise Exception("Unsupporting architecture.")
    def get_callee_clobbered_regs(self):
        """Caller-saved (volatile) registers, per the platform ABI."""
        if self.triple.arch == ArchType.X86_64:
            if self.triple.os == OS.Windows:
                return [RAX, RCX, RDX, R8, R9, R10, R11,
                        XMM0, XMM1, XMM2, XMM3, XMM4, XMM5]
            elif self.triple.os == OS.Linux:
                return [RAX, RDI, RSI, RCX, RDX, R8, R9, R10, R11,
                        XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]
        raise Exception("Unsupporting architecture.")
    def get_ordered_regs(self, regclass):
        """Allocation order for *regclass*: its own order minus reserved regs."""
        reserved = set(self.get_reserved_regs())
        return [reg for reg in regclass.regs if reg not in reserved]
    def get_regclass_for_vt(self, vt):
        """First x64 register class whose type set (for this HW mode) holds *vt*."""
        hwmode = self.target_info.hwmode
        for regclass in x64_regclasses:
            if vt in regclass.get_types(hwmode):
                return regclass
        raise ValueError("Could not find the register class.")
class X64FrameLowering(TargetFrameLowering):
    """x64 stack-frame policy: downward growth, 16-byte spill area."""
    def __init__(self, alignment):
        super().__init__(alignment)
        # Bytes occupied by the saved return address + frame pointer pair.
        self.frame_spill_size = 16
    @property
    def stack_grows_direction(self):
        return StackGrowsDirection.Down
class X64Legalizer(Legalizer):
    """Type legalization for x64: i1 values are promoted to i8.

    Result legalization rewrites nodes *producing* illegal types; operand
    legalization rewrites nodes *consuming* them.
    """
    def __init__(self):
        super().__init__()
    def promote_integer_result_setcc(self, node, dag, legalized):
        """SETCC produces i8 after promotion (matches the x86 SETcc result)."""
        lhs = get_legalized_op(node.operands[0], legalized)
        rhs = get_legalized_op(node.operands[1], legalized)
        cond = node.operands[2]
        setcc_ty = MachineValueType(ValueType.I8)
        return dag.add_node(node.opcode, [setcc_ty], lhs, rhs, cond)
    def promote_integer_result_bin(self, node, dag, legalized):
        """Binary ops are re-emitted on the promoted operand type."""
        lhs = get_legalized_op(node.operands[0], legalized)
        rhs = get_legalized_op(node.operands[1], legalized)
        assert(lhs.ty == rhs.ty)
        return dag.add_node(node.opcode, [lhs.ty], lhs, rhs)
    def promote_integer_result_truncate(self, node, dag, legalized):
        """TRUNCATE to i1 becomes TRUNCATE to i8."""
        new_vt = MachineValueType(ValueType.I8)
        return dag.add_node(VirtualDagOps.TRUNCATE, [new_vt], *node.operands)
    def promote_integer_result_constant(self, node, dag, legalized):
        """i1 constants are re-materialized as i8 constants.

        (An unreachable ZERO_EXTEND fallback after the return was removed.)
        """
        new_vt = MachineValueType(ValueType.I8)
        return dag.add_constant_node(new_vt, node.value)
    def promote_integer_result(self, node, dag, legalized):
        """Dispatch result promotion for a node producing an i1 value."""
        if node.opcode == VirtualDagOps.SETCC:
            return self.promote_integer_result_setcc(node, dag, legalized)
        elif node.opcode in [VirtualDagOps.ADD, VirtualDagOps.SUB, VirtualDagOps.AND, VirtualDagOps.OR, VirtualDagOps.XOR]:
            return self.promote_integer_result_bin(node, dag, legalized)
        elif node.opcode == VirtualDagOps.TRUNCATE:
            return self.promote_integer_result_truncate(node, dag, legalized)
        elif node.opcode in [VirtualDagOps.LOAD]:
            # i1 loads are widened to i8 loads from the same address.
            chain = node.operands[0]
            ptr = get_legalized_op(node.operands[1], legalized)
            return dag.add_load_node(MachineValueType(ValueType.I8), chain, ptr, False, mem_operand=node.mem_operand)
        elif node.opcode == VirtualDagOps.CONSTANT:
            return self.promote_integer_result_constant(node, dag, legalized)
        else:
            raise ValueError("No method to promote.")
    def legalize_node_result(self, node: DagNode, dag: Dag, legalized):
        """Promote the node if any result type is the illegal i1; else keep it."""
        for vt in node.value_types:
            if vt.value_type == ValueType.I1:
                return self.promote_integer_result(node, dag, legalized)
        return node
    def promote_integer_operand_brcond(self, node, dag: Dag, legalized):
        """Re-emit BRCOND with the promoted condition operand."""
        chain_op = node.operands[0]
        cond_op = get_legalized_op(node.operands[1], legalized)
        dst_op = node.operands[2]
        return dag.add_node(VirtualDagOps.BRCOND, node.value_types, chain_op, cond_op, dst_op)
    def promote_integer_operand_zext(self, node, dag: Dag, legalized):
        """ZERO_EXTEND of a promoted value: drop it or turn it into TRUNCATE.

        After promotion the source may already match (or exceed) the result
        width, so the extend either disappears or becomes a truncate.
        """
        src_op = get_legalized_op(node.operands[0], legalized)
        if src_op.ty == node.value_types[0]:
            return src_op.node
        return dag.add_node(VirtualDagOps.TRUNCATE, node.value_types, src_op)
    def promote_integer_operand_uint_to_fp(self, node, dag: Dag, legalized):
        """UINT_TO_FP from i16: zero-extend the source to i32 first."""
        src_op = get_legalized_op(node.operands[0], legalized)
        if src_op.ty == MachineValueType(ValueType.I16):
            promoted_ty = MachineValueType(ValueType.I32)
        else:
            raise NotImplementedError()
        promoted = DagValue(dag.add_node(
            VirtualDagOps.ZERO_EXTEND, [promoted_ty], src_op), 0)
        return dag.add_node(VirtualDagOps.UINT_TO_FP, node.value_types, promoted)
    def promote_integer_operand_sint_to_fp(self, node, dag: Dag, legalized):
        """SINT_TO_FP from i16: sign-extend the source to i32 first."""
        src_op = get_legalized_op(node.operands[0], legalized)
        if src_op.ty == MachineValueType(ValueType.I16):
            promoted_ty = MachineValueType(ValueType.I32)
        else:
            raise NotImplementedError()
        promoted = DagValue(dag.add_node(
            VirtualDagOps.SIGN_EXTEND, [promoted_ty], src_op), 0)
        return dag.add_node(VirtualDagOps.SINT_TO_FP, node.value_types, promoted)
    def legalize_node_operand(self, node, i, dag: Dag, legalized):
        """Legalize operand *i* of *node*; return None when nothing to do."""
        operand = node.operands[i]
        vt = operand.ty
        if vt.value_type == ValueType.I1:
            if node.opcode == VirtualDagOps.BRCOND:
                return self.promote_integer_operand_brcond(
                    node, dag, legalized)
            if node.opcode == VirtualDagOps.ZERO_EXTEND:
                return self.promote_integer_operand_zext(
                    node, dag, legalized)
            if node.opcode == VirtualDagOps.STORE:
                # i1 stores become stores of the promoted (i8) value.
                op_chain = node.operands[0]
                op_val = get_legalized_op(node.operands[1], legalized)
                op_ptr = node.operands[2]
                return dag.add_store_node(op_chain, op_ptr, op_val, False, mem_operand=node.mem_operand)
        if vt.value_type == ValueType.I16:
            if node.opcode == VirtualDagOps.SINT_TO_FP:
                return self.promote_integer_operand_sint_to_fp(
                    node, dag, legalized)
            if node.opcode == VirtualDagOps.UINT_TO_FP:
                return self.promote_integer_operand_uint_to_fp(
                    node, dag, legalized)
        return None
class X64TargetInfo(TargetInfo):
    """Aggregates the x64 target components handed to the code generator."""
    def __init__(self, triple, machine):
        super().__init__(triple)
        self.machine = machine
    def is_64bit_mode(self):
        return self.triple.arch == ArchType.X86_64
    def get_inst_info(self) -> TargetInstInfo:
        return X64TargetInstInfo()
    def get_lowering(self) -> TargetLowering:
        return X64TargetLowering()
    def get_register_info(self) -> TargetRegisterInfo:
        return X64TargetRegisterInfo(self)
    def get_calling_conv(self) -> CallingConv:
        return X86CallingConv()
    def get_instruction_selector(self):
        return X64InstructionSelector()
    def get_legalizer(self):
        return X64Legalizer()
    def get_frame_lowering(self) -> TargetFrameLowering:
        # x86-64 stack alignment is 16 bytes.
        return X64FrameLowering(16)
    @property
    def hwmode(self) -> MachineHWMode:
        if self.triple.arch == ArchType.X86_64:
            return X64
        raise ValueError("Invalid arch type")
class X64TargetMachine(TargetMachine):
    """Top-level x64 target: creates target info and MC emission passes."""
    def __init__(self, triple, options):
        super().__init__(options)
        self.triple = triple
    def get_target_info(self, func: Function):
        return X64TargetInfo(self.triple, self)
    def add_mc_emit_passes(self, pass_manager, mccontext, output, is_asm):
        """Append the asm-printer pass that writes to *output*.

        :param is_asm: emit textual (Intel-syntax) assembly when True,
            otherwise an object file chosen by the triple's object format.
        :raises NotImplementedError: for object formats other than COFF/ELF.
        """
        from codegen.x64_asm_printer import X64AsmInfo, MCAsmStream, X64CodeEmitter, X64AsmBackend, X64AsmPrinter, X64IntelInstPrinter
        from codegen.coff import WinCOFFObjectWriter, WinCOFFObjectStream
        from codegen.elf import ELFObjectStream, ELFObjectWriter, X64ELFObjectWriter
        objformat = self.triple.objformat
        mccontext.asm_info = X64AsmInfo()
        if is_asm:
            printer = X64IntelInstPrinter()
            stream = MCAsmStream(mccontext, output, printer)
        else:
            emitter = X64CodeEmitter()
            backend = X64AsmBackend()
            if objformat == ObjectFormatType.COFF:
                writer = WinCOFFObjectWriter(output)
                stream = WinCOFFObjectStream(
                    mccontext, backend, writer, emitter)
            elif objformat == ObjectFormatType.ELF:
                target_writer = X64ELFObjectWriter()
                writer = ELFObjectWriter(output, target_writer)
                stream = ELFObjectStream(mccontext, backend, writer, emitter)
            else:
                # Fix: previously fell through and crashed later with
                # UnboundLocalError on "stream"; fail with a clear error.
                raise NotImplementedError(
                    "Unsupported object format: {}".format(objformat))
        pass_manager.passes.append(X64AsmPrinter(stream))
|
python
|
import pathlib
from setuptools import setup, find_packages
# Package metadata for pylateral; the PyPI long description is the README.
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
    name='pylateral',
    version='1.0.0',
    description='Intuitive multi-threaded task processing in python.',
    long_description=README,
    long_description_content_type="text/markdown",
    url='https://boxysean.github.io/pylateral/',
    author='Sean McIntyre',
    author_email='[email protected]',
    license='MIT',
    packages=find_packages(),
    tests_require=['pytest'],
)
|
python
|
import numpy as np
class BaseAgent:
    """Agent that picks a uniformly random pawn action from its environment.

    Pawn actions are assumed to be the action ids below 12 -- TODO confirm
    against the environment's action encoding.
    """
    def __init__(self, name, environment=None):
        self.name = name
        self.environment = environment
    def choose_action(self):
        """Return a random valid pawn action (id < 12).

        Fix: the original drew a random action from ALL valid actions and
        immediately overwrote it (via a duplicated "action = action ="
        assignment) with a draw from the pawn actions; the first draw was
        dead code and is removed.
        """
        pawn_actions = [a for a in self.environment.valid_actions if a < 12]
        action = np.random.choice(pawn_actions)
        print("Choosing action {action}".format(action=action))
        return action
|
python
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Updates ExtensionPermission2 enum in histograms.xml file with values read
from permission_message.h.
If the file was pretty-printed, the updated version is pretty-printed too.
"""
import os
from update_histogram_enum import UpdateHistogramEnum
if __name__ == '__main__':
  # Header defining the C++ enum mirrored by the histograms.xml entry.
  source_path = os.path.join('..', '..', '..', 'extensions', 'common',
                             'permissions', 'permission_message.h')
  UpdateHistogramEnum(histogram_enum_name='ExtensionPermission2',
                      source_enum_path=source_path,
                      start_marker='^enum ID {',
                      end_marker='^kEnumBoundary')
|
python
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
import pkg_resources
import pytest
import torch
from flsim.common.pytest_helper import assertEqual
from flsim.data.csv_dataset import FLCSVDataset
from flsim.data.data_sharder import ColumnSharderConfig
from flsim.data.dataset_data_loader import FLDatasetDataLoaderWithBatch
from hydra.utils import instantiate
@pytest.fixture(scope="class")
def prepare_dataset_data_loader_with_batch(request):
    """Attach the CSV-dataset constants to the requesting test class."""
    settings = {
        "test_csv_path": "test_resources/data.csv",
        "total_data_count": 15,
        "train_batch_size": 1,
        "eval_batch_size": 3,
        "test_batch_size": 5,
    }
    for attr, value in settings.items():
        setattr(request.cls, attr, value)
class TestDataset(FLCSVDataset):
    """CSV-backed dataset exposing "userid" and "label" as 1-element tensors."""
    def _get_processed_row_from_single_raw_row(self, raw_row: Any) -> Dict[str, Any]:
        processed = {}
        for column in ("userid", "label"):
            processed[column] = torch.Tensor([raw_row[column]])
        return processed
@pytest.mark.usefixtures("prepare_dataset_data_loader_with_batch")
class TestDatasetDataLoaderWithBatch:
    def test_batch_size(self) -> None:
        """Each split must yield total_data_count / batch_size batches."""
        # pyre-ignore[16]: for pytest fixture
        file_path = pkg_resources.resource_filename(__name__, self.test_csv_path)
        dataset = TestDataset(file_path)
        fl_data_sharder = instantiate(ColumnSharderConfig(sharding_col="userid"))
        # The same dataset serves as the train, eval and test split here.
        data_loader = FLDatasetDataLoaderWithBatch(
            dataset,
            dataset,
            dataset,
            fl_data_sharder,
            # pyre-ignore[16]: for pytest fixture
            self.train_batch_size,
            # pyre-ignore[16]: for pytest fixture
            self.eval_batch_size,
            # pyre-ignore[6]
            self.test_batch_size,
        )
        splits = [
            # pyre-ignore[16]: for pytest fixture
            (data_loader.fl_train_set(), self.train_batch_size),
            (data_loader.fl_eval_set(), self.eval_batch_size),
            (data_loader.fl_test_set(), self.test_batch_size),
        ]
        for batches, batch_size in splits:
            # pyre-ignore[16]: for pytest fixture
            assertEqual(len(list(batches)), self.total_data_count / batch_size)
|
python
|
import click
import json
from pathlib import Path
from PIL import Image
import pycountry
import re
import shlex
import subprocess
import time
import traceback
import youtube_dl
# Subtitle languages to download; 'un' (undetermined) is remapped to English
# during muxing.
SUB_LANGUAGES = ['en', 'en-US', 'en-UK', 'en-us', 'en-uk', 'de', 'de-DE', 'de-de', 'un']
# youtube-dl options shared by all downloads; 'outtmpl' is filled in with the
# link index per download (see download()).
ytops = {
    'outtmpl': '{}.%(ext)s',
    'hls_prefer_native': True,
    'nocheckcertificate': True,
    'writesubtitles': True,
    'subtitleslangs': SUB_LANGUAGES,
    'subtitlesformat': 'srt/vtt',
    'keepvideo': True,
    'skip_unavailable_fragments': False,
    'writethumbnail': True,
    'fixup': 'never',
    'socket_timeout': 10,
    'postprocessors': [
        {
            # Convert downloaded subtitles (e.g. vtt) to srt.
            'key': 'FFmpegSubtitlesConvertor',
            'format': 'srt',
        },
    ],
}
# NOTE(review): this module-level instance appears unused — download() creates
# its own YoutubeDL; confirm before removing.
ytdl = youtube_dl.YoutubeDL(ytops)
@click.command()
@click.argument('file', type=click.Path(exists=True))
@click.option('-o', '--output',
              default='.',
              type=click.Path(file_okay=False))
@click.option('-v', '--verbose', is_flag=True)
def main(file, output, verbose):
    """Download and mux every link listed in FILE.

    Progress is tracked in a sibling '.done' file holding the indices of links
    already muxed; a '.forced.json' sibling may override metadata fields.
    """
    file_path = Path(file)
    output_path = Path(output)
    with file_path.open('r') as f:
        links = f.read().splitlines()
    # Indices of links that were already completed in a previous run.
    done_path = file_path.with_suffix('.done')
    if not done_path.is_file():
        done_path.touch()
    with done_path.open('r') as f:
        done = f.read().splitlines()
    done = [int(i) for i in done if i.isdigit()]
    # Optional metadata overrides merged on top of the extracted info dict.
    path_forced_info = file_path.with_suffix('.forced.json')
    if path_forced_info.is_file():
        with path_forced_info.open('r') as f:
            forced_info = json.load(f)
    else:
        forced_info = {}
    for index, link in enumerate(links):
        if index in done:
            continue
        if not link or not link.startswith('http'):
            continue
        print('[Queue] Processing link #{}'.format(index))
        # path_video = Path('{}.mp4'.format(index))
        # path_subs = Path('{}.en.srt'.format(index))
        # path_thumb_jpg = Path('{}.jpg'.format(index))
        # path_thumb_png = Path('{}.png'.format(index))
        # if path_video.is_file() and path_subs.is_file() and (path_thumb_jpg.is_file() or path_thumb_png.is_file()):
        #     print('[Queue] All files for #{} already exist, proceed to muxing')
        # else:
        # Retry download+mux until it succeeds (download sleeps on failure).
        result = False
        while not result:
            info = download(index, link, verbose)
            if info:
                info = {**info, **forced_info}
                result = mux(index, info, output_path)
        # Record the finished index so reruns skip it.
        if result:
            with done_path.open('a+') as f:
                f.write(str(index) + '\n')
            # source_files = Path('.').glob('{}.*'.format(id))
            # for file in source_files:
            #     file.unlink()
def download(id, url, verbose=False, rewrite_info=False):
    """Download video, subtitles and thumbnail for one link.

    The extracted info dict is cached in '<id>.info.json' and reused on
    subsequent runs unless *rewrite_info* is set.

    Returns the info dict on success, or None after a 30-second back-off on
    failure.
    """
    ytdl = youtube_dl.YoutubeDL(ytops)
    # Name all produced files after the link index.
    ytdl.params['outtmpl'] = ytops['outtmpl'].format(id)
    try:
        info_path = Path('{}.info.json'.format(id))
        if not rewrite_info and info_path.is_file():
            with info_path.open('r') as f:
                info = json.load(f)
        else:
            info = ytdl.extract_info(url, download=False)
            with info_path.open('w') as f:
                json.dump(info, f)
        ytdl.process_info(info)
    except Exception:
        # Was a bare `except:`, which also swallowed KeyboardInterrupt and
        # SystemExit, making the retry loop in main() impossible to abort.
        if verbose:
            traceback.print_exc()
        print("[Queue] Download of item #{} failed. Try again in 30".format(id))
        time.sleep(30)
        return None
    return info
def alpha3(alpha2):
    """Map a 2-letter (ISO 639-1) language code to its 3-letter (ISO 639-2) form."""
    two_letter = alpha2[0:2]
    language = pycountry.languages.get(alpha_2=two_letter)
    return language.alpha_3
def mux(id, info, output):
    """Mux the downloaded streams for item *id* into a single MKV via ffmpeg.

    Collects the video/audio files described by *info*, plus any downloaded
    subtitles, a thumbnail (if large enough) and chapter metadata, derives the
    output file name from series/season/episode metadata (prompting the user
    for missing numbers), and runs ffmpeg.

    Returns True on success, False otherwise.
    """
    fix_aac = False
    paths_video = []
    # List of (path or None, language) tuples; None means the audio stream
    # lives in a file already listed in paths_video.
    paths_audio = []
    if 'requested_formats' in info and info['requested_formats']:
        for format in info['requested_formats']:
            path = Path('{}.f{}.{}'.format(id, format['format_id'], format['ext']))
            if format['vcodec'] != 'none':
                paths_video.append(path)
            if format['acodec'] != 'none':
                lang = format.get('language') or info.get('language') or 'en'
                paths_audio.append((path if path not in paths_video else None, alpha3(lang)))
                if format['acodec'].startswith('mp4a'):
                    # ADTS AAC needs a bitstream filter when remuxed (below).
                    fix_aac = True
    else:
        # Single pre-muxed file: video and audio share one input.
        paths_video.append(Path('{}.{}'.format(id, info['ext'])))
        # NOTE(review): unlike the branch above, the language is not mapped
        # through alpha3() here — confirm whether that is intentional.
        paths_audio.append((None, info.get('language') or 'en'))
        if 'acodec' in info and info['acodec'].startswith('mp4a'):
            fix_aac = True
    if not paths_video and not paths_audio:
        print('[Queue] Muxing failed because no video/audio files were found.')
        return False
    # Determine which subtitles are available for muxing
    available_subs = [i for i in info['subtitles'] if i in SUB_LANGUAGES] if 'subtitles' in info else []
    paths_sub = []
    for lang in available_subs:
        path_sub = Path('{}.{}.vtt'.format(id, lang))
        if not path_sub.is_file():
            path_sub = Path('{}.{}.srt'.format(id, lang))
            if path_sub.is_file():
                pass #fix_srt(path_sub)
            else:
                path_sub = None
        if path_sub:
            if lang == 'un':
                # Treat 'undetermined' subtitles as English.
                lang = 'en'
            paths_sub.append((path_sub, alpha3(lang)))
    # Check if thumbnail was downloaded
    orig_path_thumb = Path('{}.jpg'.format(id))
    if not orig_path_thumb.is_file():
        orig_path_thumb = Path('{}.png'.format(id))
        if not orig_path_thumb.is_file():
            orig_path_thumb = None
    # Check if the thumbnail's resolution is high enough
    if orig_path_thumb:
        with Image.open(orig_path_thumb) as img:
            if img.size[1] < 480:
                orig_path_thumb = None
    # Determine mime type of thumbnail
    if orig_path_thumb:
        if orig_path_thumb.suffix == '.jpg':
            thumb_mime = 'image/jpeg'
        elif orig_path_thumb.suffix == '.png':
            thumb_mime = 'image/png'
        else:
            orig_path_thumb = None
    # Rename thumbnail file to 'thumbnail' so it is attached under a
    # predictable name; the original name is restored after muxing.
    if orig_path_thumb:
        path_thumb = Path('thumbnail{}'.format(orig_path_thumb.suffix))
        orig_path_thumb.rename(path_thumb)
    else:
        path_thumb = None
    if 'movie' in info and info['movie'] == True:
        title = info['title']
        path_final = Path('{}.mkv'.format(title))
    else:
        # Fill in missing episode metadata, prompting the user where needed.
        if 'episode' not in info or not info['episode']:
            if 'title' in info and info['title']:
                info['episode'] = info['title']
            else:
                info['episode'] = 'EPISODE'
        if 'season_number' not in info or not info['season_number']:
            print('\a')
            info['season_number'] = click.prompt(
                'No season number found. Please specify for episode "{}"'.format(info['episode']),
                default=0)
        r = re.match(r'(?:(?:Episode|Folge|Part) )*(?P<nr>\d+)(?:/\d)*', info['episode'])
        if r and info['title'] != info['episode']:
            info['episode'] = info['title']
        if 'series' not in info or not info['series']:
            info['series'] = 'SERIES'
        # Strip episode counters, season/series names and separators from the
        # episode title.
        info['episode'] = re.sub(r'( \(?\d+/\d+\)?)$', '', info['episode'])
        info['episode'] = info['episode'].replace('Season {}'.format(info['season_number']), '')
        info['episode'] = info['episode'].replace(info['series'], '')
        info['episode'] = info['episode'].replace(' - ', '')
        info['episode'] = info['episode'].strip()
        if 'episode_number' not in info or not info['episode_number']:
            if r and r['nr'].isdigit():
                info['episode_number'] = int(r['nr'])
            else:
                print('\a')
                info['episode_number'] = click.prompt(
                    'No episode number found. Please specify for episode "{}"'.format(info['episode']),
                    default=100 + id)
        if 'episode_offset' in info and info['episode_offset'] is not None:
            info['episode_number'] += info['episode_offset']
        title = '{series} - {season_number}x{episode_number:02d} - {episode}'.format(**info)
        path_final = Path('{}/{}/{}.mkv'.format(output.absolute(), info['season_number'], title))
    if path_final.is_file():
        path_final.unlink()
    path_final.parent.mkdir(exist_ok=True, parents=True)
    # Build the ffmpeg command line; every path and metadata value is quoted
    # via shlex because the command is executed through the shell.
    cmd = 'ffmpeg'
    for path_video in paths_video:
        cmd += ' -i {}'.format(shlex.quote(str(path_video.absolute())))
    for path_audio in paths_audio:
        if path_audio[0]:
            cmd += ' -i {}'.format(shlex.quote(str(path_audio[0].absolute())))
    if paths_sub:
        for path_sub in paths_sub:
            cmd += ' -i {}'.format(shlex.quote(str(path_sub[0].absolute())))
    chapters = info.get('chapters', [])
    if chapters:
        path_chapters = Path('{}.meta'.format(id))
        with path_chapters.open('w') as f:
            def ffmpeg_escape(txt):
                return re.sub(r'(=|;|#|\\|\n)', r'\\\1', txt)
            metadata_file_content = ';FFMETADATA1\n'
            for chapter in chapters:
                metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n'
                metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000)
                metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000)
                chapter_title = chapter.get('title')
                if chapter_title:
                    metadata_file_content += 'title=%s\n' % ffmpeg_escape(chapter_title)
            f.write(metadata_file_content)
        # Quoted via shlex for consistency with every other input path (was a
        # hand-rolled '"{}"' wrapper, which breaks on quotes in the path).
        # NOTE(review): '-map_metadata 1' assumes the chapter file is input #1,
        # which only holds with a single video input and no extra audio or
        # subtitle inputs — confirm for multi-input downloads.
        cmd += ' -i {} -map_metadata 1'.format(shlex.quote(str(path_chapters.absolute())))
    cmd += ' -c:v copy -bsf:v "filter_units=remove_types=6" -c:a copy'
    if fix_aac:
        # Convert ADTS AAC to the MKV-friendly ASC form.
        cmd += ' -bsf:a aac_adtstoasc'
    if paths_sub:
        cmd += ' -c:s copy'
    for index, path_video in enumerate(paths_video):
        cmd += ' -disposition:v:{} +default'.format(index)
    for index, path_audio in enumerate(paths_audio):
        cmd += ' -metadata:s:a:{} language={}'.format(index, path_audio[1])
        cmd += ' -disposition:a:{} +default'.format(index)
    if paths_sub:
        for index, path_sub in enumerate(paths_sub):
            cmd += ' -metadata:s:s:{} language={}'.format(index, path_sub[1])
    if path_thumb:
        cmd += ' -attach {} -metadata:s:t mimetype={}'.format(shlex.quote(str(path_thumb.absolute())), thumb_mime)
    cmd += ' -metadata title={}'.format(shlex.quote(title))
    if 'description' in info:
        cmd += ' -metadata description={}'.format(shlex.quote(info['description']))
        cmd += ' -metadata comment={}'.format(shlex.quote(info['description']))
        cmd += ' -metadata summary={}'.format(shlex.quote(info['description']))
        cmd += ' -metadata synopsis={}'.format(shlex.quote(info['description']))
    cmd += ' -y {}'.format(shlex.quote(str(path_final.absolute())))
    print('[Queue] Mux #{}: "{}"'.format(id, cmd))
    proc = subprocess.run(cmd, shell=True)
    if path_thumb:
        # Restore the thumbnail's original file name.
        path_thumb.rename(orig_path_thumb)
    if proc.returncode != 0:
        # Fixed log-tag typo ('[Queue]]' -> '[Queue]').
        print('[Queue] Muxing #{} failed.'.format(id))
        return False
    print('[Queue] #{} successfully muxed.'.format(id))
    return True
def fix_srt(path):
    """Repair an SRT file whose cue text was split across multiple lines.

    For each cue (index line, timestamp line, text lines), all consecutive
    non-empty text lines after the first are merged into it, separated by
    '<br />'. The file is rewritten in place.
    """
    print("[Queue] Fix corrupted SRT conversion")
    with path.open('r', encoding='utf-8-sig') as f:
        srt = f.read().split('\n')
    i = 0
    while i < len(srt):
        if srt[i]:
            if srt[i].isdigit():
                i += 1
            # Bounds guards added: the original indexed srt[i] / srt[i+1]
            # unconditionally and raised IndexError when the file did not end
            # with a trailing blank line.
            if i < len(srt) and re.match(r'[0-9][0-9]:[0-9][0-9]:[0-9][0-9],[0-9][0-9][0-9] --> [0-9][0-9]:[0-9][0-9]:[0-9][0-9],[0-9][0-9][0-9]', srt[i]):
                i += 1
                while i + 1 < len(srt) and srt[i+1]:
                    srt[i] += '<br />' + srt[i+1]
                    del srt[i+1]
        i += 1
    with path.open('w', encoding='utf-8-sig') as f:
        f.write('\n'.join(srt))
|
python
|
from dataclasses import dataclass
@dataclass
class SpotifyConfig:
    """Credentials used to authenticate against the Spotify API."""

    # Spotify application client ID
    client_id: str
    # Spotify application client secret
    client_secret: str
    # Long-lived OAuth refresh token used to obtain fresh access tokens
    refresh_token: str
|
python
|
"""
Application load balancer stack for running ConsoleMe on ECS
"""
from aws_cdk import aws_ec2 as ec2
from aws_cdk import aws_elasticloadbalancingv2 as lb
from aws_cdk import core as cdk
class ALBStack(cdk.NestedStack):
    """
    Application load balancer stack for running ConsoleMe on ECS
    """

    def __init__(
        self,
        scope: cdk.Construct,
        id: str,
        vpc: ec2.Vpc,
        consoleme_sg: ec2.SecurityGroup,
        **kwargs
    ) -> None:
        """Create an internet-facing ALB in the VPC's public subnets and
        attach the (immutably imported) ConsoleMe security group."""
        super().__init__(scope, id, **kwargs)

        # Internet-facing load balancer for the ECS service.
        alb = lb.ApplicationLoadBalancer(
            self,
            "ConsoleMeALB",
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
            internet_facing=True,
        )

        # Re-import the security group as immutable before attaching it.
        imported_sg = ec2.SecurityGroup.from_security_group_id(
            self,
            "ImportedConsoleMeLBSG",
            security_group_id=consoleme_sg.security_group_id,
            mutable=False,
        )
        alb.add_security_group(imported_sg)

        # Exposed for sibling stacks (e.g. to register targets / DNS).
        self.consoleme_alb = alb
|
python
|
# --------------------------------------------------
# Gene class
# Authors: Thomas Schwarzl, [email protected]
# --------------------------------------------------
import gzip
import logging
from collections import OrderedDict, defaultdict
from HTSeq import GenomicArray, GenomicArrayOfSets, GenomicPosition, GenomicFeature
from .GeneRegion import GeneRegion
from .GTxFeature import GTxFeature
from .output import Output
"""
The Class 'Gene' stores genomic gene location information
and flattens gene info.
It sores the Exon information of different transcripts
and calculates the Introns from the inbetween spaces.
Also, the CDS and UTR information is stored and flatten.
"""
class Gene:
    """Flattened genomic gene model.

    Stores the exon information of all transcripts of a gene, derives the
    introns from the gaps between the merged exons, and (optionally) splits
    the exons into CDS and UTR regions.
    """

    __CDS__ = "CDS"
    __3UTR__ = "3UTR"
    __5UTR__ = "5UTR"
    __EXON__ = "exon"
    __INTRON__ = "intron"

    def __init__(self, feature, splitExons=True, processGeneOnlyInformation=True):
        """Initialize an unprocessed gene.

        :param feature: GTxFeature describing the gene itself
        :param splitExons: split exons into CDS/UTR regions during processing
        :param processGeneOnlyInformation: synthesize a single exon for genes
            that come without any exon annotation
        """
        logging.debug("Initializing new gene")
        self.splitExons = splitExons
        self.processGeneOnlyInformation = processGeneOnlyInformation
        self.stranded = True
        self.forwardSymbol = "+"
        self.reverseSymbol = "-"
        # Priority used when flattening stored CDS/UTR regions onto exons.
        self.regionPriorityOrder = (self.__CDS__, self.__3UTR__, self.__5UTR__)
        self.features = {
            self.__CDS__ : GenomicArrayOfSets('auto', stranded = self.stranded),
            self.__3UTR__ : GenomicArrayOfSets('auto', stranded = self.stranded),
            self.__5UTR__ : GenomicArrayOfSets('auto', stranded = self.stranded),
            self.__EXON__ : GenomicArrayOfSets('auto', stranded = self.stranded)
        }
        self.details = GenomicArrayOfSets('auto', stranded = self.stranded)
        # Start/end sites are used later to detect alternative isoforms.
        self.startSites = {
            self.__CDS__ : GenomicArray('auto', stranded = self.stranded),
            self.__3UTR__ : GenomicArray('auto', stranded = self.stranded),
            self.__5UTR__ : GenomicArray('auto', stranded = self.stranded),
            self.__EXON__ : GenomicArray('auto', stranded = self.stranded)
        }
        self.endSites = {
            self.__CDS__ : GenomicArray('auto', stranded = self.stranded),
            self.__3UTR__ : GenomicArray('auto', stranded = self.stranded),
            self.__5UTR__ : GenomicArray('auto', stranded = self.stranded),
            self.__EXON__ : GenomicArray('auto', stranded = self.stranded)
        }
        # CDS/UTR features are buffered here until processing starts.
        self.storage = {
            self.__CDS__ : [],
            self.__3UTR__ : [],
            self.__5UTR__ : []
        }
        self.exonTotal = 0
        self.intronTotal = 0
        # List of unprocessed gene regions
        self.rawRegionList = None
        # List of GeneRegions containing the processed exons and introns
        self.regionList = list()
        # List of GeneRegions containing the processed CDS, UTRs, exons and introns
        self.detailedRegionList = None
        # gene symbol and gene type
        self.symbol = "NA"
        self.geneType = "NA"
        # The GTxFeature describing the gene itself.
        self.feature = feature

    # --- Getters for convenience -------------------------------------------

    def getGeneInterval(self):
        """Return the genomic interval covered by the gene."""
        return self.feature.getInterval()

    def getGeneType(self):
        return self.feature.getGeneType()

    def getGeneName(self):
        return self.feature.getGeneName()

    def getStrand(self):
        return self.feature.getStrand()

    def getId(self):
        return self.feature.getName()

    # --- Basic predicates ---------------------------------------------------

    def isForwardStrand(self):
        return self.getStrand() == self.forwardSymbol

    def isReverseStrand(self):
        return self.getStrand() == self.reverseSymbol

    def isProcessed(self):
        """Return True if the gene was processed (required before output)."""
        return len(self.regionList) > 0

    def calculateTotalExonAndIntronCount(self):
        """Derive exon/intron totals from the merged (alternating) region list."""
        logging.debug("processing gene")
        # rawRegionList alternates exon/gap steps, hence (n + 1) / 2 exons.
        self.exonTotal = int((len(self.rawRegionList) + 1) / 2)
        self.intronTotal = int(self.exonTotal - 1)
        logging.debug("exon total: {}, intron total: {}".format(str(self.exonTotal), str(self.intronTotal)))

    def mergeExons(self):
        """Merge/flatten the exons of all transcripts into rawRegionList."""
        self.rawRegionList = list(self.features[ self.__EXON__ ][ self.getGeneInterval() ].steps())

    def exonsWereAdded(self):
        """Return True if at least one exon was added to this gene."""
        return len(list(self.features[ self.__EXON__ ].steps())) > 1

    def process(self):
        """Calculate exon/intron regions and their flags.

        The processed annotation is stored in self.regionList (and, if
        splitExons is set, additionally in self.detailedRegionList).
        """
        logging.debug("processing gene")
        if self.exonsWereAdded():
            self.mergeExons()
            # in this function the flattened gene definition is created, also all start and end sites
            # of exons and introns are stored so alternative isoforms of exons and introns can be assigned.
            # those are used to calculate flags, (no alternative isoform, or 5' or 3' isoform, or 5' and 3'
            # isoform variants
            self.calculateExonsAndIntrons()
            self.processStoredRegions()
            if self.splitExons:
                self.splitExonsIntoUTRandCDSRegions()
        else:
            if self.processGeneOnlyInformation:
                logging.debug("adding an exon for a gene without exon information.")
                # Fabricate a single exon spanning the whole gene, then retry.
                feature = GTxFeature(GenomicFeature("name", self.__EXON__, self.feature.feature.iv))
                self.addRegion(feature, self.__EXON__)
                self.process()
            else:
                raise Exception("The gene annotation file provides a gene without any exon information. " +
                                "Either add exon information to the annotation or use the processGeneOnlyInformation " +
                                "of htseq-clip.")

    def length(self):
        """Return the length of the gene (sum of all gene regions)."""
        length = 0
        for region in self.regionList:
            length += region.length()
        return length

    def checkIfProcessed(self):
        """Process the gene lazily; must run before any output is produced."""
        if not self.isProcessed():
            self.process()

    def calculateExonsAndIntrons(self):
        """Calculate exon/intron regions, their flags, index and total count
        (e.g. exon 1/10)."""
        self.calculateTotalExonAndIntronCount()
        logging.debug("calculating exons and introns and their corresponding flags, number, and total count")
        # Numbering runs 5'->3', so it starts at the top on the forward strand
        # and counts down on the reverse strand.
        exonIndex = 1 if self.isForwardStrand() else self.exonTotal
        intronIndex = 1 if self.isForwardStrand() else self.intronTotal
        regionIndex = 0
        for (regionInterval, regionInfo) in self.rawRegionList:
            logging.debug("processing region {} - {}".format(str(regionInterval), str(regionInfo)))
            region = GeneRegion(self, regionInterval)
            # Steps with a non-empty annotation set are exons, gaps are introns.
            if len(regionInfo) > 0: # == "exon":
                logging.debug("processing exon")
                upstreamFlag = self.getExonUpstreamFlag(regionInterval)
                downstreamFlag = self.getExonDownstreamFlag(regionInterval)
                region.type = self.__EXON__
                region.index = exonIndex
                region.total = self.exonTotal
                region.upstreamFlag = upstreamFlag
                region.downstreamFlag = downstreamFlag
                exonIndex = self.nextIndex(exonIndex)
            else:
                logging.debug("processing intron")
                region.type = self.__INTRON__
                region.index = intronIndex
                region.total = self.intronTotal
                # intron flags will be determined after all exon flags have been assigned
                intronIndex = self.nextIndex(intronIndex)
            # update regionList
            self.regionList.append(region)
            regionIndex += 1
        # calculate all intron flags
        self._regionListSanityCheck()
        self.calculateIntronFlags()

    def _regionListSanityCheck(self):
        '''
        Sanity check for region list: the first and last entries must be exons
        and two regions of the same type must never be adjacent.
        '''
        removeIndices = list() # indices to remove from the region list
        prevType = None
        for i,rd in enumerate(self.regionList):
            if (i==0 or i==len(self.regionList)-1) and rd.type != self.__EXON__:
                removeIndices.append(i)
            elif prevType == rd.type:
                removeIndices.append(i)
            prevType = rd.type
        # Delete from the back: deleting in ascending order shifted the
        # still-pending indices and removed the wrong elements.
        for ri in reversed(removeIndices):
            del self.regionList[ri]

    def calculateIntronFlags(self):
        """Derive intron flags from the neighboring regions.

        NOTE(review): both flags are taken from the strand-wise *previous*
        region; confirm whether the downstream flag should instead come from
        the following region.
        """
        logging.debug("calculating intron flags directional")
        regionIndex = 1
        while regionIndex < len(self.regionList):
            self.regionList[ regionIndex ].upstreamFlag = self.regionList[ self.previousIndex(regionIndex) ].downstreamFlag
            self.regionList[ regionIndex ].downstreamFlag = self.regionList[ self.previousIndex(regionIndex) ].upstreamFlag
            # Introns sit at every other position between exons.
            regionIndex += 2

    def splitExonsIntoUTRandCDSRegions(self):
        """Split exon regions into UTR and CDS regions and store all regions
        in detailedRegionList."""
        logging.debug("calculate UTR and CDS regions")
        self.detailedRegionList = []
        for region in self.regionList:
            for newRegion in region.split():
                self.detailedRegionList.append(newRegion)

    # --- Index helpers: increment/decrement depending on strand -------------

    def nextIndex(self, index):
        return self.indexStep(index, 1)

    def previousIndex(self, index):
        return self.indexStep(index, -1)

    def indexStep(self, index, step):
        """Move *index* by *step* in 5'->3' direction of this gene's strand."""
        if self.isForwardStrand():
            index += step
        elif self.isReverseStrand():
            index -= step
        else:
            raise Exception("Sorry, but htseq-clip cannot work with unstranded data yet.")
        return index

    def getExonUpstreamFlag(self, interval):
        """Return the strand-specific exon upstream flag.

        True when exactly one exon start site falls into *interval*, i.e. no
        alternative 5' isoform exists for this exon.
        """
        length = len(list(self.startSites[ self.__EXON__ ][ interval ].steps()))
        logging.debug("Get exon start sites in interval: %s" % length)
        return length == 1

    def getExonDownstreamFlag(self, interval):
        """Return the strand-specific exon downstream flag (see upstream flag)."""
        length = len(list(self.endSites[ self.__EXON__ ][ interval ].steps()))
        logging.debug("Get exon end sites in interval: %s" % length)
        return length == 1

    def add(self, feature):
        """Add a gff/gtf feature (exon, CDS, UTR) to the gene."""
        if self.isProcessed():
            raise Exception("Gene already has been processed, you cannot add additional regions.")
        logging.debug("adding info %s " % feature)
        self.assertFeatureBelongsToGene(feature)
        if feature.isExon():
            logging.debug("invoking addRegion %s %s" % (feature, self.__EXON__))
            self.addRegion(feature, self.__EXON__)
        elif feature.isCDS():
            self.storeRegion(feature, self.__CDS__)
        elif feature.is5UTR():
            self.storeRegion(feature, self.__5UTR__)
        elif feature.is3UTR():
            self.storeRegion(feature, self.__3UTR__)
        else:
            logging.debug("ignoring feature %s" % feature)

    def assertFeatureBelongsToGene(self, feature):
        """Raise if *feature* does not belong to this gene."""
        if not self.feature.getGeneId() == feature.getGeneId():
            raise SyntaxError("The order of gene and gene features in the input file are incorrect. The current feature does not belong to the gene being processed.")

    def addRegion(self, feature, name):
        """Add a region plus its start and end sites.

        Start and end sites are later used to figure out alternative isoforms
        for a given exon or intron.
        """
        logging.debug("adding {} {} {}".format(name, feature, feature.getInterval()))
        self.features[ name ][ feature.getInterval() ] = name
        self.startSites[ name ][ GenomicPosition( feature.getInterval().chrom,
                                                  feature.getInterval().start_d,
                                                  strand = feature.getStrand() ) ] = True
        self.endSites[ name ][ GenomicPosition( feature.getInterval().chrom,
                                                feature.getInterval().end_d,
                                                strand = feature.getStrand() ) ] = True
        self.details[ feature.getInterval() ] = name
        logging.debug("finished adding %s %s" % (name, feature))

    def storeRegion(self, feature, name):
        """Buffer a CDS/UTR region; processing starts once all are stored."""
        logging.debug("storing %s %s" % (str(name), str(feature)))
        self.storage[ name ].append(feature)

    def processStoredRegions(self):
        """Add all buffered regions according to the region priority order."""
        logging.debug("processing stored regions")
        for regionName in self.regionPriorityOrder:
            for feature in self.storage[ regionName ]:
                self.addRegion(feature, regionName)

    def toBed(self):
        """Yield the genomic locations of exons and introns in bed format."""
        self.checkIfProcessed()
        for region in self.regionList:
            yield region.toBed()

    def toBedDetailed(self):
        """Yield CDS, UTRs, introns and remaining exons in bed format."""
        # calculates all the coordinates if not calculated
        self.checkIfProcessed()
        regionList = None
        if self.splitExons:
            regionList = self.detailedRegionList
        else:
            regionList = self.regionList
        # write the individual regions to the output
        for region in regionList:
            yield region.toBed()
|
python
|
from __future__ import print_function
import os
import subprocess
import shlex
from getpass import getuser
from distutils.command.build import build # type: ignore
from setuptools import (
find_packages,
setup,
Command
)
import numpy as np
# Shell commands executed during `pip install` (see CustomCommands) to install
# native dependencies required at runtime.
CUSTOM_COMMANDS = [
    shlex.split(command_line) for command_line in [
        'apt-get update',
        'apt-get --assume-yes install libxml2',
        'apt-get --assume-yes install poppler-utils',
        'apt-get --assume-yes install libgl1'
    ]
]
# Python dependencies, one requirement specifier per line.
with open(os.path.join('requirements.txt'), 'r', encoding='utf-8') as f:
    REQUIRED_PACKAGES = f.readlines()
packages = find_packages()
# This class handles the pip install mechanism.
class CustomBuild(build):
    """A build command class that will be invoked during package install.
    The package built using the current setup.py will be staged and later
    installed in the worker using `pip install package'. This class will be
    instantiated during install for this specific scenario and will trigger
    running the custom commands specified.
    """
    # Run the CustomCommands step after the standard build sub-commands.
    sub_commands = build.sub_commands + [('CustomCommands', None)]
class CustomCommands(Command):
    """A setuptools Command class able to run arbitrary commands."""

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def _run_custom_command(self, command_list):
        """Run one command, skipping it unless we are root (and not opted out)."""
        skip = getuser() != 'root' or os.environ.get('SCIENCEBEAM_GYM_NO_APT')
        if skip:
            print('Skipping setup command (not root): %s' % command_list)
            return
        print('Running command: %s' % command_list)
        process = subprocess.Popen(
            command_list,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT
        )
        # Can use communicate(input='y\n'.encode()) if the command run requires
        # some confirmation.
        output, _ = process.communicate()
        print('Command output: %s' % output)
        if process.returncode != 0:
            raise RuntimeError(
                'Command %s failed: exit code: %s (output: %s)' %
                (command_list, process.returncode, output)
            )

    def run(self):
        """Run every configured custom command in order."""
        for command_line in CUSTOM_COMMANDS:
            self._run_custom_command(command_line)
# Package metadata; the custom build/command classes hook the apt-get installs
# into `pip install`.
setup(
    name='sciencebeam_gym',
    version='0.0.1',
    install_requires=REQUIRED_PACKAGES,
    packages=packages,
    include_package_data=True,
    description='ScienceBeam Gym',
    include_dirs=[np.get_include()],
    cmdclass={
        'build': CustomBuild,
        'CustomCommands': CustomCommands
    }
)
|
python
|
# Copyright 2004-present, Facebook. All Rights Reserved.
from django.contrib.auth.decorators import login_required
from django.urls import path
from . import views
# All routes require an authenticated user (login_required wraps each view).
urlpatterns = [
    # products
    path(
        "store/<int:storeId>/products/create",
        login_required(views.createProduct),
        name="createProduct",
    ),
    path(
        "store/<int:storeId>/product/<str:productId>/update",
        login_required(views.updateProduct),
        name="updateProduct",
    ),
    path(
        "store/<int:storeId>/products",
        login_required(views.viewProducts),
        name="viewProducts",
    ),
    path(
        "store/<int:storeId>/product/<str:productId>",
        login_required(views.viewProduct),
        name="viewProduct",
    ),
    # catalog
    path(
        "store/<int:storeId>/catalog/sync",
        login_required(views.syncCatalog),
        name="syncCatalog",
    ),
    # dummy products
    path(
        "store/<int:storeId>/create_dummy_products",
        login_required(views.createDummyProducts),
        name="createDummyProducts",
    ),
]
|
python
|
from detective import functions
import math
# Representative Home Assistant entity attribute dict used by the tests below.
MOCK_ATTRIBUTE = {
    "battery_level": 61,
    "unit_of_measurement": "°C",
    "friendly_name": "Living room sensor temperature",
    "device_class": "temperature",
}
def test_device_class():
    """Test get_device_class"""
    expected = MOCK_ATTRIBUTE["device_class"]
    assert functions.get_device_class(MOCK_ATTRIBUTE) == expected
    # Missing attribute falls back to the UNKNOWN sentinel.
    assert functions.get_device_class({}) == functions.UNKNOWN
def test_get_unit_of_measurement():
    """Test get_unit_of_measurement"""
    expected = MOCK_ATTRIBUTE["unit_of_measurement"]
    assert functions.get_unit_of_measurement(MOCK_ATTRIBUTE) == expected
    # Missing attribute falls back to the UNKNOWN sentinel.
    assert functions.get_unit_of_measurement({}) == functions.UNKNOWN
def test_get_friendly_name():
    """Test get_friendly_name"""
    expected = MOCK_ATTRIBUTE["friendly_name"]
    assert functions.get_friendly_name(MOCK_ATTRIBUTE) == expected
    # Missing attribute falls back to the UNKNOWN sentinel.
    assert functions.get_friendly_name({}) == functions.UNKNOWN
|
python
|
from argparse import ArgumentParser
import examples.example02.tasks
from cline.cli import ArgumentParserCli, RegisteredTasks
class ExampleCli(ArgumentParserCli):
    """CLI wiring for example02: builds the parser and registers its tasks."""

    def make_parser(self) -> ArgumentParser:
        """Build the argument parser: two optional positionals and two flags."""
        parser = ArgumentParser()
        for arg_name, description in (("a", "first number"), ("b", "second number")):
            parser.add_argument(arg_name, help=description, nargs="?")
        parser.add_argument("--sub", help="subtracts", action="store_true")
        parser.add_argument("--sum", help="sums", action="store_true")
        return parser

    def register_tasks(self) -> RegisteredTasks:
        """Expose the tasks this CLI can dispatch to."""
        return [
            examples.example02.tasks.SumTask,
            examples.example02.tasks.SubtractTask,
        ]
|
python
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from pandas.compat import StringIO
import sys
import re
import os
import ntpath
def file_to_df(filename):
    """Parse a benchmark result file into a single DataFrame.

    The file contains a "Run Configurations" section holding a Python literal
    list of config dicts, followed by a whitespace-separated data table whose
    header starts with "config". The config table is joined onto the data rows
    via the data's 'config' column.
    """
    with open(filename, 'r') as file:
        contents = file.read()
    # Read run configurations
    start = contents.find("[", contents.find("Run Configurations"))
    end = contents.find("config")
    # NOTE(review): eval() on file contents is unsafe for untrusted input;
    # kept for compatibility, but ast.literal_eval would be safer for
    # literal config lists.
    config = pd.DataFrame(eval(contents[start:end]))
    # Read data. io.StringIO replaces pandas.compat.StringIO (removed from
    # modern pandas); sep=r'\s+' replaces the deprecated delim_whitespace=True.
    from io import StringIO
    data = pd.read_csv(StringIO(contents[end:]), sep=r'\s+')
    # Join tables and return
    return data.join(config, on='config', how='inner')
def _is_nr(str):
return str.find("NR") != -1
is_nr = np.vectorize(_is_nr)
def _gap(arr):
if len(arr) < 2:
raise Exception('length 0 or 1 pattern')
return arr[1]
gap = np.vectorize(_gap)
def _pct(name):
return int(re.findall('\d+', name)[0])
pct = np.vectorize(_pct)
# Line colors keyed by prefetch setting ('on'/'off').
colors = {'on':'#26CAD3', 'off':'black'}
def prefetch(name):
    """Extract the prefetch setting ("on"/"off") encoded in a file name.

    File names look like '<prefix>_<on|off>_<arch>.<ext>'; the middle segment
    between the first and last underscore carries the setting.
    """
    stem = ntpath.basename(os.path.splitext(name)[0])
    middle = stem[stem.find("_") + 1:stem.rfind("_")]
    if "on" in middle:
        return "on"
    if "off" in middle:
        return "off"
    raise Exception("could not determine prefetch")
def get_arch(name):
    """Extract the architecture suffix (text after the last underscore)
    from a file name like '<prefix>_<on|off>_<arch>.<ext>'."""
    stem = ntpath.basename(os.path.splitext(name)[0])
    return stem[stem.rfind("_") + 1:]
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: python3 {} input.dat".format(sys.argv[0]))
exit(1)
#Read files
dfs = []
for f in sys.argv[1:]:
tmp = file_to_df(f)
tmp['arch'] = get_arch(f)
tmp['gap'] = gap(tmp['name'])
tmp['prefetch'] = prefetch(f)
tmp['norm_local'] = tmp['bw(MB/s)'] / max(tmp['bw(MB/s)'])
dfs.append(tmp)
df = pd.concat(dfs)
df['norm_global'] = df['bw(MB/s)'] / max(df['bw(MB/s)'])
df['bw(GB/s)'] = df['bw(MB/s)'] / 1000
all_arch = ""
for key, _ in df.groupby(['arch']):
all_arch = all_arch + key
SMALL_SIZE = 15
MEDIUM_SIZE = 18
BIGGER_SIZE = 20
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
# Apply the font sizes configured earlier (SMALL_SIZE etc. defined above this chunk).
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title

#Plot against global max
print("Making plot 1")
# NOTE(review): "fix" looks like a typo for "fig"; harmless here because the
# figure is re-fetched via plt.gcf() before resizing below.
fix, ax = plt.subplots()
#ax = plt.subplot(1, 2, 1) #plot 1
# Left axis: absolute bandwidth, one line per prefetch group.
for key, grp in df.groupby(['prefetch']):
    ax = grp.plot(ax=ax, kind='line', x='config', y='bw(GB/s)', label=key, color=colors[key])
    print(key)
ax.set_xlabel("Stride (Doubles)")
ax.set_ylabel("Bandwidth (GB/s)")
ax.get_legend().remove()
# Right axis: bandwidth normalized to the global maximum, drawn thicker.
ax2 = ax.twinx()
for key, grp in df.groupby(['prefetch']):
    ax2 = grp.plot(ax=ax2, kind='line', x='config', y='norm_global', label=key, color=colors[key], linewidth=4)
    print(key)
MODE="normal"
print(f"Mode is {MODE}")
ax2.set_ylabel("Normalized bandwidth")
ax2.get_legend().remove()
# Dotted guide lines at successive halvings of peak normalized bandwidth.
if MODE == "opt":
    # xmin offsets stagger each guide line so it only spans its own region.
    ax2.axhline(y=1, linestyle=":", color="black")
    ax2.axhline(y=.5, linestyle=":", color="black", xmin=1/7)
    ax2.axhline(y=.25, linestyle=":", color="black", xmin=2/7)
    ax2.axhline(y=.125, linestyle=":", color="black", xmin=3/7)
    ax2.axhline(y=.0625, linestyle=":", color="black", xmin=4/7)
else:
    ax2.axhline(y=1, linestyle=":", color="black")
    ax2.axhline(y=.5, linestyle=":", color="black")
    ax2.axhline(y=.25, linestyle=":", color="black")
    ax2.axhline(y=.125, linestyle=":", color="black")
    ax2.axhline(y=.0625, linestyle=":", color="black")
# Label the right-axis ticks as fractions of peak.
plt.yticks([.0625, .125, .25, .5, 1], ['1/16', '1/8', '1/4', '1/2', '1'])
ax2.set_xticklabels([])
# ax.set_xticklabels([7, 7, 7, 7, 7, 7, 7, 7])
#def format_func(value, tick_number):
#    return r"$2^{}$".format(value)
#ax.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
# Render x tick labels as powers of two (2^0 .. 2^7).
ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
ax.set_xticklabels(["$2^{{{}}}$".format(x) for x in range(0,8)])
#ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
#ax.ticklabel_format(axis='x', useMathText=True)
# Hide tick marks themselves; labels remain.
ax.tick_params(axis=u'y', which=u'both',length=0)
ax2.tick_params(axis=u'both', which=u'both',length=0)
# In "opt" mode strip the plot frame entirely.
if MODE == "opt":
    for a in [ax, ax2]:
        a.spines["top"].set_visible(False)
        a.spines["right"].set_visible(False)
        a.spines["bottom"].set_visible(False)
        a.spines["left"].set_visible(False)
plt.legend(loc='best', title='Prefetch')
# Change figure size
fig = plt.gcf()
fig.set_size_inches(6, 6)
outname = "prefetch_{}_{}.png".format(all_arch, MODE)
plt.savefig(outname, transparent=True, bbox_inches='tight')
# NOTE(review): these two formatter calls repeat the ones above and run after
# savefig, so they have no visible effect on the saved file.
ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
ax.set_xticklabels(["$2^{{{}}}$".format(x) for x in range(0,8)])
print("Saved plot to {}".format(outname))
# NOTE(review): the reordered handles/labels are never used before clf()/exit().
handles,labels = ax.get_legend_handles_labels()
handles = [handles[1], handles[0]]
labels = [labels[1], labels[0]]
plt.clf()
# NOTE(review): exit(0) makes everything below unreachable; the local-max plot
# appears to have been disabled deliberately.
exit(0)
#Plot against a local max
fig, ax = plt.subplots()
for key, grp in df.groupby(['arch']):
    ax = grp.plot(ax=ax, kind='line', x='gap', y='norm_local', label=key)
    print(key)
plt.legend(loc='best', title='log2(gap)')
outname = "ustride_local.png"
plt.savefig(outname)
plt.clf()
|
python
|
import os
import socket
import struct
import sys
import select
# Empty system call -- presumably to enable ANSI escape-code handling in the
# Windows console (it is a no-op elsewhere); TODO confirm.
os.system("")

# UDP port the game server broadcasts its offers on.
UDP_PORT = 13117
# Maximum number of bytes read per recv/recvfrom call.
MESSAGE_LEN = 1024
# Game round length in seconds (not referenced in this chunk).
GAME_TIME = 10

# Module-level sockets, created by start_udp() / start_tcp().
sockUDP = None
sockTCP = None


# Colors for prints
class Colors:
    # ANSI escape codes used to colorize terminal output.
    GREEN = '\033[32m'
    BLUE = '\033[34m'
    PINK = '\033[35m'
def printMessageOrRead():
    """Relay traffic between the terminal and the game server.

    Blocks until either the server sends data (printed in pink, after which
    the function returns) or the user types a character (forwarded to the
    server, after which we keep waiting for the server's answer).

    Fix: the original version recursed once per keystroke, so a long game
    would eventually hit Python's recursion limit; it could also recv twice
    when both stdin and the socket were ready in the same select() call.
    This version loops instead.
    """
    while True:
        # wait for read or write from client or server
        readers, _, _ = select.select([sys.stdin, sockTCP], [], [])
        for reader in readers:
            if reader is sockTCP:
                data = sockTCP.recv(MESSAGE_LEN).decode()
                print(Colors.PINK + data)
                # Server answered -- done, mirroring the original fall-through.
                return
            else:
                ch = sys.stdin.read(1)
                sockTCP.sendall(ch.encode())
                # Keep looping: we still need to print the server's answer.
def start_udp():
    """Listen for a UDP broadcast offer from the game server.

    Binds to UDP_PORT on all interfaces and blocks until a single offer
    datagram arrives.

    Returns:
        tuple: (server IP address as str, raw offer message as bytes).
    """
    global sockUDP
    # create UDP socket with the variables we need
    sockUDP = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)  # init UDP socket
    sockUDP.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sockUDP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # SO_REUSEPORT lets several clients on one machine share the port, but the
    # constant does not exist on every platform (e.g. Windows) -- guard it so
    # the client does not crash with AttributeError there.
    if hasattr(socket, "SO_REUSEPORT"):
        sockUDP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    sockUDP.bind(("", UDP_PORT))
    # assume server started and need to connect
    message, location = sockUDP.recvfrom(MESSAGE_LEN)
    server_ip_address = str(location[0])
    return server_ip_address, message
def start_tcp(ip, tcp_port):
    """Open the TCP game connection to the server at (ip, tcp_port).

    Stores the connected socket in the module-level ``sockTCP`` global.
    """
    global sockTCP
    # create custom TCP socket
    sockTCP = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Allow quick reconnects to the same local address after a disconnect.
    sockTCP.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sockTCP.connect((ip, tcp_port))
def print_start(first_print):
    """Show the startup banner, but only before the first offer of a round.

    Args:
        first_print: counter maintained by the main loop; the banner is
            printed only when it is 0.
    """
    if first_print != 0:
        return
    print(Colors.GREEN + "Client started, listening for offer requests...")
# Main client loop: wait for a broadcast offer, connect, play one game, repeat.
first_print = 0
while True:
    print_start(first_print)
    first_print += 1
    # waits for server suggestion
    # part 1 get udp message
    ip, message = start_udp()
    try:
        # part 2 connect to server
        # NOTE(review): printBool is never used after this assignment.
        printBool = True
        # Offer layout: 4-byte magic cookie, 1-byte message type, 2-byte TCP
        # port (native byte order/alignment -- TODO confirm matches the server).
        cookie, msg_type, tcp_port = struct.unpack('IBH', message)
        # part 3 make sure it's the correct server
        if cookie == 0xabcddcba and msg_type == 0x2: # check if message is as expected
            print(Colors.GREEN + "Received offer from " + ip + " attempting to connect...")
            start_tcp(ip, tcp_port)
            # part 4 start game with group name
            group_name = input(Colors.PINK + 'Enter your group name: ')
            sockTCP.sendall(group_name.encode()) # send team's name to server
            # part 5 start game
            print(sockTCP.recv(MESSAGE_LEN).decode()) # the game begin message
            print(sockTCP.recv(MESSAGE_LEN).decode()) # math question
            # part 6 play game, win or lost
            printMessageOrRead()
            print(Colors.BLUE + "Server disconnected, listening for offer requests...")
            # Reset so the "listening" banner is printed again next round.
            first_print = 0
        else:
            print(Colors.GREEN + "Bad UDP Message Format")
            # got message not in the expected format
    except Exception as e:
        # NOTE(review): this silently swallows every error (unpack failures,
        # connection resets, EOF on input) and just re-listens; consider at
        # least logging ``e`` for debugging.
        pass
|
python
|
from tests.testcases import TestCaseUsingRealAPI
from vortexasdk import Products, Geographies, Corporations, Vessels
# (endpoint, search term) pairs exercised by every test below. Each term is
# loose enough to match several records and is expected to also exist as an
# exact reference name (TODO confirm against the live reference data).
endpoints_and_searchterms = [
    (Products(), "Gasoil"),
    (Geographies(), "China"),
    (Corporations(), "Oil"),
    (Vessels(), "Ocean"),
]
class TestSearchReal(TestCaseUsingRealAPI):
    """Exercises ``search(exact_term_match=...)`` against the real API."""

    def test_search_exact_match_yields_fewer_results_than_non_exact_match(
        self,
    ):
        # An exact-name search must be a strict subset of the loose search.
        for endpoint, term in endpoints_and_searchterms:
            loose = endpoint.search(term=term, exact_term_match=False)
            exact = endpoint.search(term=term, exact_term_match=True)
            assert len(exact) < len(loose)

    def test_search_exact_match_yields_exact_matches_only(self):
        for endpoint, term in endpoints_and_searchterms:
            exact = endpoint.search(term=term, exact_term_match=True)
            names = {record["name"] for record in exact}
            # result must be the exact term, or must contain no results if
            # there's no reference object with that name
            assert names in ({term}, set())
|
python
|
from app import app
from flask import request, session
from helpers.database import *
from helpers.hashpass import *
from helpers.mailer import *
from bson import json_util, ObjectId
import json
def checkloginusername():
    """Report whether the username from the login form exists in MongoDB.

    Returns:
        str: ``"User exists"`` when a matching document is found,
        ``"No User"`` otherwise.
    """
    username = request.form["username"]
    record = db.users.find_one({"username": username})
    return "No User" if record is None else "User exists"
def checkloginpassword():
    """Validate the password submitted on the login form.

    Looks the user up by username, compares the submitted password, and on
    success stores the username in the session and sends a notification mail.

    Returns:
        str: ``"correct"`` on a successful login, ``"wrong"`` otherwise.
    """
    username = request.form["username"]
    check = db.users.find_one({"username": username})
    # Fix: the original dereferenced check["password"] unconditionally and
    # raised TypeError when the user did not exist (e.g. stale form state or
    # a deleted account); treat a missing user as a failed login instead.
    if check is None:
        return "wrong"
    password = request.form["password"]
    # FIXME(security): passwords are compared (and therefore stored) in plain
    # text; the hashing helper below is available but disabled.
    # hashpassword = getHashed(password)
    if password == check["password"]:
        sendmail(subject="Login on Flask Admin Boilerplate", sender="Flask Admin Boilerplate", recipient=check["email"], body="You successfully logged in on Flask Admin Boilerplate")
        session["username"] = username
        return "correct"
    else:
        return "wrong"
def checkusername():
    """Report whether the username chosen at registration is still free.

    Returns:
        str: ``"Available"`` when no user has this name, ``"Username taken"``
        otherwise.
    """
    candidate = request.form["username"]
    existing = db.users.find_one({"username": candidate})
    return "Available" if existing is None else "Username taken"
def registerUser():
    """Create a new user document from the submitted registration form.

    Every form field is stored verbatim in the ``users`` collection, then a
    confirmation e-mail is sent to the registered address.
    """
    # Fix: the original iterated request.form twice (keys then values) and
    # round-tripped the dict through bson json_util for no effect -- form
    # values are already plain strings. Build the document in one pass.
    user_data = {key: request.form[key] for key in request.form}
    # FIXME(security): password (and confirmpassword) are stored in plain
    # text; the hashing calls below are available but disabled.
    # user_data["password"] = getHashed(user_data["password"])
    # user_data["confirmpassword"] = getHashed(user_data["confirmpassword"])
    # NOTE(review): Collection.insert is deprecated in pymongo 3+; prefer
    # insert_one once the installed driver version is confirmed.
    db.users.insert(user_data)
    sendmail(subject="Registration for Flask Admin Boilerplate", sender="Flask Admin Boilerplate", recipient=user_data["email"], body="You successfully registered on Flask Admin Boilerplate")
    print("Done")
|
python
|
"""Hyperparameters from paper """
import numpy as np
import torch.optim as optim
from .model import DQN, DuelingDQN
class AtariHyperparams:
    """Hyperparameter bundle for Atari DQN variants (values from the paper).

    Everything is a class attribute so the ``set_*`` classmethods can tweak
    the configuration globally; instances are never created.
    """
    ALGO = "DQN"
    SEED = 2
    LOG_DISPLAY_FREQ = 10

    # Image sizing
    WIDTH = 84
    HEIGHT = 84
    # Number of most recent frames given as input to Q-network
    AGENT_HISTORY = 4
    STATE_DIMS = (AGENT_HISTORY, WIDTH, HEIGHT)
    NORMALIZE = False

    DISCOUNT = 0.99
    MINIBATCH_SIZE = 32
    REPLAY_SIZE = int(1e6)
    REPLAY_S_DTYPE = np.uint8

    # Number of network updates between target network updates
    # TARGET_NETWORK_UPDATE_FREQ = 10000
    TARGET_NETWORK_UPDATE_FREQ = 2500  # every 10000 frames

    # Number of times an action is repeated, i.e. number of frames skipped
    ACTION_REPEAT = 4
    # Num actions (ignoring repeats) performed before Gradient descent update
    NETWORK_UPDATE_FREQUENCY = 4

    # Parameters for network learning
    OPTIMIZER = optim.RMSprop
    LEARNING_RATE = 0.00025
    GRADIENT_MOMENTUM = 0.95
    SQUARED_GRADIENT_MOMENTUM = 0.95
    MIN_SQUARED_GRADIENT = 0.01
    # NOTE(review): SQUARED_GRADIENT_MOMENTUM is never passed to the optimizer
    # (RMSprop's ``alpha`` keeps its default) -- confirm this is intended.
    OPTIMIZER_KWARGS = {
        "lr": LEARNING_RATE,
        "momentum": GRADIENT_MOMENTUM,
        "eps": MIN_SQUARED_GRADIENT
    }
    GRAD_CLIP = [-1, 1]

    # for reward
    R_CLIP = [-1, 1]

    # Exploration
    EXPLORATION_SCHEDULE = "Linear"
    INITIAL_EXPLORATION = 1.0
    FINAL_EXPLORATION = 0.1
    FINAL_EXPLORATION_FRAME = 1000000

    # Number of frames to run random policy and before learning starts
    REPLAY_START_SIZE = 50000
    # Max number of "do nothing" actions to be performed at start of episode
    NO_OP_MAX = 30

    # Network architecture
    INPUT_DIMS = (WIDTH, HEIGHT, AGENT_HISTORY)
    LAYER_1 = {"type": "convolutional",
               "filters": 32, "kernel_size": (8, 8),
               "stride": 4, "activation": "relu"}
    LAYER_2 = {"type": "convolutional",
               "filters": 64, "kernel_size": (4, 4),
               "stride": 2, "activation": "relu"}
    LAYER_3 = {"type": "convolutional",
               "filters": 64, "kernel_size": (3, 3),
               "stride": 1, "activation": "relu"}
    LAYER_4 = {"type": "fully_connected",
               "size": 512, "activation": "relu"}
    OUTPUT = {"type": "fully_connected"}
    MODEL = DQN

    # training duration (50 million)
    TRAINING_FRAMES = int(5e7)

    # Other hyperparams not related to paper
    # Model Save Freq
    MODEL_SAVE_FREQ = int(1e6)

    # Evaluation
    EVAL_FREQ = int(1e6)
    EVAL_STEPS = 125000
    EVAL_EPSILON = 0.05

    @classmethod
    def set_seed(cls, seed):
        """Override the RNG seed recorded in the hyperparameters."""
        cls.SEED = seed

    @classmethod
    def set_mode(cls, mode='dqn'):
        """Mutate the class attributes in place for the given algorithm/mode.

        Raises:
            ValueError: if ``mode`` is not one of the supported names.
        """
        if mode == "testing":
            # Tiny settings for fast smoke tests; requires interactive confirm.
            print("WARNING: using test hyperparams")
            input("Press any key to continue..")
            cls.ALGO += "_test"
            cls.REPLAY_SIZE = int(1e4)
            cls.REPLAY_START_SIZE = 100
            cls.INITIAL_EXPLORATION = 0.1
            cls.TARGET_NETWORK_UPDATE_FREQ = 1000
            cls.EVAL_FREQ = 2000
            cls.EVAL_STEPS = 1000
            cls.MODEL_SAVE_FREQ = 2500
            cls.LOG_DISPLAY_FREQ = 1
            cls.MINIBATCH_SIZE = 12
        elif mode == "eval":
            cls.ALGO += "_eval"
            cls.REPLAY_SIZE = int(1e4)
        elif mode == "ddqn":
            print("Using DDQN hyperparams")
            cls.ALGO = "DDQN"
        elif mode == "ddqn-tuned":
            print("Using DDQN-Tuned hyperparams")
            cls.ALGO = "DDQN-Tuned"
            cls.TARGET_NETWORK_UPDATE_FREQ = 30000
            cls.FINAL_EXPLORATION = 0.01
            cls.EVAL_EPSILON = 0.001
        elif mode == "dqn":
            # The paper defaults above already are the DQN settings.
            print("Using DQN hyperparams")
            pass
        elif mode == "duelingdqn":
            print("Using Dueling DQN hyperparams")
            cls.ALGO = "DuelingDQN"
            cls.MODEL = DuelingDQN
        elif mode == "normalized":
            # Observations stored as float16 since they are scaled to [0, 1].
            print("Using normalized observations")
            cls.NORMALIZE = True
            cls.REPLAY_S_DTYPE = np.float16
        elif mode == "pong_tuned":
            print("Using pong tuned hyperparams")
            cls.REPLAY_SIZE = 100000
            cls.REPLAY_START_SIZE = 10000
            cls.INITIAL_EXPLORATION = 1.0
            cls.FINAL_EXPLORATION = 0.02
            cls.FINAL_EXPLORATION_FRAME = 100000
            # this corresponds to updating every 1000 frames
            cls.TARGET_NETWORK_UPDATE_FREQ = 250
            cls.OPTIMIZER = optim.Adam
            cls.OPTIMIZER_KWARGS = {"lr": 1e-4}
        else:
            raise ValueError("Unsupported Hyper param mode")

    @classmethod
    def get_all_hyperparams(cls):
        """Return every hyperparameter as a plain dict.

        Dunder attributes and the classmethods themselves are excluded.
        """
        all_kwargs = {}
        for k, v in cls.__dict__.items():
            if not any([k.startswith("__"),
                        isinstance(v, classmethod)]):
                all_kwargs[k] = v
        return all_kwargs
|
python
|
"""
Queue backend abstraction manager.
"""
import json
import logging
import sched
import socket
import time
import uuid
from typing import Any, Callable, Dict, List, Optional, Union
from pydantic import BaseModel, validator
import qcengine as qcng
from qcfractal.extras import get_information
from ..interface.data import get_molecule
from .adapters import build_queue_adapter
from .compress import compress_results
__all__ = ["QueueManager"]
class QueueStatistics(BaseModel):
    """
    Queue Manager Job statistics
    """

    # Dynamic quantities
    total_successful_tasks: int = 0
    total_failed_tasks: int = 0
    total_worker_walltime: float = 0.0
    total_task_walltime: float = 0.0
    maximum_possible_walltime: float = 0.0  # maximum_workers * time_delta, experimental
    active_task_slots: int = 0

    # Static Quantities
    max_concurrent_tasks: int = 0
    cores_per_task: int = 0
    memory_per_task: float = 0.0
    # Timestamp of the last statistics refresh; defaults to "now" in __init__.
    last_update_time: Optional[float] = None

    def __init__(self, **kwargs):
        # Stamp creation time when the caller does not supply one.
        if kwargs.get("last_update_time", None) is None:
            kwargs["last_update_time"] = time.time()
        super().__init__(**kwargs)

    @property
    def total_completed_tasks(self) -> int:
        """Total tasks processed, successful or not."""
        return self.total_successful_tasks + self.total_failed_tasks

    @property
    def theoretical_max_consumption(self) -> float:
        """In Core Hours"""
        return self.max_concurrent_tasks * self.cores_per_task * (time.time() - self.last_update_time) / 3600

    @property
    def active_cores(self) -> int:
        """Cores currently occupied by running task slots."""
        return self.active_task_slots * self.cores_per_task

    @property
    def active_memory(self) -> float:
        """Memory (GiB) currently allocated to running task slots."""
        return self.active_task_slots * self.memory_per_task

    @validator("cores_per_task", pre=True)
    def cores_per_tasks_none(cls, v):
        # Treat an explicit None as "one core per task".
        if v is None:
            v = 1
        return v
class QueueManager:
"""
This object maintains a computational queue and watches for finished tasks for different
queue backends. Finished tasks are added to the database and removed from the queue.
Attributes
----------
client : FractalClient
A FractalClient connected to a server.
queue_adapter : QueueAdapter
The DBAdapter class for queue abstraction
errors : dict
A dictionary of current errors
logger : logging.logger. Optional, Default: None
A logger for the QueueManager
"""
    def __init__(
        self,
        client: "FractalClient",
        queue_client: "BaseAdapter",
        logger: Optional[logging.Logger] = None,
        max_tasks: int = 200,
        queue_tag: Optional[Union[str, List[str]]] = None,
        manager_name: str = "unlabeled",
        update_frequency: Union[int, float] = 2,
        verbose: bool = True,
        server_error_retries: Optional[int] = 1,
        stale_update_limit: Optional[int] = 10,
        cores_per_task: Optional[int] = None,
        memory_per_task: Optional[float] = None,
        nodes_per_task: Optional[int] = None,
        cores_per_rank: Optional[int] = 1,
        scratch_directory: Optional[str] = None,
        retries: Optional[int] = 2,
        configuration: Optional[Dict[str, Any]] = None,
    ):
        """
        Parameters
        ----------
        client : FractalClient
            A FractalClient connected to a server
        queue_client : BaseAdapter
            The DBAdapter class for queue abstraction
        logger : Optional[logging.Logger], optional
            A logger for the QueueManager
        max_tasks : int, optional
            The maximum number of tasks to hold at any given time
        queue_tag : str, optional
            Allows managers to pull from specific tags
        manager_name : str, optional
            The cluster the manager belongs to
        update_frequency : Union[int, float], optional
            The frequency to check for new tasks in seconds
        verbose : bool, optional
            Whether or not to have the manager be verbose (logger level debug and up)
        server_error_retries : Optional[int], optional
            How many times finished jobs are attempted to be pushed to the server in
            in the event of a server communication error.
            After number of attempts, the failed jobs are dropped from this manager and considered "stale"
            Set to `None` to keep retrying
        stale_update_limit : Optional[int], optional
            Number of stale update attempts to keep around
            If this limit is ever hit, the server initiates as shutdown as best it can
            since communication with the server has gone wrong too many times.
            Set to `None` for unlimited
        cores_per_task : Optional[int], optional
            How many CPU cores per computation task to allocate for QCEngine
            None indicates "use however many you can detect"
        memory_per_task : Optional[float], optional
            How much memory, in GiB, per computation task to allocate for QCEngine
            None indicates "use however much you can consume"
        nodes_per_task : Optional[int], optional
            How many nodes to use per task. Used only for node-parallel tasks
        cores_per_rank: Optional[int], optional
            How many CPUs per rank of an MPI application. Used only for node-parallel tasks
        scratch_directory : Optional[str], optional
            Scratch directory location to do QCEngine compute
            None indicates "wherever the system default is"'
        retries : Optional[int], optional
            Number of retries that QCEngine will attempt for RandomErrors detected when running
            its computations. After this many attempts (or on any other type of error), the
            error will be raised.
        configuration : Optional[Dict[str, Any]], optional
            A JSON description of the settings used to create this object for the database.
        """

        # Setup logging
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger("QueueManager")

        # Unique, human-traceable identity: cluster-hostname-uuid.
        self.name_data = {"cluster": manager_name, "hostname": socket.gethostname(), "uuid": str(uuid.uuid4())}
        self._name = self.name_data["cluster"] + "-" + self.name_data["hostname"] + "-" + self.name_data["uuid"]

        self.client = client
        self.cores_per_task = cores_per_task
        self.memory_per_task = memory_per_task
        self.nodes_per_task = nodes_per_task or 1
        self.scratch_directory = scratch_directory
        self.retries = retries
        self.cores_per_rank = cores_per_rank
        self.configuration = configuration
        self.queue_adapter = build_queue_adapter(
            queue_client,
            logger=self.logger,
            cores_per_task=self.cores_per_task,
            memory_per_task=self.memory_per_task,
            nodes_per_task=self.nodes_per_task,
            scratch_directory=self.scratch_directory,
            cores_per_rank=self.cores_per_rank,
            retries=self.retries,
            verbose=verbose,
        )
        self.max_tasks = max_tasks
        self.queue_tag = queue_tag
        self.verbose = verbose

        # NOTE(review): update_frequency is not a declared QueueStatistics
        # field; pydantic v1 silently ignores unknown kwargs -- confirm intended.
        self.statistics = QueueStatistics(
            max_concurrent_tasks=self.max_tasks,
            cores_per_task=(cores_per_task or 0),
            memory_per_task=(memory_per_task or 0),
            update_frequency=update_frequency,
        )

        self.scheduler = None
        self.update_frequency = update_frequency
        self.periodic = {}
        self.active = 0
        self.exit_callbacks = []

        # Server response/stale job handling
        self.server_error_retries = server_error_retries
        self.stale_update_limit = stale_update_limit
        self._stale_updates_tracked = 0
        self._stale_payload_tracking = []
        self.n_stale_jobs = 0

        # QCEngine data
        self.available_programs = qcng.list_available_programs()
        self.available_procedures = qcng.list_available_procedures()

        # Display a warning if there are non-node-parallel programs and >1 node_per_task
        if self.nodes_per_task > 1:
            for name in self.available_programs:
                program = qcng.get_program(name)
                if not program.node_parallel:
                    self.logger.warning(
                        "Program {} is not node parallel," " but manager will use >1 node per task".format(name)
                    )

        # Print out configuration
        self.logger.info("QueueManager:")
        self.logger.info(" Version: {}\n".format(get_information("version")))

        if self.verbose:
            self.logger.info(" Name Information:")
            self.logger.info(" Cluster: {}".format(self.name_data["cluster"]))
            self.logger.info(" Hostname: {}".format(self.name_data["hostname"]))
            self.logger.info(" UUID: {}\n".format(self.name_data["uuid"]))

        self.logger.info(" Queue Adapter:")
        self.logger.info(" {}\n".format(self.queue_adapter))

        if self.verbose:
            self.logger.info(" QCEngine:")
            self.logger.info(" Version: {}".format(qcng.__version__))
            self.logger.info(" Task Cores: {}".format(self.cores_per_task))
            self.logger.info(" Task Mem: {}".format(self.memory_per_task))
            self.logger.info(" Task Nodes: {}".format(self.nodes_per_task))
            self.logger.info(" Cores per Rank: {}".format(self.cores_per_rank))
            self.logger.info(" Scratch Dir: {}".format(self.scratch_directory))
            self.logger.info(" Programs: {}".format(self.available_programs))
            self.logger.info(" Procedures: {}\n".format(self.available_procedures))

        # DGAS Note: Note super happy about how this if/else turned out. Looking for alternatives.
        if self.connected():
            # Pull server info
            self.server_info = client.server_information()
            self.server_name = self.server_info["name"]
            self.server_version = self.server_info["version"]
            self.server_query_limit = self.server_info["query_limit"]
            # Never ask for more tasks than the server will return in one query.
            if self.max_tasks > self.server_query_limit:
                self.max_tasks = self.server_query_limit
                self.logger.warning(
                    "Max tasks was larger than server query limit of {}, reducing to match query limit.".format(
                        self.server_query_limit
                    )
                )
            self.heartbeat_frequency = self.server_info["heartbeat_frequency"]

            # Tell the server we are up and running
            payload = self._payload_template()
            payload["data"]["operation"] = "startup"
            payload["data"]["configuration"] = self.configuration

            self.client._automodel_request("queue_manager", "put", payload)

            if self.verbose:
                self.logger.info(" Connected:")
                self.logger.info(" Version: {}".format(self.server_version))
                self.logger.info(" Address: {}".format(self.client.address))
                self.logger.info(" Name: {}".format(self.server_name))
                self.logger.info(" Queue tag: {}".format(self.queue_tag))
                self.logger.info(" Username: {}\n".format(self.client.username))

        else:
            self.logger.info(" QCFractal server information:")
            self.logger.info(" Not connected, some actions will not be available")
def _payload_template(self):
meta = {
**self.name_data.copy(),
# Version info
"qcengine_version": qcng.__version__,
"manager_version": get_information("version"),
# User info
"username": self.client.username,
# Pull info
"programs": self.available_programs,
"procedures": self.available_procedures,
"tag": self.queue_tag,
# Statistics
"total_worker_walltime": self.statistics.total_worker_walltime,
"total_task_walltime": self.statistics.total_task_walltime,
"active_tasks": self.statistics.active_task_slots,
"active_cores": self.statistics.active_cores,
"active_memory": self.statistics.active_memory,
}
return {"meta": meta, "data": {}}
## Accessors
    def name(self) -> str:
        """
        Returns the Managers full name.

        This is the ``cluster-hostname-uuid`` string built in ``__init__``,
        uniquely identifying this manager instance to the server.
        """
        return self._name
    def connected(self) -> bool:
        """
        Checks the connection to the server.

        True when a client was supplied at construction; no network
        round-trip is performed.
        """
        return self.client is not None
def assert_connected(self) -> None:
"""
Raises an error for functions that require a server connection.
"""
if self.connected() is False:
raise AttributeError("Manager is not connected to a server, this operations is not available.")
## Start/stop functionality
    def start(self) -> None:
        """
        Starts up all IOLoops and processes.

        Blocks indefinitely: ``scheduler.run()`` only returns once the event
        queue drains (i.e. after ``stop`` cancels the pending events).
        """
        self.assert_connected()

        self.scheduler = sched.scheduler(time.time, time.sleep)
        # Beat noticeably faster than the server requires so one missed
        # heartbeat is not fatal.
        heartbeat_time = int(0.4 * self.heartbeat_frequency)

        def scheduler_update():
            self.update()
            # Re-schedule itself to form a periodic task.
            self.scheduler.enter(self.update_frequency, 1, scheduler_update)

        def scheduler_heartbeat():
            self.heartbeat()
            self.scheduler.enter(heartbeat_time, 1, scheduler_heartbeat)

        self.logger.info("QueueManager successfully started.\n")

        # Kick both periodic tasks off immediately (update has priority 1).
        self.scheduler.enter(0, 1, scheduler_update)
        self.scheduler.enter(0, 2, scheduler_heartbeat)

        self.scheduler.run()
def stop(self, signame="Not provided", signum=None, stack=None) -> None:
"""
Shuts down all IOLoops and periodic updates.
"""
self.logger.info("QueueManager received shutdown signal: {}.\n".format(signame))
# Cancel all events
if self.scheduler is not None:
for event in self.scheduler.queue:
self.scheduler.cancel(event)
# Push data back to the server
self.shutdown()
# Close down the adapter
self.close_adapter()
# Call exit callbacks
for func, args, kwargs in self.exit_callbacks:
func(*args, **kwargs)
self.logger.info("QueueManager stopping gracefully.\n")
    def close_adapter(self) -> bool:
        """
        Closes down the underlying adapter.

        Returns
        -------
        bool
            Whatever the adapter's ``close`` reports (True on clean shutdown).
        """
        return self.queue_adapter.close()
## Queue Manager functions
    def heartbeat(self) -> None:
        """
        Provides a heartbeat to the connected Server.

        Failures are logged but never raised; a single missed heartbeat is
        tolerated by the server.
        """
        self.assert_connected()

        payload = self._payload_template()
        payload["data"]["operation"] = "heartbeat"
        try:
            self.client._automodel_request("queue_manager", "put", payload)
            self.logger.debug("Heartbeat was successful.")
        except IOError:
            self.logger.warning("Heartbeat was not successful.")
def shutdown(self) -> Dict[str, Any]:
"""
Shutdown the manager and returns tasks to queue.
"""
self.assert_connected()
self.update(new_tasks=False, allow_shutdown=False)
payload = self._payload_template()
payload["data"]["operation"] = "shutdown"
try:
response = self.client._automodel_request("queue_manager", "put", payload, timeout=5)
response["success"] = True
shutdown_string = "Shutdown was successful, {} tasks returned to master queue."
except IOError:
# TODO something as we didnt successfully add the data
self.logger.warning("Shutdown was not successful. This may delay queued tasks.")
response = {"nshutdown": 0, "success": False}
shutdown_string = "Shutdown was not successful, {} tasks not returned."
nshutdown = response["nshutdown"]
if self.n_stale_jobs:
shutdown_string = shutdown_string.format(
f"{min(0, nshutdown-self.n_stale_jobs)} active and {nshutdown} stale"
)
else:
shutdown_string = shutdown_string.format(nshutdown)
self.logger.info(shutdown_string)
response["info"] = shutdown_string
return response
def add_exit_callback(self, callback: Callable, *args: List[Any], **kwargs: Dict[Any, Any]) -> None:
"""Adds additional callbacks to perform when closing down the server.
Parameters
----------
callback : callable
The function to call at exit
*args
Arguments to call with the function.
**kwargs
Kwargs to call with the function.
"""
self.exit_callbacks.append((callback, args, kwargs))
    def _post_update(self, payload_data, allow_shutdown=True):
        """Internal function to post payload update.

        IOError (expected transient server trouble) propagates to the caller,
        which handles retries; any other exception triggers a best-effort
        shutdown before being re-raised.
        """
        payload = self._payload_template()

        # Update with data
        payload["data"] = payload_data
        try:
            self.client._automodel_request("queue_manager", "post", payload, full_return=True)
        except IOError:
            # Trapped behavior elsewhere
            raise
        except Exception as fatal:
            # Non IOError, something has gone very wrong
            self.logger.error(
                "An error was detected which was not an expected requests-type error. The manager "
                "will attempt shutdown as best it can. Please report this error to the QCFractal "
                "developers as this block should not be "
                "seen outside of debugging modes. Error is as follows\n{}".format(fatal)
            )
            try:
                if allow_shutdown:
                    self.shutdown()
            finally:
                # Re-raise the original error even if shutdown itself failed.
                raise fatal
    def _update_stale_jobs(self, allow_shutdown=True):
        """
        Attempt to post the previous payload failures.

        Each entry of ``_stale_payload_tracking`` is ``[results, attempts]``.
        Entries are retried until ``server_error_retries`` is exceeded, then
        dropped and counted as stale; too many stale drops triggers shutdown.
        """
        clear_indices = []
        for index, (results, attempts) in enumerate(self._stale_payload_tracking):
            try:
                self._post_update(results)
                self.logger.info(f"Successfully pushed jobs from {attempts+1} updates ago")
                self.logger.info(f"Tasks pushed: " + str(list(results.keys())))
                clear_indices.append(index)
            except IOError:
                # Tried and failed
                attempts += 1
                # Case: Still within the retry limit
                if self.server_error_retries is None or self.server_error_retries > attempts:
                    # Persist the bumped attempt counter in the tracking entry.
                    self._stale_payload_tracking[index][-1] = attempts
                    self.logger.warning(f"Could not post jobs from {attempts} updates ago, will retry on next update.")
                # Case: Over limit
                else:
                    self.logger.warning(
                        f"Could not post jobs from {attempts} ago and over attempt limit, marking " f"jobs as stale."
                    )
                    self.n_stale_jobs += len(results)
                    clear_indices.append(index)
                    self._stale_updates_tracked += 1
        # Cleanup clear indices (reverse order keeps earlier indices valid).
        for index in clear_indices[::-1]:
            self._stale_payload_tracking.pop(index)
        # Check stale limiters
        if (
            self.stale_update_limit is not None
            and (len(self._stale_payload_tracking) + self._stale_updates_tracked) > self.stale_update_limit
        ):
            self.logger.error("Exceeded number of stale updates allowed! Attempting to shutdown gracefully...")
            # Log all not-quite stale jobs to stale
            for (results, _) in self._stale_payload_tracking:
                self.n_stale_jobs += len(results)
            try:
                if allow_shutdown:
                    self.shutdown()
            finally:
                raise RuntimeError("Exceeded number of stale updates allowed!")
    def update(self, new_tasks: bool = True, allow_shutdown=True) -> bool:
        """Examines the queue for completed tasks and adds successful completions to the database
        while unsuccessful are logged for future inspection.

        Parameters
        ----------
        new_tasks: bool, optional, Default: True
            Try to get new tasks from the server
        allow_shutdown: bool, optional, Default: True
            Allow function to attempt graceful shutdowns in the case of stale job or fatal error limits.
            Does not prevent errors from being raise, but mostly used to prevent infinite loops when update is
            called from `shutdown` itself

        Returns
        -------
        bool
            True unless acquiring new tasks from the server failed.
        """
        self.assert_connected()

        # Retry previously failed pushes before handling fresh results.
        self._update_stale_jobs(allow_shutdown=allow_shutdown)

        results = self.queue_adapter.acquire_complete()

        # Compress the stdout/stderr/error outputs
        results = compress_results(results)

        # Stats fetching for running tasks, as close to the time we got the jobs as we can
        last_time = self.statistics.last_update_time
        now = self.statistics.last_update_time = time.time()
        time_delta_seconds = now - last_time
        try:
            self.statistics.active_task_slots = self.queue_adapter.count_active_task_slots()
            log_efficiency = True
        except NotImplementedError:
            # Adapter cannot report slot usage; skip efficiency reporting.
            log_efficiency = False

        timedelta_worker_walltime = time_delta_seconds * self.statistics.active_cores / 3600
        timedelta_maximum_walltime = (
            time_delta_seconds * self.statistics.max_concurrent_tasks * self.statistics.cores_per_task / 3600
        )
        self.statistics.total_worker_walltime += timedelta_worker_walltime
        self.statistics.maximum_possible_walltime += timedelta_maximum_walltime

        # Process jobs
        n_success = 0
        n_fail = 0
        n_result = len(results)
        task_cpu_hours = 0
        # NOTE(review): error_payload is never used below.
        error_payload = []
        if n_result:
            # For logging
            failure_messages = {}

            try:
                self._post_update(results, allow_shutdown=allow_shutdown)
                task_status = {k: "sent" for k in results.keys()}
            except IOError:
                if self.server_error_retries is None or self.server_error_retries > 0:
                    self.logger.warning("Post complete tasks was not successful. Attempting again on next update.")
                    self._stale_payload_tracking.append([results, 0])
                    task_status = {k: "deferred" for k in results.keys()}
                else:
                    self.logger.warning("Post complete tasks was not successful. Data may be lost.")
                    self.n_stale_jobs += len(results)
                    task_status = {k: "unknown_error" for k in results.keys()}

            self.active -= n_result
            for key, result in results.items():
                wall_time_seconds = 0
                if result.success:
                    n_success += 1
                    if hasattr(result.provenance, "wall_time"):
                        wall_time_seconds = float(result.provenance.wall_time)
                    task_status[key] += " / success"
                else:
                    task_status[key] += f" / failed: {result.error.error_type}"
                    failure_messages[key] = result.error
                    # Try to get the wall time in the most fault-tolerant way
                    try:
                        wall_time_seconds = float(result.input_data.get("provenance", {}).get("wall_time", 0))
                    except AttributeError:
                        # Trap the result.input_data is None, but let other attribute errors go
                        if result.input_data is None:
                            wall_time_seconds = 0
                        else:
                            raise
                    except TypeError:
                        # Trap wall time corruption, e.g. float(None)
                        # Other Result corruptions will raise an error correctly
                        wall_time_seconds = 0
                task_cpu_hours += wall_time_seconds * self.statistics.cores_per_task / 3600
            n_fail = n_result - n_success

            # Now print out all the info
            # NOTE(review): message has a stray ")" after "failed".
            self.logger.info(f"Processed {len(results)} tasks: {n_success} succeeded / {n_fail} failed).")
            self.logger.info(f"Task ids, submission status, calculation status below")
            for task_id, status_msg in task_status.items():
                self.logger.info(f" Task {task_id} : {status_msg}")
            if n_fail:
                self.logger.info("The following tasks failed with the errors:")
                for task_id, error_info in failure_messages.items():
                    self.logger.info(f"Error message for task id {task_id}")
                    self.logger.info(" Error type: " + str(error_info.error_type))
                    self.logger.info(" Backtrace: \n" + str(error_info.error_message))

        open_slots = max(0, self.max_tasks - self.active)

        # Crunch Statistics
        self.statistics.total_failed_tasks += n_fail
        self.statistics.total_successful_tasks += n_success
        self.statistics.total_task_walltime += task_cpu_hours
        na_format = ""
        float_format = ",.2f"
        if self.statistics.total_completed_tasks == 0:
            task_stats_str = "Task statistics unavailable until first tasks return"
            worker_stats_str = None
        else:
            success_rate = self.statistics.total_successful_tasks / self.statistics.total_completed_tasks * 100
            success_format = float_format
            task_stats_str = (
                f"Task Stats: Processed={self.statistics.total_completed_tasks}, "
                f"Failed={self.statistics.total_failed_tasks}, "
                f"Success={success_rate:{success_format}}%"
            )
            worker_stats_str = (
                f"Worker Stats (est.): Core Hours Used={self.statistics.total_worker_walltime:{float_format}}"
            )

            # Handle efficiency calculations
            if log_efficiency:
                # Efficiency calculated as:
                # sum_task(task_wall_time * nthread / task)
                # -------------------------------------------------------------
                if self.statistics.total_task_walltime == 0 or self.statistics.maximum_possible_walltime == 0:
                    efficiency_of_running = "(N/A yet)"
                    efficiency_of_potential = "(N/A yet)"
                    efficiency_format = na_format
                else:
                    efficiency_of_running = (
                        self.statistics.total_task_walltime / self.statistics.total_worker_walltime * 100
                    )
                    efficiency_of_potential = (
                        self.statistics.total_worker_walltime / self.statistics.maximum_possible_walltime * 100
                    )
                    efficiency_format = float_format
                worker_stats_str += f", Core Usage Efficiency: {efficiency_of_running:{efficiency_format}}%"
                if self.verbose:
                    worker_stats_str += (
                        f", Core Usage vs. Max Resources Requested: " f"{efficiency_of_potential:{efficiency_format}}%"
                    )

        self.logger.info(task_stats_str)
        if worker_stats_str is not None:
            self.logger.info(worker_stats_str)

        if (new_tasks is False) or (open_slots == 0):
            return True

        # Get new tasks
        payload = self._payload_template()
        payload["data"]["limit"] = open_slots
        try:
            new_tasks = self.client._automodel_request("queue_manager", "get", payload)
        except IOError:
            # TODO something as we didnt successfully get data
            self.logger.warning("Acquisition of new tasks was not successful.")
            return False

        self.logger.info("Acquired {} new tasks.".format(len(new_tasks)))

        # Add new tasks to queue
        self.queue_adapter.submit_tasks(new_tasks)
        self.active += len(new_tasks)
        return True
def await_results(self) -> bool:
    """A synchronous method for testing or small launches
    that awaits task completion.

    Blocks until the queue adapter reports all submitted tasks finished,
    then runs a final update pass to return completed results.

    Returns
    -------
    bool
        Return True if the operation completed successfully
    """
    # Fail fast if the manager has not connected to a server.
    self.assert_connected()
    # Push any finished results and pull new work before blocking.
    self.update()
    self.queue_adapter.await_results()
    # Final pass with new_tasks=False: return completed results only,
    # without acquiring more work from the server.
    self.update(new_tasks=False)
    return True
def list_current_tasks(self) -> List[Any]:
    """Provides a list of tasks currently in the queue along
    with the associated keys.

    Thin delegate to the queue adapter; no server communication occurs.

    Returns
    -------
    ret : list of tuples
        All tasks currently still in the database
    """
    return self.queue_adapter.list_tasks()
def test(self, n=1) -> bool:
    """
    Tests all known programs with simple inputs to check if the Adapter is correctly instantiated.

    Parameters
    ----------
    n : int, optional
        Number of test tasks to generate per detected program (default 1).

    Returns
    -------
    bool
        True if every generated test task was retrieved and succeeded.

    Raises
    ------
    ValueError
        If some submitted tasks were never returned by the adapter.
    """
    from qcfractal import testing

    self.logger.info("Testing requested, generating tasks")
    # Single-point energy template; model/keywords/program filled in per task.
    task_base = json.dumps(
        {
            "spec": {
                "function": "qcengine.compute",
                "args": [
                    {
                        "molecule": get_molecule("hooh.json").dict(encoding="json"),
                        "driver": "energy",
                        "model": {},
                        "keywords": {},
                    },
                    "program",
                ],
                "kwargs": {},
            },
            "parser": "single",
        }
    )

    programs = {
        "rdkit": {"method": "UFF", "basis": None},
        "torchani": {"method": "ANI1", "basis": None},
        "psi4": {"method": "HF", "basis": "sto-3g"},
    }
    tasks = []
    found_programs = []

    for program, model in programs.items():
        if testing.has_module(program):
            self.logger.info("Found program {}, adding to testing queue.".format(program))
        else:
            self.logger.warning("Could not find program {}, skipping tests.".format(program))
            continue
        for x in range(n):
            task = json.loads(task_base)
            program_id = program + str(x)
            task["id"] = program_id
            task["spec"]["args"][0]["model"] = model
            # Vary convergence per-index so tasks are not identical.
            task["spec"]["args"][0]["keywords"] = {"e_convergence": (x * 1.0e-6 + 1.0e-6)}
            task["spec"]["args"][1] = program
            tasks.append(task)
            found_programs.append(program_id)

    self.queue_adapter.submit_tasks(tasks)
    self.logger.info("Testing tasks submitting, awaiting results.\n")
    self.queue_adapter.await_results()

    results = self.queue_adapter.acquire_complete()
    self.logger.info("Testing results acquired.")

    # BUG FIX: the original computed results.keys() - set(found_programs),
    # which yields *extra* results and stays empty when tasks are lost.
    # Missing tasks are those we submitted but never got back.
    missing_programs = set(found_programs) - results.keys()
    if len(missing_programs):
        self.logger.error("Not all tasks were retrieved, missing programs {}.".format(missing_programs))
        raise ValueError("Testing failed, not all tasks were retrieved.")
    else:
        self.logger.info("All tasks retrieved successfully.")

    failures = 0
    fail_report = {}
    for k, result in results.items():
        if result.success:
            self.logger.info(" {} - PASSED".format(k))
        else:
            self.logger.error(" {} - FAILED!".format(k))
            failed_program = "Return Mangled!"  # This should almost never be seen, but is in place as a fallback
            for program in programs.keys():
                if program in k:
                    failed_program = program
                    break
            # Keep only the first sample error per program.
            if failed_program not in fail_report:
                fail_report[failed_program] = (
                    f"On test {k}:"
                    f"\nException Type: {result.error.error_type}"
                    f"\nException Message: {result.error.error_message}"
                )
            failures += 1

    if failures:
        self.logger.error("{}/{} tasks failed!".format(failures, len(results)))
        self.logger.error(
            "A sample error from each program to help:\n" + "\n".join(fail_report.values())
        )
        return False
    else:
        self.logger.info("All tasks completed successfully!")
        return True
|
python
|
import logging
from importlib import import_module
from .groups import Groups
log = logging.getLogger(__name__)
class ConfigHelper:
    """Splits the bot's raw config dict into per-category configs, groups,
    the MOTD location, and the remaining per-cog settings."""

    @classmethod
    def cog_name(cls, key):
        """Convert a snake_case config key into the PascalCase cog class name."""
        return ''.join(map(str.capitalize, key.split('_')))

    # (root config key, group category, group name to map it to)
    CONFIG_GROUP_MAPPINGS = [
        ('sudo', 'user', 'sudo'),
        ('sysbot_channels', 'channel', 'sysbots')
    ]

    def __init__(self, bot, config):
        self.bot = bot
        # Per-category sections; int keys are per-id overrides, non-int keys
        # act as global defaults (see get_config).
        self.configs = {
            'guild': config.pop('guilds', {}),
            'channel': config.pop('channels', {}),
            'user': config.pop('users', {})
        }
        self.groups = {
            'guild': Groups(config.pop('guild_groups', {}),
                            config.pop('guild_groups_save', None)),
            'channel': Groups(config.pop('channel_groups', {}),
                              config.pop('channel_groups_save', None)),
            # NOTE(review): this default is {} while the others use None —
            # confirm Groups treats both the same before unifying.
            'user': Groups(config.pop('user_groups', {}),
                           config.pop('user_groups_save', {})),
        }
        # Map some config from root to user/channel groups
        for name, group_type, map_to in self.CONFIG_GROUP_MAPPINGS:
            self.groups[group_type].update({map_to: config.pop(name, {})})
        self.motd = config.pop('motd', 'motd.txt')
        # The remaining configs are used to load cogs
        self.cog_config = config
        self.cog_list = set()

    def get_config(self, category, key=None):
        """Return the merged config for *category*: global (non-int keyed)
        entries overridden by the entry stored under *key*, if any."""
        raw_config = self.configs[category]
        # Filter all non-int keys as global config.
        # BUG FIX: must iterate .items(); iterating the dict directly yields
        # only keys, which breaks the (k, v) unpacking.
        config = {k: v for k, v in raw_config.items() if not isinstance(k, int)}
        # apply guild specific configs
        if key in raw_config:
            config.update(raw_config[key])
        return config

    def get_cog(self, key):
        """Return the loaded cog instance for a config key (or None)."""
        return self.bot.get_cog(self.cog_name(key))

    def get_motd(self):
        """Read and return the MOTD file's text, or None if unset/missing."""
        if not self.motd:
            return
        try:
            with open(self.motd, 'r') as f:
                motd = f.read().strip()
            return motd
        except FileNotFoundError:
            log.info(f'{self.motd} not found, will not print MOTD.')

    def template_variables_base(self, ctx):
        """Base variables exposed to message templates for *ctx*."""
        result = {'ctx': ctx}
        if hasattr(ctx, 'author'):
            result.update(
                name=ctx.author.name,
                mention=ctx.author.mention)
        return result

    def register_all_cogs(self):
        """Import, instantiate, and register every cog listed in the config."""
        # Load the cogs from config file
        for pkg, configs in self.cog_config.items():
            for cog_key, args in configs.items():
                module_name = f"{pkg}.{cog_key}"
                cls_name = self.cog_name(cog_key)
                module = import_module(module_name)
                if not hasattr(module, cls_name):
                    # logging's warn() is a deprecated alias of warning().
                    log.warning('Unable to load cog %s from package %s!', cls_name, module_name)
                    continue
                cls = getattr(module, cls_name)
                # Create a cog instance (with config) and add to the bot
                if hasattr(cls, 'Config'):
                    log.info('Load cog with config: %s', cls_name)
                    cls_config = getattr(cls, 'Config')
                    if isinstance(args, dict):
                        instance = cls(self.bot, cls_config(**args))
                    elif isinstance(args, list):
                        instance = cls(self.bot, cls_config(*args))
                    else:
                        instance = cls(self.bot, cls_config(args))
                else:
                    log.info('Load cog: %s', cls_name)
                    instance = cls(self.bot)
                self.bot.add_cog(instance)
                self.cog_list.add(cls_name)
|
python
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.apps import apps, AppConfig
class PanopticonConfig(AppConfig):
    """Django app config that wires up Panopticon health checks at startup."""

    name = "panopticon.django"
    label = "panopticon"
    verbose_name = "Panopticon"

    def ready(self):
        # Module names of every installed Django app; healthcheck modules are
        # discovered inside each of them.
        package_names = (a.module.__name__ for a in apps.get_app_configs())

        # Imported here (not at module top) so app loading is complete first.
        from panopticon.loader import load_healthcheck_modules

        load_healthcheck_modules(package_names)

        from django.conf import settings

        from panopticon.datadog import DataDog

        # Push relevant settings into the DataDog integration after checks load.
        DataDog.configure_settings(settings)
|
python
|
import numpy as np
from numba import jit
from scipy.sparse.construct import random
from ..tools import compute_dist
from ._utils import _CheckInputs
from .base import IndependenceTest, IndependenceTestOutput
from scipy.stats import rankdata
class HHG(IndependenceTest):
    r"""
    Heller Heller Gorfine (HHG) test statistic and p-value.

    This is a powerful test for independence based on calculating pairwise
    Euclidean distances and associations between these distance matrices. The
    test statistic is a function of ranks of these distances, and is
    consistent against similar tests
    :footcite:p:`hellerConsistentMultivariateTest2013`. It can also operate on multiple
    dimensions :footcite:p:`hellerConsistentMultivariateTest2013`.

    Parameters
    ----------
    compute_distance : str, callable, or None, default: "euclidean"
        A function that computes the distance among the samples within each
        data matrix.
        Valid strings for ``compute_distance`` are, as defined in
        :func:`sklearn.metrics.pairwise_distances`,

        - From scikit-learn: [``"euclidean"``, ``"cityblock"``, ``"cosine"``,
          ``"l1"``, ``"l2"``, ``"manhattan"``] See the documentation for
          :mod:`scipy.spatial.distance` for details
          on these metrics.
        - From scipy.spatial.distance: [``"braycurtis"``, ``"canberra"``,
          ``"chebyshev"``, ``"correlation"``, ``"dice"``, ``"hamming"``,
          ``"jaccard"``, ``"kulsinski"``, ``"mahalanobis"``, ``"minkowski"``,
          ``"rogerstanimoto"``, ``"russellrao"``, ``"seuclidean"``,
          ``"sokalmichener"``, ``"sokalsneath"``, ``"sqeuclidean"``,
          ``"yule"``] See the documentation for :mod:`scipy.spatial.distance` for
          details on these metrics.

        Set to ``None`` or ``"precomputed"`` if ``x`` and ``y`` are already distance
        matrices. To call a custom function, either create the distance matrix
        before-hand or create a function of the form ``metric(x, **kwargs)``
        where ``x`` is the data matrix for which pairwise distances are
        calculated and ``**kwargs`` are extra arguments to send to your custom
        function.
    **kwargs
        Arbitrary keyword arguments for ``compute_distance``.

    Notes
    -----
    The statistic can be derived as follows
    :footcite:p:`hellerConsistentMultivariateTest2013`:

    Let :math:`x` and :math:`y` be :math:`(n, p)` samples of random variables
    :math:`X` and :math:`Y`. For every sample :math:`j \neq i`, calculate the
    pairwise distances in :math:`x` and :math:`y` and denote this as
    :math:`d_x(x_i, x_j)` and :math:`d_y(y_i, y_j)`. The indicator function is
    denoted as :math:`\mathbb{1} \{ \cdot \}`. The cross-classification
    between these two random variables can be calculated as

    .. math::

        A_{11} = \sum_{k=1, k \neq i,j}^n
        \mathbb{1} \{ d_x(x_i, x_k) \leq d_x(x_i, x_j) \}
        \mathbb{1} \{ d_y(y_i, y_k) \leq d_y(y_i, y_j) \}

    and :math:`A_{12}`, :math:`A_{21}`, and :math:`A_{22}` are defined
    similarly. This is organized within the following table:

    +--------------------------------------------+--------------------------------------------+--------------------------------------------+---------------------------+
    |                                            | :math:`d_x(x_i, \cdot) \leq d_x(x_i, x_j)` | :math:`d_x(x_i, \cdot) \leq d_x(x_i, x_j)` |                           |
    +--------------------------------------------+--------------------------------------------+--------------------------------------------+---------------------------+
    | :math:`d_x(x_i, \cdot) \leq d_x(x_i, x_j)` | :math:`A_{11} (i,j)`                       | :math:`A_{12} (i,j)`                       | :math:`A_{1 \cdot} (i,j)` |
    +--------------------------------------------+--------------------------------------------+--------------------------------------------+---------------------------+
    | :math:`d_x(x_i, \cdot) > d_x(x_i, x_j)`    | :math:`A_{21} (i,j)`                       | :math:`A_{22} (i,j)`                       | :math:`A_{2 \cdot} (i,j)` |
    +--------------------------------------------+--------------------------------------------+--------------------------------------------+---------------------------+
    |                                            | :math:`A_{\cdot 1} (i,j)`                  | :math:`A_{\cdot 2} (i,j)`                  | :math:`n - 2`             |
    +--------------------------------------------+--------------------------------------------+--------------------------------------------+---------------------------+

    Here, :math:`A_{\cdot 1}` and :math:`A_{\cdot 2}` are the column sums,
    :math:`A_{1 \cdot}` and :math:`A_{2 \cdot}` are the row sums, and
    :math:`n - 2` is the number of degrees of freedom. From this table, we can
    calculate the Pearson's chi squared test statistic using,

    .. math::

        S(i, j) = \frac{(n-2) (A_{12} A_{21} - A_{11} A_{22})^2}
        {A_{1 \cdot} A_{2 \cdot} A_{\cdot 1} A_{\cdot 2}}

    and the HHG test statistic is then,

    .. math::

        \mathrm{HHG}_n (x, y) = \sum_{i=1}^n \sum_{j=1, j \neq i}^n S(i, j)

    The p-value returned is calculated using a permutation test using
    :meth:`hyppo.tools.perm_test`.

    The fast version of this test performs a multivariate independence test
    based on univariate test statistics :footcite:p:`hellerMultivariateTestsOfAssociation2016`.
    The univariate test statistic used is Hoeffding's independence test, derived as follows
    :footcite:p:`sasHoeffdingDependenceCoefficient`:

    Let :math:`x` and :math:`y` be :math:`(n, p)` samples of random variables
    :math:`X` and :math:`Y`. A center point - the center of mass of points in 'X' and 'Y'
    - is chosen. For every sample :math:`i`, calculate the distances from the center point
    in :math:`x` and :math:`y` and denote this as :math:`d_x(x_i)`
    and :math:`d_y(y_i)`. This will create a 1D collection of distances for each
    sample group.

    From these distances, we can calculate the Hoeffding's dependence score between
    the two groups - denoted as :math:`D` - using,

    .. math::

        D &= \frac{(n-2) (n-3) D_{1} + D_{2} - 2(n-2) D_{3}}
        {n (n-1) (n-2) (n-3) (n-4)}

        D_{1} &= \sum_{i} (Q_{i}-1) (Q_{i}-2)

        D_{2} &= \sum_{i} (R_{i} - 1) (R_{i} - 2) (S_{i} - 1) (S_{i} - 2)

        D_{3} &= \sum_{i} {R_{i} - 2} (S_{i} - 2) (Q_{i}-1)

    where :math:`R_{i}` is the rank of :math:`x_{i}`,
    :math:`D_{i}` is the rank of :math:`y_{i}`,
    :math:`Q_{i}` is the bivariate rank = 1 plus the number of points with both x and y
    values less than the :math:`i`-th point.

    :math:`D` is notably sensitive to ties and gets smaller the more pairs of variables with identical values.
    If there are no ties in the data,D ranges between -0.5 and 1, with 1 indicating complete dependence.
    :footcite:p:`sasHoeffdingDependenceCoefficient`

    The p-value returned is calculated using a permutation test using
    :meth:`hyppo.tools.perm_test`.

    References
    ----------
    .. footbibliography::
    """

    def __init__(self, compute_distance="euclidean", **kwargs):
        # A falsy compute_distance (None / "") signals that callers supply
        # precomputed distance matrices.
        self.is_distance = False
        if not compute_distance:
            self.is_distance = True
        self.auto = False
        IndependenceTest.__init__(self, compute_distance=compute_distance, **kwargs)

    def statistic(self, x, y):
        r"""
        Helper function that calculates the HHG test statistic.

        Parameters
        ----------
        x,y : ndarray of float
            Input data matrices. ``x`` and ``y`` must have the same number of
            samples. That is, the shapes must be ``(n, p)`` and ``(n, q)`` where
            `n` is the number of samples and `p` and `q` are the number of
            dimensions.
            Alternatively, ``x`` and ``y`` can be distance matrices,
            where the shapes must both be ``(n, n)``.
            For fast version, ``x`` and ``y`` can be 1D collections of distances
            from a chosen center point, where the shapes must be ``(n,1)`` or ``(n-1,1)``
            depending on choice of center point.

        Returns
        -------
        stat : float
            The computed HHG statistic.
        """
        distx = x
        disty = y
        if not self.is_distance:
            distx, disty = compute_dist(
                x, y, metric=self.compute_distance, **self.kwargs
            )
        if self.auto:
            # Fast variant: reduce to 1D center-point distances and use
            # Hoeffding's D on them.
            if not self.is_distance:
                distx, disty = _centerpoint_dist(
                    x, y, metric=self.compute_distance, **self.kwargs
                )
            stat = hoeffdings(distx, disty)
        else:
            # Full HHG: sum the per-pair chi-square table over all i != j.
            S = _pearson_stat(distx, disty)
            mask = np.ones(S.shape, dtype=bool)
            np.fill_diagonal(mask, 0)
            stat = np.sum(S[mask])
        self.stat = stat
        return stat

    def test(self, x, y, reps=1000, workers=1, auto=False, random_state=None):
        r"""
        Calculates the HHG test statistic and p-value.

        Parameters
        ----------
        x,y : ndarray of float
            Input data matrices. ``x`` and ``y`` must have the same number of
            samples. That is, the shapes must be ``(n, p)`` and ``(n, q)`` where
            `n` is the number of samples and `p` and `q` are the number of
            dimensions.
            Alternatively, ``x`` and ``y`` can be distance matrices,
            where the shapes must both be ``(n, n)``.
            For fast version, ``x`` and ``y`` can be 1D collections of distances
            from a chosen center point, where the shapes must be ``(n,1)`` or ``(n-1,1)``
            depending on choice of center point.
        reps : int, default: 1000
            The number of replications used to estimate the null distribution
            when using the permutation test used to calculate the p-value.
        workers : int, default: 1
            The number of cores to parallelize the p-value computation over.
            Supply ``-1`` to use all cores available to the Process.
        auto : boolean, default: False
            Automatically use fast approximation of HHG test. :class:`hyppo.tools.perm_test`
            will still be run.
        random_state : int, default: None
            Seed for the permutation test, forwarded for reproducible p-values.

        Returns
        -------
        stat : float
            The computed HHG statistic.
        pvalue : float
            The computed HHG p-value.

        Examples
        --------
        >>> import numpy as np
        >>> from hyppo.independence import HHG
        >>> x = np.arange(7)
        >>> y = x
        >>> stat, pvalue = HHG().test(x, y)
        >>> '%.1f, %.2f' % (stat, pvalue)
        '160.0, 0.00'

        In addition, the inputs can be distance matrices. Using this is the,
        same as before, except the ``compute_distance`` parameter must be set
        to ``None``.

        >>> import numpy as np
        >>> from hyppo.independence import HHG
        >>> x = np.ones((10, 10)) - np.identity(10)
        >>> y = 2 * x
        >>> hhg = HHG(compute_distance=None)
        >>> stat, pvalue = hhg.test(x, y)
        >>> '%.1f, %.2f' % (stat, pvalue)
        '0.0, 1.00'
        """
        check_input = _CheckInputs(x, y, reps=reps)
        x, y = check_input()
        self.auto = auto

        # Fast HHG Test
        if self.auto:
            distx, disty = _centerpoint_dist(
                x, y, metric=self.compute_distance, **self.kwargs
            )
            self.is_distance = True
            # BUG FIX: random_state was accepted but never forwarded, so
            # seeding had no effect; pass it through to the permutation test.
            stat, pvalue = super(HHG, self).test(
                distx, disty, reps, workers, is_distsim=False, random_state=random_state
            )
        else:
            x, y = compute_dist(x, y, metric=self.compute_distance, **self.kwargs)
            self.is_distance = True
            stat, pvalue = super(HHG, self).test(
                x, y, reps, workers, random_state=random_state
            )

        return IndependenceTestOutput(stat, pvalue)
@jit(nopython=True, cache=True)
def _pearson_stat(distx, disty):  # pragma: no cover
    """Calculate the Pearson chi square stats

    For every ordered pair (i, j), builds the 2x2 cross-classification of
    which other points fall within radius d(i, j) in x and in y, and stores
    the resulting chi-square statistic in S[i, j].
    """
    n = distx.shape[0]
    S = np.zeros((n, n))
    # iterate over all samples in the distance matrix
    for i in range(n):
        for j in range(n):
            if i != j:
                # Indicator vectors: points at least as close to i as j is.
                a = distx[i, :] <= distx[i, j]
                b = disty[i, :] <= disty[i, j]
                # k == i and k == j satisfy both inequalities trivially,
                # so subtract 2 to exclude them from the (1,1) cell.
                t11 = np.sum(a * b) - 2
                t12 = np.sum(a * (1 - b))
                t21 = np.sum((1 - a) * b)
                t22 = np.sum((1 - a) * (1 - b))
                denom = (t11 + t12) * (t21 + t22) * (t11 + t21) * (t12 + t22)
                # Zero marginals (e.g. heavy ties) leave S[i, j] at 0.
                if denom > 0:
                    S[i, j] = ((n - 2) * (t12 * t21 - t11 * t22) ** 2) / denom
    return S
def hoeffdings(x, y):
    """For fast HHG: Hoeffding's dependence statistic D between two
    1D distance collections, computed on their ranks."""
    rank_x = rankdata(x)
    rank_y = rankdata(y)
    return _hoeffdings_d_calc(rank_x, rank_y, x.shape)
@jit(nopython=True, cache=True)
def _hoeffdings_d_calc(R, S, N):  # pragma: no cover
    """Core Hoeffding's D computation from rank vectors R and S.

    N is the shape tuple of the input; N[0] is the sample count n.
    """
    # Q[i] is the bivariate rank: 1 plus the number of points strictly
    # below point i in both coordinates, with 1/2 and 1/4 weights for ties.
    Q = np.ones(N[0])
    for i in range(0, N[0]):
        Q[i] = Q[i] + np.sum(np.bitwise_and(R < R[i], S < S[i]))
        Q[i] = Q[i] + 1 / 4 * (np.sum(np.bitwise_and(R == R[i], S == S[i])) - 1)
        Q[i] = Q[i] + 1 / 2 * (np.sum(np.bitwise_and(R == R[i], S < S[i])))
        Q[i] = Q[i] + 1 / 2 * (np.sum(np.bitwise_and(R < R[i], S == S[i])))
    # Classical D1/D2/D3 sums of Hoeffding's D.
    D1 = np.sum(np.multiply((Q - 1), (Q - 2)))
    D2 = np.sum(
        np.multiply(np.multiply((R - 1), (R - 2)), np.multiply((S - 1), (S - 2)))
    )
    D3 = np.sum(np.multiply(np.multiply((R - 2), (S - 2)), (Q - 1)))
    # Combine with the standard scaling factor of 30.
    D = (
        30
        * ((N[0] - 2) * (N[0] - 3) * D1 + D2 - 2 * (N[0] - 2) * D3)
        / (N[0] * (N[0] - 1) * (N[0] - 2) * (N[0] - 3) * (N[0] - 4))
    )
    return D
def _centerpoint_dist(x, y, metric, **kwargs):
    """Calculate the distance of each sample in x and y from the
    respective center of mass, returned as (n, 1) column vectors."""
    # Centers of mass, shaped (1, p) so they can be stacked onto the data.
    center_x = np.array(np.mean(x, axis=0)).reshape(1, -1)
    center_y = np.array(np.mean(y, axis=0)).reshape(1, -1)
    # Prepend the center point and compute full pairwise distance matrices.
    distx, disty = compute_dist(
        np.concatenate((center_x, x)),
        np.concatenate((center_y, y)),
        metric=metric,
        **kwargs
    )
    # Row 0 holds distances from the center; drop the center-to-center entry.
    dist_from_center_x = distx[0][1:].reshape(-1, 1)
    dist_from_center_y = disty[0][1:].reshape(-1, 1)
    return dist_from_center_x, dist_from_center_y
|
python
|
from django.contrib import admin
from spaweb.models import Customer, ProductCategory, City
from spaweb.models import Product, Order, OrderItem
from spaweb.models import BusinessDirection, Topic
# Simple models registered with the default ModelAdmin.
admin.site.register(ProductCategory)
admin.site.register(City)
admin.site.register(BusinessDirection)
admin.site.register(Topic)
class OrderItemInline(admin.TabularInline):
    # Inline editor for the items belonging to an order.
    model = OrderItem
    # No extra blank rows by default.
    extra = 0
    # NOTE(review): list_display is a ModelAdmin option and has no effect on
    # a TabularInline — "fields" may have been intended; confirm.
    list_display = ['product', 'quantity', 'order_cost']
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
    # Columns shown in the order changelist.
    list_display = ['registrated_at','is_complete', 'is_digital', 'customer']
    # Edit order items directly on the order page.
    inlines = [OrderItemInline]
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    # Sidebar filter by product category.
    list_filter = ['category']
class OrderInline(admin.TabularInline):
    # Inline list of a customer's orders.
    model = Order
    # NOTE(review): list_display has no effect on a TabularInline — confirm
    # whether "fields" was intended here.
    list_display = ['registrated_at', 'is_complete']
@admin.register(Customer)
class CustomerAdmin(admin.ModelAdmin):
    # Show the customer's orders inline on the customer page.
    inlines = [OrderInline]
|
python
|
# -*- coding: utf-8 -*-
"""Pype custom errors."""
class PypeError(Exception):
    """Base exception for Pype-specific errors."""
|
python
|
"""
ipython -i --pdb scripts/train_model.py -- --model cropped_jan02 --data 128_20151029 --use_cropped --as_grey --overwrite --no_test
"""
import numpy as np
from lasagne.layers import dnn
import lasagne as nn
import theano.tensor as T
import theano
from utils.nolearn_net import NeuralNet
from nolearn.lasagne.handlers import SaveWeights
from nolearn_utils.iterators import (
ShuffleBatchIteratorMixin,
BufferedBatchIteratorMixin,
RandomFlipBatchIteratorMixin,
AffineTransformBatchIteratorMixin,
AdjustGammaBatchIteratorMixin,
make_iterator
)
from nolearn_utils.hooks import (
SaveTrainingHistory,
PlotTrainingHistory,
EarlyStopping,
StepDecay
)
from utils import TrainSplit
# from utils.layers import batch_norm
# from utils.iterators import PairBatchIteratorMixin
# from utils.nonlinearities import low_temperature_softmax
# from utils.layers import TiedDropoutLayer
from utils.layer_macros import conv2dbn2 as conv2dbn
from utils.layer_macros import residual_block3 as residual_block
def float32(k):
    """Cast *k* (scalar or array-like) to a float32 ndarray.

    ``np.cast['float32']`` is deprecated and removed in NumPy 2.0; the
    explicit asarray/astype form below is its documented equivalent.
    """
    return np.asarray(k).astype(np.float32)
# Output artefact paths: weights, accuracy, training history and its plot.
model_fname = './models/cropped_jan02.pkl'
model_accuracy_fname = './models/cropped_jan02_accuracy.pkl'
model_history_fname = './models/cropped_jan02_history.pkl'
model_graph_fname = './models/cropped_jan02_history.png'

image_size = 256
batch_size = 16
n_classes = 447

# Training batches are shuffled, buffered and heavily augmented; test
# batches are served unmodified.
train_iterator_mixins = [
    ShuffleBatchIteratorMixin,
    BufferedBatchIteratorMixin,
    RandomFlipBatchIteratorMixin,
    AffineTransformBatchIteratorMixin,
    AdjustGammaBatchIteratorMixin,
]
TrainIterator = make_iterator('TrainIterator', train_iterator_mixins)

test_iterator_mixins = [
]
TestIterator = make_iterator('TestIterator', test_iterator_mixins)

train_iterator_kwargs = dict(
    batch_size=batch_size,
    buffer_size=16,
    flip_horizontal_p=0.5,
    flip_vertical_p=0.5,
    affine_p=1.,
    affine_scale_choices=np.linspace(0.5, 1.5, 11),
    affine_shear_choices=np.linspace(-0.25, 0.25, 11),
    affine_translation_choices=np.arange(-32, 32, 1),
    affine_rotation_choices=np.arange(-45, 45, 1),
    adjust_gamma_p=0.5,
    # NOTE(review): "chocies" looks like a typo — confirm the gamma mixin
    # actually consumes this keyword, otherwise these choices are ignored.
    adjust_gamma_chocies=np.linspace(0.8, 1.2, 11)
)
train_iterator = TrainIterator(**train_iterator_kwargs)

test_iterator_kwargs = dict(
    batch_size=batch_size,
)
test_iterator = TestIterator(**test_iterator_kwargs)

# Epoch hooks: persist best weights, record/plot history, and stop after
# 100 epochs without validation improvement.
save_weights = SaveWeights(model_fname, only_best=True, pickle=False)
save_training_history = SaveTrainingHistory(model_history_fname)
plot_training_history = PlotTrainingHistory(model_graph_fname)
early_stopping = EarlyStopping(patience=100)

# Shared convolution options: SAME padding with very leaky ReLU.
conv_kwargs = dict(
    pad='same',
    nonlinearity=nn.nonlinearities.very_leaky_rectify,
)
# Residual network over 3x256x256 RGB inputs.
l = nn.layers.InputLayer(name='in', shape=(None, 3, image_size, image_size))

# 256x256 -> 128x128: strided 7x7 stem convolution.
l = conv2dbn(
    l, name='l1c1', num_filters=32, filter_size=(7, 7), stride=2,
    **conv_kwargs
)

# 128x128 stage: three residual blocks, no downsampling.
for i in range(3):
    l = residual_block(
        l, name='2c%s' % i,
        # bottleneck=False,
        num_filters=48, filter_size=(3, 3),
        num_layers=2,
        **conv_kwargs
    )

# 128x128 -> 64x64: first block of the stage downsamples with stride 2.
for i in range(3):
    actual_stride = 2 if i == 0 else 1
    l = residual_block(
        l, name='3c%s' % i,
        # bottleneck=True, bottleneck_factor=4,
        num_filters=64, filter_size=(3, 3), stride=actual_stride,
        num_layers=2,
        **conv_kwargs
    )

# 64x64 -> 32x32
for i in range(3):
    actual_stride = 2 if i == 0 else 1
    l = residual_block(
        l, name='4c%s' % i,
        # bottleneck=True, bottleneck_factor=4,
        num_filters=80, filter_size=(3, 3), stride=actual_stride,
        num_layers=3,
        **conv_kwargs
    )

# 32x32 -> 16x16
for i in range(4):
    actual_stride = 2 if i == 0 else 1
    l = residual_block(
        l, name='5c%s' % i,
        # bottleneck=True, bottleneck_factor=4,
        num_filters=96, filter_size=(3, 3), stride=actual_stride,
        num_layers=3,
        **conv_kwargs
    )

# 16x16 -> 8x8
for i in range(5):
    actual_stride = 2 if i == 0 else 1
    l = residual_block(
        l, name='6c%s' % i,
        # bottleneck=True, bottleneck_factor=4,
        num_filters=128, filter_size=(3, 3), stride=actual_stride,
        num_layers=3,
        **conv_kwargs
    )

# Head: 8x8 global average pooling, dropout, softmax over n_classes.
l = nn.layers.dnn.Pool2DDNNLayer(l, name='gp', pool_size=8, mode='average_inc_pad')
l = nn.layers.DropoutLayer(l, name='gpdrop', p=0.5)
l = nn.layers.DenseLayer(l, name='out', num_units=n_classes, nonlinearity=nn.nonlinearities.softmax)
# Classifier wrapper: SGD with Nesterov momentum, L2 weight decay, and a
# 15% stratification-free validation split.
net = NeuralNet(
    layers=l,
    regression=False,
    use_label_encoder=False,
    objective_l2=1e-6,
    # update=nn.updates.adam,
    # update_learning_rate=1e-2,
    update=nn.updates.nesterov_momentum,
    # Learning rate lives in a shared variable so StepDecay can anneal it.
    update_learning_rate=theano.shared(float32(1e-1)),
    train_split=TrainSplit(0.15, random_state=42, stratify=False),
    batch_iterator_train=train_iterator,
    batch_iterator_test=test_iterator,
    on_epoch_finished=[
        save_weights,
        save_training_history,
        plot_training_history,
        early_stopping,
        StepDecay('update_learning_rate', start=1e-1, stop=1e-5)
    ],
    verbose=10,
    max_epochs=2000,
)
|
python
|
from __future__ import annotations
import abc
import datetime
import decimal
import typing as t
import zoneinfo
# region: Bases
class SpecialValue(abc.ABC):
"""Represents a special value specific to an SQL Type."""
def __init__(self, python_value: t.Any, sql_value: str):
self._py_value = python_value
self.sql = sql_value
@property
def py(self) -> t.Any:
"""Python representation of the special value."""
if isinstance(self._py_value, t.Callable):
return self._py_value()
else:
return self._py_value
def __repr__(self):
return f'SpecialValue({self.py}, "{self.sql}")'
def __str__(self):
return self.sql
def __eq__(self, other: t.Any):
if not isinstance(other, SpecialValue):
return False
return self.sql == other.sql and self._py_value == other._py_value
class SQLTypeMeta(abc.ABCMeta):
"""Metaclass defining the behaviour of non-initialised SQLType classes."""
__types__ = dict()
py: t.Any
sql: str
def __init__(cls, *_args, **_kwargs):
super().__init__(cls)
if cls.__name__ == "SQLType":
return
SQLTypeMeta.__types__[cls.py] = cls
def __repr__(self):
py = getattr(self, "py", None)
if py:
py = f"'{py.__name__}'"
return f"<{self.__name__} python={py} sql='{getattr(self, 'sql', None)}'>"
def __str__(self):
return self.sql
def __eq__(self, other: t.Any):
if isinstance(other, SQLTypeMeta) or isinstance(type(other), SQLTypeMeta):
return self.sql == other.sql
return False
def __hash__(self):
return hash(repr(self))
class SQLType(metaclass=SQLTypeMeta):
"""Base class representing an SQL datatype."""
py: t.Any
sql: str
def __init__(self):
pass
def __repr__(self):
py = getattr(self, "py", None)
if py: # pragma: no cover
py = f"'{py.__name__}'"
return f"<{self.__class__.__name__} python={py} sql='{getattr(self, 'sql', None)}'>"
def __str__(self):
return self.sql
def __eq__(self, other: t.Any):
if isinstance(other, SQLTypeMeta) or isinstance(type(other), SQLTypeMeta):
return self.sql == other.sql
return False
def __hash__(self):
return hash(repr(self))
# endregion
# region: Numeric Types
class Integer(SQLType):
"""
Whole number from -32768 to +32767.
Uses 4 bytes of storage.
"""
py = int
sql = "INTEGER"
class SmallInteger(Integer):
"""
Whole number from -2147483648 to +2147483647.
Uses 2 bytes of storage.
"""
sql = "SMALLINT"
class BigInteger(Integer):
"""
Whole number from -9223372036854775808 to +9223372036854775807.
Uses 8 bytes of storage.
"""
sql = "BIGINT"
class Serial(Integer):
"""
Auto-incrementing number from 1 to 2147483647.
Uses 4 bytes of storage.
"""
sql = "SERIAL"
class SmallSerial(Serial):
"""
Auto-incrementing number from 1 to 32767.
Uses 2 bytes of storage.
"""
sql = "SMALLSERIAL"
class BigSerial(Serial):
"""
Auto-incrementing number from 1 to 9223372036854775807.
Uses 8 bytes of storage.
"""
sql = "BIGSERIAL"
class Numeric(SQLType):
"""
Precise decimal number with configurable precision and scale.
Uses 3 to 8 bytes overhead and 2 bytes for every 4 decimal digits.
"""
py = decimal.Decimal
sql = "NUMERIC"
# special values
not_a_number = SpecialValue(decimal.Decimal("NaN"), "'NaN'")
def __init__(self, precision: int, scale: int = 0): # noqa
self.precision = precision
self.scale = scale
self.sql = f"NUMERIC({precision}, {scale})"
class Decimal(Numeric):
"""
Precise decimal number with configurable precision and scale.
Uses 3 to 8 bytes storage overhead and 2 bytes for every 4 decimal digits.
"""
sql = "DECIMAL"
class Real(SQLType):
"""
Inexact floating-point number with a range of 1E-37 to 1E+37.
Uses 4 bytes of storage.
"""
py = float
sql = "REAL"
# special values
not_a_number = SpecialValue(float("NaN"), "'NaN'")
infinity = SpecialValue(float("inf"), "'Infinity'")
negative_infinity = SpecialValue(float("-inf"), "'-Infinity'")
class DoublePrecision(Real):
"""
Inexact floating-point number with a range of 1E-307 to 1E+308.
Uses 8 bytes of storage.
"""
sql = "DOUBLE PRECISION"
class Money(SQLType):
"""
Currency amount with a fixed precision ranging from -92233720368547758.08 to +92233720368547758.07.
Uses 8 bytes of storage.
"""
py = str
sql = "MONEY"
# endregion
# region: String Types
class Text(SQLType):
"""
Variable unlimited string.
Uses 1 byte of storage overhead for strings under 126 bytes in length, or 4 bytes if over that length.
"""
py = str
sql = "TEXT"
class ByteA(SQLType):
"""
Variable unlimited binary string.
Uses 1 byte of storage overhead for strings under 126 bytes in length, or 4 bytes if over that length.
"""
py = bytes
sql = "BYTEA"
# endregion
# region: DateTime Types
class Timestamp(SQLType):
"""
Timezone naive datetime.
Uses 8 bytes of storage.
"""
py = datetime.datetime
sql = "TIMESTAMP"
# special values
epoch = SpecialValue(datetime.datetime.utcfromtimestamp(0), "'Epoch'")
infinity = SpecialValue(datetime.datetime.max, "'Infinity'")
negative_infinity = SpecialValue(datetime.datetime.min, "'-Infinity'")
now = SpecialValue(datetime.datetime.now, "Now")
today = SpecialValue(datetime.datetime.today, "Today")
tomorrow = SpecialValue(lambda: datetime.datetime.today() + datetime.timedelta(days=1), "Tomorrow")
yesterday = SpecialValue(lambda: datetime.datetime.today() + datetime.timedelta(days=-1), "Yesterday")
def __init__(self, precision: int): # noqa
self.precision = precision
self.sql = f"TIMESTAMP({precision})"
class TimestampTZ(Timestamp):
"""
Timezone aware datetime.
Uses 8 bytes of storage.
"""
sql = "TIMESTAMP WITH TIME ZONE"
def __init__(self, precision: int): # noqa
self.precision = precision
self.sql = f"TIMESTAMP({precision}) WITH TIME ZONE"
class Date(SQLType):
"""
Date from 4713BC to 5874897AD.
Uses 4 bytes of storage.
"""
py = datetime.date
sql = "DATE"
# special values
epoch = SpecialValue(datetime.datetime.utcfromtimestamp(0).date(), "'Epoch'")
infinity = SpecialValue(datetime.datetime.max.date(), "'Infinity'")
negative_infinity = SpecialValue(datetime.datetime.min.date(), "'-Infinity'")
now = SpecialValue(datetime.date.today, "Now")
today = SpecialValue(datetime.date.today, "Today")
tomorrow = SpecialValue(lambda: datetime.date.today() + datetime.timedelta(days=1), "Tomorrow")
yesterday = SpecialValue(lambda: datetime.date.today() + datetime.timedelta(days=-1), "Yesterday")
class Time(SQLType):
"""
Timezone naive time of day.
Uses 8 bytes of storage.
"""
py = datetime.time
sql = "TIME"
# special values
now = SpecialValue(lambda: datetime.datetime.now().time(), "Now")
allballs = SpecialValue(datetime.time(0, 0, 0, 0, zoneinfo.ZoneInfo("UTC")), "Allballs")
def __init__(self, precision: int): # noqa
self.precision = precision
self.sql = f"TIME({precision})"
class Interval(SQLType):
"""
Time interval.
Uses 16 bytes of storage.
"""
py = datetime.timedelta
sql = "INTERVAL"
# endregion
# region: Boolean Types
class Boolean(SQLType):
"""
True or False value.
Uses 1 byte of storage.
"""
py = bool
sql = "BOOLEAN"
# endregion
# region: Collection Types
class JSON(SQLType):
"""JSON data objects."""
py = dict
sql = "JSON"
class JSONB(SQLType):
"""JSONB data objects."""
py = dict
sql = "JSONB"
class Array(SQLType):
"""Variable length array containing any supported type."""
py = list
def __init__(self, element_type: SQLType, size: int = ''): # noqa
self.element_type = element_type
self.element_size = size
self.sql = f"{element_type}[{size}]"
# endregion
SQLTypes = t.Union[SQLType, SQLTypeMeta]
|
python
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from collections import OrderedDict
import numpy as np
from oneflow.compatible import single_client as flow
import test_global_storage
from test_util import GenArgList
import unittest
def TestMultiInput(x1, x2):
    """Build and run the custom "TestMultiInput" user op on blobs x1 and x2.

    Returns the op's single output blob "y".
    """
    return (
        flow.user_op_builder("my_test_multi_input")
        .Op("TestMultiInput")
        .Input("x1", [x1])
        .Input("x2", [x2])
        .Output("y")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@flow.unittest.skip_unless_1n1d()
class Test_TestMultiInputGrad(flow.unittest.TestCase):
    """Gradient check for the custom TestMultiInput op on a single GPU."""

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_TestMultiInput_grad_mirrored_inplace(test_case):
        # Mirrored-view float32 training job configuration.
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.mirrored_view())
        shape = (
            3,
            3,
        )

        @flow.global_function(type="train", function_config=func_config)
        def TestMultiInputJob():
            with flow.scope.placement("gpu", "0:0"):
                x1 = flow.get_variable(
                    "x1",
                    shape=shape,
                    dtype=flow.float,
                    initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                    trainable=True,
                )
                x2 = flow.get_variable(
                    "x2",
                    shape=shape,
                    dtype=flow.float,
                    initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                    trainable=True,
                )
                loss = TestMultiInput(x1, x2)
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
                ).minimize(loss)
                # Capture forward values and gradients for the checks below.
                flow.watch(x1, test_global_storage.Setter("x1"))
                flow.watch_diff(x1, test_global_storage.Setter("x1_diff"))
                flow.watch(x2, test_global_storage.Setter("x2"))
                flow.watch_diff(x2, test_global_storage.Setter("x2_diff"))
                return loss

        out = TestMultiInputJob().get()
        x1_diff = test_global_storage.Get("x1_diff")
        x2_diff = test_global_storage.Get("x2_diff")
        # Expectations: output equals x1, d(loss)/dx1 == 1, d(loss)/dx2 == 2.
        # NOTE(review): inferred from these assertions - confirm against the
        # TestMultiInput kernel's definition.
        expect_out = test_global_storage.Get("x1")
        expect_x1_diff = np.ones(shape, dtype=np.float32)
        expect_x2_diff = np.ones(shape, dtype=np.float32) * 2.0
        assert np.allclose(out.numpy(), expect_out)
        assert np.allclose(x1_diff, expect_x1_diff)
        assert np.allclose(x2_diff, expect_x2_diff)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
python
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from obscmd import compat
def init():
    """Initialise the module-level registry.

    Must be called once before any other function in this module.
    """
    global _global_dict
    _global_dict = {}
def use_lock(key):
    """Ensure a lock exists for *key* and return the lock's registry key.

    The lock lives in the registry under ``"<key>_lock"`` and is created on
    first use.
    NOTE(review): this check-and-create is not itself synchronised; it is
    presumably called before concurrent workers start - confirm.
    """
    lock_key = key + '_lock'
    if lock_key not in _global_dict:
        _global_dict[lock_key] = compat.Lock()
    return lock_key
def set_value(key, value):
    """Store *value* in the registry under *key* (no locking)."""
    _global_dict[key] = value
def set_value_lock(key, value):
    """Assign ``.value`` on the shared object under *key*, holding its lock.

    Assumes ``_global_dict[key]`` already holds an object with a ``value``
    attribute (e.g. a multiprocessing ``Value``) - TODO confirm.
    """
    lock_key = use_lock(key)
    with _global_dict[lock_key]:
        _global_dict[key].value = value
def get_value(key, default=None):
    """Return the value stored under *key*, or *default* when absent."""
    return _global_dict.get(key, default)
def append_list_lock(key, value):
    """Append *value* to the shared list under *key*, holding its lock.

    Assumes ``_global_dict[key]`` already holds a list - TODO confirm.
    """
    lock_key = use_lock(key)
    with _global_dict[lock_key]:
        _global_dict[key].append(value)
|
python
|
from hill import Hill
from numpy.linalg.linalg import norm
from jumper import Jumper
from jump_result import JumpResult
from physics_simulator import PhysicsSimulator
import numpy as np
import random
# Angle-of-attack sample points (degrees) at which the aerodynamic
# coefficients below were tabulated.
angles = [0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,
          22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44]
# Tabulated drag (kw) coefficients, one row per skill tier. Row 0 is used
# for high flight stats and row 2 for low ones (see get_aerodynamic_data);
# presumably best to worst tier - confirm against the data source.
kw_pts = [[0.00185, 0.00204, 0.00223, 0.00243, 0.00261, 0.00281, 0.00301, 0.00319,
           0.00338, 0.00355, 0.00372, 0.00388, 0.00403, 0.00418, 0.00432, 0.00447,
           0.00462, 0.00479, 0.00502, 0.00537, 0.00614, 0.00691, 0.00767],
          [0.00232, 0.00245, 0.00258, 0.00272, 0.00285, 0.00298, 0.00311, 0.00325,
           0.00337, 0.00350, 0.00362, 0.00374, 0.00386, 0.00398, 0.00410, 0.00422,
           0.00436, 0.00453, 0.00474, 0.00504, 0.00553, 0.00602, 0.00651],
          [0.00261, 0.00271, 0.00282, 0.00293, 0.00304, 0.00315, 0.00326, 0.00337,
           0.00347, 0.00357, 0.00367, 0.00376, 0.00386, 0.00396, 0.00407, 0.00419,
           0.00432, 0.00449, 0.00471, 0.00503, 0.00555, 0.00606, 0.00658]]
# Same layout for the lift (ka) coefficients.
ka_pts = [[0.00093, 0.00139, 0.00185, 0.00231, 0.00275, 0.00316, 0.00354, 0.00390,
           0.00424, 0.00455, 0.00484, 0.00511, 0.00534, 0.00555, 0.00574, 0.00591,
           0.00605, 0.00617, 0.00628, 0.00638, 0.00655, 0.00672, 0.00689],
          [0.00116, 0.00180, 0.00244, 0.00308, 0.00365, 0.00396, 0.00424, 0.00450,
           0.00472, 0.00492, 0.00508, 0.00522, 0.00534, 0.00543, 0.00550, 0.00555,
           0.00560, 0.00565, 0.00571, 0.00582, 0.00606, 0.00629, 0.00652],
          [0.00130, 0.00177, 0.00224, 0.00270, 0.00316, 0.00350, 0.00382, 0.00411,
           0.00436, 0.00459, 0.00479, 0.00496, 0.00510, 0.00521, 0.00531, 0.00538,
           0.00545, 0.00551, 0.00558, 0.00569, 0.00590, 0.00611, 0.00632]]
# Fit one 4th-degree polynomial per tier so coefficients can be evaluated
# at arbitrary angles, not just the tabulated ones.
kw = [np.poly1d(np.polyfit(angles, kw_pt, 4)) for kw_pt in kw_pts]
ka = [np.poly1d(np.polyfit(angles, ka_pt, 4)) for ka_pt in ka_pts]
def get_aerodynamic_data(stat, max_stat, table, angle):
    """Piecewise-linear blend between the three polynomial tiers in *table*.

    Stats in [0, max_stat/2] interpolate between the lowest tier (table[2])
    and the middle tier (table[1]); stats above the midpoint interpolate
    between table[1] and the top tier (table[0]).
    """
    midpoint = max_stat / 2
    if stat <= midpoint:
        low_tier, high_tier, offset = table[2], table[1], 0
    else:
        low_tier, high_tier, offset = table[1], table[0], midpoint
    local = stat - offset
    return low_tier(angle) * (midpoint - local) / midpoint + high_tier(angle) * local / midpoint
def get_kw(stat, max_stat, angle):
    """Drag coefficient for a jumper with flight skill *stat* at *angle*."""
    return get_aerodynamic_data(stat, max_stat, kw, angle)


def get_ka(stat, max_stat, angle):
    """Lift coefficient for a jumper with flight skill *stat* at *angle*."""
    return get_aerodynamic_data(stat, max_stat, ka, angle)
class JumpSimulator:
    """Simulates complete ski jumps on a given hill.

    Combines inrun/flight physics with jumper-dependent aerodynamic
    coefficients and a deterministically seeded judging model.
    """

    def __init__(self, hill: Hill):
        self.hill = hill
        self.physics_sim = PhysicsSimulator(self.hill.profile)
        # Let the physics engine query jumper-specific aero coefficients.
        self.physics_sim.aero_coeffs_fun = self.get_aero_coeffs

    def simulate_jump(self, jumper: Jumper, wind, gate, jump_seed):
        """Run one jump; return (JumpResult, (fly_x, fly_y)) flight trace."""
        self.jumper = jumper
        self.wind = wind
        self.gate = gate
        self.jump_seed = jump_seed
        inrun_vel = self.physics_sim.simulate_inrun(gate, jumper.get_inrun_coeff())
        inrun_speed_kmh = np.linalg.norm(inrun_vel) * 3.6  # m/s -> km/h
        takeoff = jumper.get_takeoff_speed()
        fly_x, fly_y, pos, normal_vel_land = self.physics_sim.simulate_flight(
            inrun_vel, takeoff, wind)
        distance = self.hill.profile.get_distance(pos[0])
        judges_points = self.get_judges_points(jump_seed, normal_vel_land)
        return (JumpResult(inrun_speed_kmh, distance, 0, wind, judges_points, self.hill), (fly_x, fly_y))

    def get_aero_coeffs(self, angle):
        """Return (ka, kw) for the current jumper's flight skill at *angle*."""
        stat = self.jumper.get_flight_coeffs()
        return (get_ka(stat, 100, angle), get_kw(stat, 100, angle))

    def get_judges_points(self, jump_seed, normal_speed):
        """Score the landing: five judge marks in 0..20 with 0.5 steps.

        Harder landings (larger impact speed normal to the slope) get a
        lower base mark; marks are then jittered deterministically from
        *jump_seed*.
        """
        normal_speed = -normal_speed  # positive = harder impact
        stat = self.jumper.get_style()
        # Style-dependent thresholds between landing-quality bands.
        telemark = 4.2 + stat / 100
        two_legs = 5.6 + stat / 300
        touch = 6.4 + stat / 200
        if normal_speed <= telemark:
            base_score = 19
        elif normal_speed <= two_legs:
            base_score = 16.5
        elif normal_speed <= touch:
            base_score = 12.5
        else:
            base_score = 9.
        # Work on a 0-40 half-point scale. Cast to int because random.randint
        # rejects float bounds on Python >= 3.12 (16.5 * 2 etc. are floats).
        base_score = int(base_score * 2)
        random.seed(jump_seed)
        bias = 3
        lower_bound = max(0, base_score - bias)
        upper_bound = min(40, base_score + bias)
        scores = [random.randint(
            lower_bound, upper_bound) / 2 for _ in range(5)]
        return scores
|
python
|
"""
Implements a simple HTTP/1.0 Server
"""
import socket
# Define socket host and port
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 7777
# Create socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((SERVER_HOST, SERVER_PORT))
server_socket.listen(1)
print('Listening on port %s ...' % SERVER_PORT)
while True:
# Wait for client connections
client_connection, client_address = server_socket.accept()
print(f'accepted from :{client_address}')
# Get the client request
request = client_connection.recv(1024).decode()
print(request.split('\n')[0])
# Send HTTP response
response = f'HTTP/1.0 200 OK\r\nConnection: close\r\n\r\nHello {client_address} \n\n\nrequest: {request}\r\n'
client_connection.sendall(response.encode())
client_connection.close()
# Close socket
server_socket.close()
|
python
|
# coding=utf-8
#-----------------------------------------------------------
# IMPORTS
#-----------------------------------------------------------
import enigmus
import messages
import random
from entities import Entity, Player, Room
#-----------------------------------------------------------
# CLASSES
#-----------------------------------------------------------
class DoorCode(Entity):
    """Keypad lock: typing the configured code moves the player into the
    room configured in the room file (user-facing text is Swedish)."""

    def __init__(self):
        super(DoorCode, self).__init__()
        # Set from room file.
        self.code = ''  # secret code that unlocks the door
        self.room = ''  # id of the destination room

        self.describe('ett', [], ['kodlås' , 'lås' ],
                      '' , [], ['kodlåset', 'låset'],
                      'Det är en liten kodterminal för att trycka in koder '
                      'med. Du förmodar att dörrarna till datasalen låses upp '
                      'om man trycker in rätt kod.')

        # React to commands typed by players in the same room.
        self.on_message('player_command', self.__player_command,
                        filter=messages.for_nearby_entities(self))

    def __player_command(self, player, command):
        """Handle 'tryck kod <code>' commands from a nearby player."""
        args = command.split(' ')

        if args[0] != 'tryck':
            return
        if len(args) < 2 or args[1] != 'kod':
            player.text('Tryck vad? Kod?')
            return
        if len(args) < 3:
            player.text('Vilken kod vill du trycka?')
            return

        code = args[2]
        player.emote('slår in en kod.')
        # One beep per typed character, capped at four.
        beeps = random.sample(['*beep*', '*bzzzt*', '*boop*', '*bip*', '*BEEP*'], min(4, len(code)))
        player.text('{} piper terminalen när du trycker på '
                    'knapparna och slår in koden {}'.format(' '.join(beeps), code))

        if code != self.code:
            player.text('Ingenting händer.')
            return

        # Announce in the old room, move the player, then describe the new room.
        room = player.container
        for p in room.get_entities(Player):
            p.text('Glasdörrarna till datasalen slår upp så snabbt att du hoppar '
                   'bakåt.')
        player.emote('går in i datasalen.')
        player.text('Glasdörrarna slår igen bakom dig.')
        enigmus.instance.database.rooms[self.room].add_entity(player)
        player.text(player.container.get_description(exclude_actor=player))
        for p in room.get_entities(Player):
            p.text('Lika snabbt som de öppnas slår dörrarna igen, alldeles för '
                   'snabbt för att du skulle hinna gå in utan att vara beredd.')
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-11-23 21:48
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: adds the Script model, drops the old subtask->testcases
    # relation, and links test cases to subtasks and scripts via nullable FKs.
    # NOTE: applied migrations must not be edited beyond comments.

    dependencies = [
        ('problems', '0040_auto_20161123_2106'),
    ]

    operations = [
        migrations.CreateModel(
            name='Script',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=256, verbose_name='title')),
                ('script', models.TextField(verbose_name='script')),
                ('disabled', models.BooleanField(default=False, verbose_name='disabled')),
                ('problem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='problems.ProblemRevision', verbose_name='problem')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.RemoveField(
            model_name='subtask',
            name='testcases',
        ),
        migrations.AddField(
            model_name='testcase',
            name='subtasks',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='problems.Subtask'),
        ),
        migrations.AlterField(
            model_name='testcase',
            name='name',
            field=models.CharField(blank=True, editable=False, max_length=20, verbose_name='name'),
        ),
        migrations.AddField(
            model_name='testcase',
            name='script',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='problems.Script'),
        ),
    ]
|
python
|
# Ultroid - UserBot
# Copyright (C) 2021-2022 TeamUltroid
#
# This file is a part of < https://github.com/TeamUltroid/Ultroid/ >
# PLease read the GNU Affero General Public License in
# <https://www.github.com/TeamUltroid/Ultroid/blob/main/LICENSE/>.
#
# Ported by @AyiinXd
# FROM Ayiin-Userbot <https://github.com/AyiinXd/Ayiin-Userbot>
# t.me/AyiinXdSupport & t.me/AyiinSupport
# ========================×========================
# Jangan Hapus Credit Ngentod
# ========================×========================
import os
from telethon import Button, custom
from AyiinXd import CMD_HANDLER as cmd
from AyiinXd import CMD_HELP, bot, tgbot
from AyiinXd.ayiin import ayiin_cmd, eor
from Stringyins import get_languages, language, get_string
from .button import BTN_URL_REGEX
def build_keyboards(buttons):
    """Convert button tuples into rows of telethon URL buttons.

    NOTE(review): each button uses btn[0] as BOTH the label and the URL, and
    same-row appends always target the FIRST row (keyb[0]) instead of the
    last. This looks inherited from a (text, url, same_line) pattern - but
    the only caller passes Y_BUTTONS (already-built Button rows) and only
    truth-tests the result, so changing it here would be unverifiable;
    confirm against the .button module before fixing.
    """
    keyb = []
    for btn in buttons:
        if btn[0] and keyb:
            keyb[0].append(Button.url(btn[0], btn[0]))
        else:
            keyb.append([Button.url(btn[0], btn[0])])
    return keyb
# Inline keyboard for the "string" command: links to the string-session
# generators and the support chat.
Y_BUTTONS = [
    [
        custom.Button.url("Bᴏᴛ Sᴛʀɪɴɢ", "https://t.me/AyiinStringRobot"),
        custom.Button.url("Rᴇᴘʟɪᴛ Sᴛʀɪɴɢ", "https://repl.it/@AyiinXd/AyiinString?lite=1&outputonly=1"),
    ],
    [
        custom.Button.url("Sᴜᴘᴘᴏʀᴛ", "https://t.me/AyiinXdSupport"),
    ],
]
@ayiin_cmd(pattern=r"lang(?: |$)(.*)")
async def setlang(event):
await eor(event, get_string("com_1"))
languages = get_languages()
if languages:
try:
AyiinUBOT = await tgbot.get_me()
BOT_USERNAME = AyiinUBOT.username
yinslang = await event.client.inline_query( # pylint:disable=E0602
BOT_USERNAME, "lang",
)
await yinslang[0].click(
event.chat_id, reply_to=event.reply_to_msg_id, hide_via=True
)
await event.delete()
except Exception as e:
await eor(event, get_string("error_1").format(e)
)
@ayiin_cmd(pattern=r"set( id| en|$)(.*)")
async def settt(event):
await eor(event, get_string("com_1"))
lang = event.pattern_match.group(1).strip()
languages = get_languages()
language[0] = lang
if not os.environ.get("lang"):
os.environ.setdefault("language", "1")
if lang == "id":
try:
os.environ.setdefault("language", lang)
await event.edit(get_string("lang_2").format(languages[lang]['asli'], lang)
)
except Exception as e:
await eor(event, get_string("error_1").format(e)
)
if lang == "en":
try:
os.environ.setdefault("language", lang)
await event.edit(get_string("lang_2").format(languages[lang]['asli'], lang)
)
except Exception as e:
await eor(event, get_string("error_1").format(e)
)
@ayiin_cmd(pattern="string(?:\\s|$)([\\s\\S]*)")
async def test_string(event):
ayiin = await eor(event, get_string("com_1"))
buttons = build_keyboards(Y_BUTTONS)
if buttons:
try:
AyiinUBOT = await tgbot.get_me()
BOT_USERNAME =AyiinUBOT.username
results = await event.client.inline_query( # pylint:disable=E0602
BOT_USERNAME, "string",
)
await results[0].click(
event.chat_id, reply_to=event.reply_to_msg_id, hide_via=True
)
await event.delete()
except Exception as e:
await eor(event, get_string("error_1").format(e)
)
CMD_HELP.update(
{
"yinslang": f"**Plugin :** `yinslang`\
\n\n » **Perintah :** `{cmd}lang`\
\n » **Kegunaan : **__Untuk Melihat Daftar Bahasa Yang Tersedia.__\
\n\n » **Perintah :** `{cmd}set <nama_bahasa>`\
\n » **Kegunaan : **__Untuk Mengubah Bahasa.__\
\n\n » **Perintah :** `{cmd}string`\
\n » **Kegunaan : **__Untuk Membuat String Session.__\
"
}
)
|
python
|
# encoding:utf-8
from flask import Flask,request
import json
import time
import sys
import sqlite3
import os

app=Flask(__name__)

#### Reply payload template (mutated in place for every request) ####
# NOTE(review): `re` shadows the stdlib re module name (re is not imported,
# so this is harmless here, but confusing).
re={}
result={}
result["type"]="text"
result["content"]=""
re["error_code"]=0
re["error_msg"]=""
re["result"]=result
# Maps a Chinese sensor name to its column in the myhome_nodedata table.
dic={'温度':'temperature','湿度':'humidity','光照':'light','二氧化碳':'co2_simulation','声音':'noise'}
# Intent id -> canned spoken reply text (air-conditioner control replies).
response={'AC1_OC_OPEN':'请稍等,正在为您打开空调,接下来您可以选择调节温度、风速,改变空调模式等选项,如果我没理解您的命令,\
您可以通过“怎样调节”加目标项来获得标准控制命令',
          'AC1_OC_CLOSE':'请稍等,正在为您关闭空调',
          'AC1_TEMP_UP':'正在帮您升高空调温度,请稍等',
          'AC1_TEMP_DOWN':'正在帮您降低空调温度,请稍等',
          'AC1_WSPEED_UP':'正在帮您提高空调风速,请稍等',
          'AC1_WSPEED_DOWN':'正在帮您降低空调风速,请稍等',
          'AC1_SLEEP_OPEN':'好的,正在为您打开睡眠模式,请稍等',
          'AC1_SLEEP_CLOSE':'好的,正在为您关闭睡眠模式,请稍等',
          'AC1_TIMER_CLOSE':'好的,正在为您取消空调定时,请稍等'}
# Slot normalisation: maps recognised Chinese slot words to the codes
# stored in the commands table.
normal={'高':'0','中':'1','低':'2','制冷':'0','制热':'1','送风':'2','自动':'3','除湿':'4','关':'close','开':'open','平衡':'0','环保':'1','极致':'2',
        '16度':'16','17度':'17','18度':'18','19度':'19','20度':'20','21度':'21','22度':'22','23度':'23','24度':'24','25度':'25',
        '26度':'26','27度':'27','28度':'28','29度':'29','30度':'30','降低':'lower','热':'lower','升高':'higher','冷':'higher',
        '半小时':'0.5','一小时':'1.0','一个半小时':'1.5','两小时':'2.0','两个半小时':'2.5','三小时':'3.0','温度':'0','湿度':'1','声音':'2','光照':'3','二氧化碳':'4'}
class Response:
    """Builds the JSON reply for a recognised intent/slot pair.

    Fix: the sqlite connection opened for environment queries was never
    closed; it is now released in a ``finally`` block.
    """

    def __init__(self,intent,nom_word):
        self.re=re
        self.intent=intent
        self.nom_word=nom_word

    def json_resp(self,intent,nom_word):
        """Fill the shared reply template for *intent*/*nom_word*.

        Mutates and returns the module-level ``re`` template dict.
        """
        if intent in response:
            re["result"]["content"]=response[intent]
        elif intent=="AC1_TEMP_TO":
            re["result"]["content"]="好的,正在为您将空调设置为"+nom_word+",请稍等"
        elif intent=="AC1_WSPEED_TO":
            re["result"]["content"]="好的,正在为您调节风速为"+nom_word+",请稍等"
        elif intent=="AC1_TIMER_SET":
            re["result"]["content"]="好的,正在为您设置空调定时"+nom_word
        elif intent=="AC1_COMMOD_SELECT" or intent=="AC1_SMARTMOD_SELECT":
            re["result"]["content"]="好的,正在为您调节到"+nom_word+"模式,请稍等"
        elif intent=="ROOM1_ENV_INFO_QUERY":
            # Column name comes from the fixed whitelist `dic`, so the
            # string-built SQL cannot be injected into.
            conn = sqlite3.connect("db.sqlite3")
            try:
                c = conn.cursor()
                # Latest reading: newest row first.
                c.execute('SELECT '+dic[nom_word]+' FROM myhome_nodedata ORDER BY id desc')
                query_result=c.fetchone()[0]
            finally:
                conn.close()  # fix: the connection previously leaked
            re["result"]["content"]="您当前的室内"+nom_word+"为"+str(query_result)
        else:
            re["result"]["content"]="请求失败,请重试"
        return re
@app.route("/unit/callback",methods=['POST'])
def callback():
########加载json数据并转换为字典形式###########
dic=json.loads(str(request.data,encoding='utf-8'))
########获取字典中的意图和词槽即“intent”和“normallized_word”字段数据########
intent=dic["response"]["schema"]["intent"]
nom_word=dic["response"]["schema"]["slots"][0]["normalized_word"]
######创建回复类对象#######
exp1=Response(intent,nom_word)
########将表征请求中意图、词槽的数据更新存储控制命令的数据库表单(若不存在则先创建)############
if os.path.exists("db.sqlite3"):
conn = sqlite3.connect("db.sqlite3")
c = conn.cursor()
else:
conn = sqlite3.connect("db.sqlite3")
c = conn.cursor()
c.execute('''CREATE TABLE myhome_commands(ID integer NOT NULL PRIMARY KEY AUTOINCREMENT,INTENT
text NOT NULL ,SLOTS text NOT NULL)''')
c.execute('INSERT INTO myhome_commands VALUES(1,"0","0")') #插入意图和归一化词槽
c.execute("UPDATE myhome_commands SET INTENT=?,SLOTS=? where ID=1",(intent,normal[nom_word]))
conn.commit()
conn.close()
#######获得答复数据并转换为json格式返回给请求方######
json_re=exp1.json_resp(intent,nom_word)
json_re=json.dumps(re)
return json_re
# Development entry point. NOTE(review): host is hard-coded to a LAN address
# and debug=True must not be used in production.
if __name__=='__main__':
    app.run(host='172.20.10.12',port=9999,debug=True)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#1 - Normalize vector:
def normalize_v(V):
    """Scale vector V to unit Euclidean length."""
    magnitude = sqrt(sum(component ** 2 for component in V))  # vector norm
    return [component / magnitude for component in V]
#2 - Find D, euclidian distance
def euclid_dis(V):
    # Distance between input vector V and the whole weight matrix W:
    # sqrt( sum over i,j of (V[i] - W[i][j])**2 ).
    # Python 2 code (xrange); relies on the module-level matrix W.
    d=0
    for i in xrange(len(V)):
        for j in xrange(len(W[i])): #Len of matrix
            d += (V[i]-W[i][j])**2 #∑(xi – mij)**2
    return sqrt(d)
#3 - Find j*, the winning attractor
def find_att(d):
    # Exhaustively scan the Units x Units grid for the weight closest to d.
    star=[0,0] #Start with the beginning, keep coordinates in a list
    for A in xrange(Units) : # scan all units
        for B in xrange(Units) :
            if distance(W[A][B], d) < distance(W[star[0]][star[1]], d) :
                star = [A,B] # closest unit
    return star
#4 - set j*'s output signal to 1, all others to 0
# This function exists because the course algorithm requires an output;
# the output itself is not otherwise used by this script.
def provide_sortie(sortie,star):
    for x in xrange(Units):
        for y in xrange(Units):
            if x == star[0] and y == star[1]: sortie[x][y]=1
            else: sortie[x][y]=0
#5 - adjust the weights
# The neighborhood is hexagonal; dis_rad measures how close a neighbor is.
# By convention each even row of the matrix is shifted right, which realizes the hexagonality.
def weighting(signal, alpha, star, radius):
    # Python 2 code: xrange, and dis_B/2 is integer (floor) division.
    left = max(star[0] - radius, 0) # left boundary
    right = min(star[0] + radius+1, Units) # right boundary
    top = max(star[1] - radius, 0) #top boundary
    bottom = min(star[1] + radius+1, Units) #bottom boundary
    for A in xrange(left, right) : # scan neighborhood left-right
        dis_A = abs(A-star[0]) #Distance left or right with attractor j*
        for B in xrange(top, bottom): # scan neighborhood top-bottom
            dis_B = abs(B-star[1]) #Distance top or bottom with attractor j*
            dis_rad = dis_A if dis_A >= dis_B else dis_B #Distance total between neighboor element in matrix and j*, take the bigger distance
            left_lim = left + dis_B/2 #left limit contingent on vertical distance
            right_lim = right - dis_B/2 #Same for right limit
            if dis_B%2 == 1: #If distance of neighbooring attractor to j* is odd
                if B%2 == 0:left_lim += 1 #If j* row is even within the matrix, shift left limit by one to the right
                else : right_lim -= 1 #else shift right limit by one to the left
            if A >= right_lim or A<left_lim: continue #Do not modify neighbooring attractors not within the limits above, this guarantees hexagonality of the weighting process
            else:
                for i in xrange(len(signal)): #For both values of signal vector
                    W[A][B] += (signal[i] - W[A][B]) * (alpha / (1 + dis_rad))#distance degree linearized
# Driver functions:
from math import fabs, sqrt

def distance(u, v) : return fabs(u - v)  # 1-D distance between two weights

def new_rate(lap) : return 1 - (lap / float(T))  # linear decay from 1 to 0 over T laps

def teach(W) :
    # Train the self-organizing map: repeatedly present a random normalized
    # signal, find the winning unit j*, and pull its neighborhood toward the
    # signal while learning rate and radius decay linearly. (Python 2 code.)
    alpha = ALPHAo # learning rate
    radius = Ro # neighborhood
    sortie = [[0]*Units for x in xrange(Units)] # required by the course algorithm
    for lap in xrange(T) : #2 * (10**4)
        signal = normalize_v([randrange(1, 10)for x in xrange(2)]) # random input signal
        d = euclid_dis(signal) #Find euclidian distance
        star = find_att(d) #With it find j*
        provide_sortie(sortie, star) #Provide the output signal
        weighting(signal, alpha, star, radius) # neighborhood weighting
        alpha = ALPHAo * new_rate(lap) # new learning rate
        radius = int(Ro * new_rate(lap)) # new integral radius
    return lap
# Launch the training
print("Here, we are testing the topology map by using a 8*8 matrix, we initiate each processor's attractivity weight between .45 and .55, .2 as a learning coefficient. The entry signals are random and because we iterate 2 * (10**4), all processors' weight end up at the same place:\n")
from random import randrange
Units = 8
W = [[(randrange(450, 550) * .001) for i in xrange(Units)] for x in xrange(Units)] # 0.5 ± 5 %
M = [x[:] for x in W]  # snapshot of the initial weights for before/after display
ALPHAo = .2  # initial learning rate
Ro = Units / 2 # half of the width of the network
T = 2 * (10**4)  # number of training laps
teach(W) # training phase
# Python 2 print statement: show each unit's weight before --> after training.
for A in xrange(Units) :
    for B in xrange(Units) : print "%6i%2i\t%4.2f --> %4.2f" % (A,B, M[A][B], W[A][B])
|
python
|
from spaceone.inventory.manager.pricing_manager import PricingManager
|
python
|
#!/usr/bin/env python
"""Generate a YARA rule from the TypeRef metadata table of a .NET PE.

Usage: <script> <sample.exe>
Prints the rule to stdout and dumps the raw table bytes next to the sample.
"""
import sys
import dnfile
from hashlib import sha256

filename = sys.argv[1]

with open(filename, 'rb') as fh_in:
    sha256hash = sha256(fh_in.read()).hexdigest()

pe = dnfile.dnPE(filename)
#tbl = pe.net.mdtables.MemberRef
tbl = pe.net.mdtables.TypeRef

# Fix: num_rows and row_size are plain counts/sizes, NOT RVAs - passing them
# through get_offset_from_rva() produced bogus values, so the wrong number of
# bytes was read from the table.
tbl_num_rows = tbl.num_rows
tbl_row_size = tbl.row_size
tbl_bytes = pe.get_data(tbl.rva, (tbl_num_rows * tbl_row_size))

# Per 6-byte row: keep the first and last 2-byte fields, wildcard the middle
# two bytes ([2]) which vary between builds.
hex_str = ''
for i in range(0, len(tbl_bytes), 6):
    a = tbl_bytes[i:i + 2].hex()
    b = tbl_bytes[i + 4:i + 6].hex()
    hex_str += f'{a}[2]{b}'

# The condition matches MZ/PE headers, a .NET COM descriptor directory, and
# the "BSJB" metadata signature before testing the table pattern.
rule = '''
import "pe"
rule DotNet_Tbl_{}
{{
	meta:
		hash = "{}"
	strings:
		$ = {{{}}}
	condition:
		uint16(0) == 0x5A4D
		and (uint32(uint32(0x3C)) == 0x00004550)
		and pe.data_directories[pe.IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].virtual_address != 0
		and uint32be(
			pe.rva_to_offset(
				uint32(
					pe.rva_to_offset(pe.data_directories[pe.IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].virtual_address)+8
				)
			)
		) == 0x42534a42
		and all of them
}}
'''
print(rule.format(tbl.name, sha256hash, hex_str))

# Fix: the output name previously used the literal placeholder "(unknown)";
# name the dump after the analysed sample instead.
with open(f'{filename}_tbl_{tbl.name}.bin', 'wb') as fh_out:
    fh_out.write(tbl_bytes)
|
python
|
import pandas as pd

fname = "LBW_dataset.csv"
df = pd.read_csv(fname)

# --- cleaning ---
df = df.drop(columns=['Education'])
# Fill missing numeric values by linear interpolation.
df = df.interpolate()
# These columns are categorical/ordinal, so interpolated values are rounded
# back to whole numbers.
df['Community'] = df['Community'].round()
df['Delivery phase'] = df['Delivery phase'].round()
df['IFA'] = df['IFA'].round()

# --- standardisation ---
# Z-score each continuous column so no feature dominates the neural network
# through its scale. Vectorised column arithmetic replaces the previous
# twelve mean/std temporaries and four row-wise .apply() calls; each column
# still uses its own pre-standardisation mean and std.
for _col in ('Weight', 'Age', 'HB', 'BP'):
    df[_col] = (df[_col] - df[_col].mean()) / df[_col].std()

df.to_csv("LBW_Dataset_Cleaned.csv")
|
python
|
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import traceback
from timeit import default_timer as timer
import reversion
from django.core.management.base import BaseCommand
from django.db import transaction
from wells.models import Well, ActivitySubmission
from wells.stack import StackWells
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Create LEGACY activity-submission records for wells that lack any.

    Run from command line:
    python manage.py legacy_records
    """

    def add_arguments(self, parser):
        # Arguments added for debugging purposes.
        parser.add_argument('--start', type=int, nargs='?', help='Well to start at', default=1)
        parser.add_argument('--end', type=int, nargs='?', help='Well to end at', default=50)
        parser.add_argument('--next', type=int, nargs='?', help='Process n wells', default=0)

    def handle(self, *args, **options):
        """Entry point: create legacy records for the selected well range."""
        # pylint: disable=broad-except
        start = options['start']
        end = options['end']
        to_do_amount = options['next']
        # We turn off reversion of ActivitySubmissions as we don't want to bloat the DB
        reversion.unregister(ActivitySubmission)
        reversion.unregister(Well)
        num_wells = 0
        # --next (process the next N wells) takes precedence over --start/--end.
        if to_do_amount:
            wells = self.find_next_n_wells_without_legacy_records(start, to_do_amount)
        else:
            wells = self.find_wells_without_legacy_records(start, end)
        num_wells = len(wells)
        if num_wells == 0:
            self.stdout.write(self.style.ERROR(f'No records found between well tag number {start} and {end}'))
            return
        print(f'Creating {num_wells} legacy records from well_tag_number {wells[0].well_tag_number} to {wells[len(wells) - 1].well_tag_number}')
        failures = []
        # NOTE(review): `start` is reused as a timer here, shadowing the
        # --start option read above (which is no longer needed at this point).
        start = timer()
        for well in wells:
            try:
                self.create_legacy_record(well)
            except Exception as err:
                # Keep going on failure; all failures are reported at the end.
                failures.append(well.well_tag_number)
                print(f'Error creating legacy record for well_tag_number {well.well_tag_number}')
                # logger.exception(err)
                print(traceback.format_exc(limit=8))
        end = timer()
        num_fails = len(failures)
        num_created = num_wells - num_fails
        if num_created > 0:
            success_msg = 'Created {} legacy reports in {:.2f}s'.format(num_created, end - start)
            self.stdout.write(self.style.SUCCESS(success_msg))
        if num_fails > 0:
            failed_wells = ', '.join(map(str, failures))
            error_msg = 'Failed to create {} legacy reports for wells: {}' \
                .format(num_fails, failed_wells)
            clues_msg = 'See above stack traces for clues to why these failed'
            self.stdout.write(self.style.ERROR(error_msg))
            self.stdout.write(self.style.ERROR(clues_msg))

    def find_wells_without_legacy_records(self, start, end):
        """Wells in [start, end] with no activity submission at all."""
        wells = Well.objects \
            .filter(well_tag_number__gte=start,
                    well_tag_number__lte=end,
                    activitysubmission__isnull=True) \
            .order_by('well_tag_number')
        return wells

    def find_next_n_wells_without_legacy_records(self, start, num):
        """The next *num* wells at or after *start* without submissions."""
        wells = Well.objects \
            .filter(well_tag_number__gte=start, activitysubmission__isnull=True) \
            .order_by('well_tag_number') \
            [0:num]
        return wells

    @transaction.atomic
    def create_legacy_record(self, well):
        """Create the LEGACY activity submission for *well*, atomically."""
        # pylint: disable=protected-access
        # NOTE that _create_legacy_submission() will create the LEGACY activity
        # submission but then when the `submission_serializer.save()` is called
        # inside of `_create_legacy_submission()` this will trigger a
        # `StackWells().process()` call which will in turn call
        # `_update_well_record()` which checks to see if it should create a new
        # legacy record (it shouldn't). Instead it will just call
        # `self._stack(records, submission.well)` for this one legacy record.
        StackWells()._create_legacy_submission(well)
|
python
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
from astropy.io import fits
from astropy.table import Table
from gammapy.data import GTI
from gammapy.maps import MapCoord, Map
from gammapy.estimators.core import FluxEstimate
from gammapy.estimators.flux_point import FluxPoints
from gammapy.utils.table import table_from_row_data
from gammapy.modeling.models import (
SkyModel,
PowerLawSpectralModel,
PointSpatialModel,
Models,
)
from gammapy.utils.scripts import make_path
__all__ = ["FluxMaps"]
# For each SED type, the map name(s) that MUST be present in the data dict.
REQUIRED_MAPS = {
    "dnde": ["dnde"],
    "e2dnde": ["e2dnde"],
    "flux": ["flux"],
    "eflux": ["eflux"],
    "likelihood": ["norm"],
}

#TODO: add an entry for is_ul?
# For each SED type, map names that MAY additionally be present
# (errors, asymmetric errors, upper limits, scans).
OPTIONAL_MAPS = {
    "dnde": ["dnde_err", "dnde_errp", "dnde_errn", "dnde_ul"],
    "e2dnde": ["e2dnde_err", "e2dnde_errp", "e2dnde_errn", "e2dnde_ul"],
    "flux": ["flux_err", "flux_errp", "flux_errn", "flux_ul"],
    "eflux": ["eflux_err", "eflux_errp", "eflux_errn", "eflux_ul"],
    "likelihood": ["norm_err", "norm_errn", "norm_errp","norm_ul", "norm_scan", "stat_scan"],
}

log = logging.getLogger(__name__)
class FluxMaps(FluxEstimate):
"""A flux map container.
It contains a set of `~gammapy.maps.Map` objects that store the estimated flux as a function of energy as well as
associated quantities (typically errors, upper limits, delta TS and possibly raw quantities such counts,
excesses etc). It also contains a reference model to convert the flux values in different formats. Usually, this
should be the model used to produce the flux map.
The associated map geometry can use a `RegionGeom` to store the equivalent of flux points, or a `WcsGeom`/`HpxGeom`
to store an energy dependent flux map.
The container relies internally on the 'Likelihood' SED type defined in :ref:`gadf:flux-points`
and offers convenience properties to convert to other flux formats, namely:
``dnde``, ``flux``, ``eflux`` or ``e2dnde``. The conversion is done according to the reference model spectral shape.
Parameters
----------
data : dict of `~gammapy.maps.Map`
the maps dictionary. Expected entries are the following:
* norm : the norm factor
* norm_err : optional, the error on the norm factor.
* norm_errn : optional, the negative error on the norm factor.
* norm_errp : optional, the positive error on the norm factor.
* norm_ul : optional, the upper limit on the norm factor.
* norm_scan : optional, the norm values of the test statistic scan.
* stat_scan : optional, the test statistic scan values.
* ts : optional, the delta TS associated with the flux value.
* sqrt_ts : optional, the square root of the TS, when relevant.
reference_model : `~gammapy.modeling.models.SkyModel`, optional
the reference model to use for conversions. Default in None.
If None, a model consisting of a point source with a power law spectrum of index 2 is assumed.
gti : `~gammapy.data.GTI`
the maps GTI information. Default is None.
"""
def __init__(self, data, reference_model=None, gti=None):
self.geom = data['norm'].geom
if reference_model == None:
log.warning("No reference model set for FluxMaps. Assuming point source with E^-2 spectrum.")
reference_model = self._default_model()
self.reference_model = reference_model
self.gti = gti
super().__init__(data, spectral_model=reference_model.spectral_model)
    @staticmethod
    def _default_model():
        """Fallback reference model: point source with an E^-2 power law."""
        return SkyModel(spatial_model=PointSpatialModel(), spectral_model=PowerLawSpectralModel(index=2))

    @property
    def _additional_maps(self):
        """Keys in ``data`` beyond the required/optional likelihood maps (e.g. ts)."""
        return self.data.keys() - (REQUIRED_MAPS["likelihood"] + OPTIONAL_MAPS["likelihood"])

    @property
    def energy_ref(self):
        """Reference energies: centers of the energy axis bins."""
        axis = self.geom.axes["energy"]
        return axis.center

    @property
    def energy_min(self):
        """Lower edges of the energy bins."""
        axis = self.geom.axes["energy"]
        return axis.edges[:-1]

    @property
    def energy_max(self):
        """Upper edges of the energy bins."""
        axis = self.geom.axes["energy"]
        return axis.edges[1:]
@property
def ts(self):
if not "ts" in self.data:
raise KeyError("No ts map present in FluxMaps.")
return self.data["ts"]
@property
def sqrt_ts(self):
if not "sqrt_ts" in self.data:
raise KeyError("No sqrt_ts map present in FluxMaps.")
return self.data["sqrt_ts"]
    def __str__(self):
        """Human readable summary: geometry, available quantities, reference model."""
        str_ = f"{self.__class__.__name__}\n"
        # First line of the geometry repr, then the rest (line 2 is skipped).
        str_ += "\t"+ "\t\n".join(str(self.norm.geom).split("\n")[:1])
        str_ += "\n\t"+"\n\t".join(str(self.norm.geom).split("\n")[2:])
        str_ += f"\n\tAvailable quantities : {self._available_quantities}\n\n"
        str_ += f"\tAdditional maps : {self._additional_maps}\n\n"
        str_ += "\tReference model:\n"
        if self.reference_model is not None:
            str_ += "\t" + "\n\t".join(str(self.reference_model).split("\n")[2:])
        return str_.expandtabs(tabsize=2)
    def get_flux_points(self, coord=None):
        """Extract flux points at a given position.

        The flux points are returned in the form of a
        `~gammapy.estimators.FluxPoints` object (which stores the flux
        points in an `~astropy.table.Table`).

        Parameters
        ----------
        coord : `~astropy.coordinates.SkyCoord`
            the coordinate where the flux points are extracted.
            If None, the center of the map geometry is used.

        Returns
        -------
        fluxpoints : `~gammapy.estimators.FluxPoints`
            the flux points object
        """
        if coord is None:
            coord = self.geom.center_skydir
        energies = self.energy_ref
        coords = MapCoord.create(dict(skycoord=coord, energy=energies))
        ref = self.dnde_ref.squeeze()
        fp = dict()
        fp["norm"] = self.norm.get_by_coord(coords) * self.norm.unit
        # Sample every available norm-related quantity at the coordinate.
        for quantity in self._available_quantities:
            norm_quantity = f"norm_{quantity}"
            res = getattr(self, norm_quantity).get_by_coord(coords)
            res *= getattr(self, norm_quantity).unit
            fp[norm_quantity] = res
        # Also sample any user-supplied additional maps.
        for additional_quantity in self._additional_maps:
            res = self.data[additional_quantity].get_by_coord(coords)
            res *= self.data[additional_quantity].unit
            fp[additional_quantity] = res
        # TODO: add support of norm and stat scan
        # Build one table row per energy bin.
        rows = []
        for idx, energy in enumerate(self.energy_ref):
            result = dict()
            result["e_ref"] = energy
            result["e_min"] = self.energy_min[idx]
            result["e_max"] = self.energy_max[idx]
            result["ref_dnde"] = ref[idx]
            result["norm"] = fp["norm"][idx]
            for quantity in self._available_quantities:
                norm_quantity = f"norm_{quantity}"
                result[norm_quantity] = fp[norm_quantity][idx]
            for key in self._additional_maps:
                result[key] = fp[key][idx]
            rows.append(result)
        table = table_from_row_data(rows=rows, meta={"SED_TYPE": "likelihood"})
        return FluxPoints(table).to_sed_type('dnde')
def to_dict(self, sed_type="likelihood"):
"""Return maps in a given SED type in the form of a dictionary.
Parameters
----------
sed_type : str
sed type to convert to. Default is `Likelihood`
Returns
-------
map_dict : dict
dictionary containing the requested maps.
"""
if sed_type == "likelihood":
map_dict = self.data
else:
map_dict = {}
for entry in REQUIRED_MAPS[sed_type]:
map_dict[entry] = getattr(self, entry)
for entry in OPTIONAL_MAPS[sed_type]:
try:
map_dict[entry] = getattr(self, entry)
except KeyError:
pass
for key in self._additional_maps:
map_dict[key] = self.data[key]
return map_dict
def write(self, filename, filename_model=None, overwrite=False, sed_type="likelihood"):
"""Write flux map to file.
Parameters
----------
filename : str
Filename to write to.
filename_model : str
Filename of the model (yaml format).
If None, keep string before '.' and add '_model.yaml' suffix
overwrite : bool
Overwrite file if it exists.
sed_type : str
sed type to convert to. Default is `likelihood`
"""
filename = make_path(filename)
if filename_model is None:
name_string = filename.as_posix()
for suffix in filename.suffixes:
name_string.replace(suffix,'')
filename_model = name_string + '_model.yaml'
filename_model=make_path(filename_model)
hdulist = self.to_hdulist(sed_type)
models = Models(self.reference_model)
models.write(filename_model, overwrite=overwrite)
hdulist[0].header['MODEL'] = filename_model.as_posix()
hdulist.writeto(str(make_path(filename)), overwrite=overwrite)
    def to_hdulist(self, sed_type="likelihood", hdu_bands=None):
        """Convert flux map to list of HDUs.

        For now, one cannot export the reference model.

        Parameters
        ----------
        sed_type : str
            sed type to convert to. Default is `Likelihood`
        hdu_bands : str
            Name of the HDU with the BANDS table. Default is 'BANDS'
            If set to None, each map will have its own hdu_band

        Returns
        -------
        hdulist : `~astropy.io.fits.HDUList`
            Map dataset list of HDUs.
        """
        # Skip each sub-HDUList's primary HDU when concatenating.
        exclude_primary = slice(1, None)
        hdu_primary = fits.PrimaryHDU()
        hdulist = fits.HDUList([hdu_primary])
        # Record the SED type so `from_hdulist` can interpret the maps.
        hdu_primary.header["SED_TYPE"] = sed_type
        map_dict = self.to_dict(sed_type)
        for key in map_dict:
            hdulist += map_dict[key].to_hdulist(hdu=key, hdu_bands=hdu_bands)[exclude_primary]
        if self.gti:
            hdu = fits.BinTableHDU(self.gti.table, name="GTI")
            hdulist.append(hdu)
        return hdulist
    @classmethod
    def read(cls, filename):
        """Read map dataset from file.

        Parameters
        ----------
        filename : str
            Filename to read from.

        Returns
        -------
        flux_map : `~gammapy.estimators.FluxMaps`
            Flux map.
        """
        # memmap=False: read everything into memory so the file handle can
        # be released when leaving the context manager.
        with fits.open(str(make_path(filename)), memmap=False) as hdulist:
            return cls.from_hdulist(hdulist)
    @classmethod
    def from_hdulist(cls, hdulist, hdu_bands=None):
        """Create flux map dataset from list of HDUs.

        Parameters
        ----------
        hdulist : `~astropy.io.fits.HDUList`
            List of HDUs.
        hdu_bands : str
            Name of the HDU with the BANDS table. Default is 'BANDS'
            If set to None, each map should have its own hdu_band

        Returns
        -------
        fluxmaps : `~gammapy.estimators.FluxMaps`
            the flux map.
        """
        # The SED type written by `to_hdulist` drives the interpretation of
        # all maps below.
        try:
            sed_type = hdulist[0].header["SED_TYPE"]
        except KeyError:
            raise ValueError(f"Cannot determine SED type of flux map from primary header.")
        result = {}
        # Required maps must all be present.
        for map_type in REQUIRED_MAPS[sed_type]:
            if map_type.upper() in hdulist:
                result[map_type] = Map.from_hdulist(hdulist, hdu=map_type, hdu_bands=hdu_bands)
            else:
                raise ValueError(f"Cannot find required map {map_type} for SED type {sed_type}.")
        # Optional maps are read only when present.
        for map_type in OPTIONAL_MAPS[sed_type]:
            if map_type.upper() in hdulist:
                result[map_type] = Map.from_hdulist(hdulist, hdu=map_type, hdu_bands=hdu_bands)
        # Read additional image hdus
        for hdu in hdulist[1:]:
            if hdu.is_image:
                if hdu.name.lower() not in (REQUIRED_MAPS[sed_type]+OPTIONAL_MAPS[sed_type]):
                    result[hdu.name.lower()] = Map.from_hdulist(hdulist, hdu=hdu.name, hdu_bands=hdu_bands)
        # The reference model is stored in a separate YAML file whose path
        # is kept in the MODEL header keyword (see `write`).
        model_filename = hdulist[0].header.get("MODEL", None)
        reference_model = None
        if model_filename:
            try:
                reference_model = Models.read(model_filename)[0]
            except FileNotFoundError:
                raise FileNotFoundError(f"Cannot find {model_filename} model file. Check MODEL keyword.")
        if "GTI" in hdulist:
            gti = GTI(Table.read(hdulist["GTI"]))
        else:
            gti = None
        return cls.from_dict(result, sed_type, reference_model, gti)
@staticmethod
def _validate_type(maps, sed_type):
"""Check that map input is valid and correspond to one of the SED type."""
try:
required = set(REQUIRED_MAPS[sed_type])
except:
raise ValueError(f"Unknown SED type.")
if not required.issubset(maps.keys()):
missing = required.difference(maps.keys())
raise ValueError(
"Missing maps for sed type '{}':" " {}".format(sed_type, missing)
)
@classmethod
def from_dict(cls, maps, sed_type='likelihood', reference_model=None, gti=None):
"""Create FluxMaps from a dictionary of maps.
Parameters
----------
maps : dict
dictionary containing the requested maps.
sed_type : str
sed type to convert to. Default is `Likelihood`
reference_model : `~gammapy.modeling.models.SkyModel`, optional
the reference model to use for conversions. Default in None.
If None, a model consisting of a point source with a power law spectrum of index 2 is assumed.
gti : `~gammapy.data.GTI`
the maps GTI information. Default is None.
Returns
-------
fluxmaps : `~gammapy.estimators.FluxMaps`
the flux map.
"""
cls._validate_type(maps, sed_type)
if sed_type == 'likelihood':
return cls(maps, reference_model)
e_ref = maps[sed_type].geom.axes["energy"].center
e_edges = maps[sed_type].geom.axes["energy"].edges
if reference_model is None:
log.warning("No reference model set for FluxMaps. Assuming point source with E^-2 spectrum.")
reference_model = cls._default_model()
ref_dnde = reference_model.spectral_model(e_ref)
if sed_type == "dnde":
factor = ref_dnde
elif sed_type == "flux":
factor = reference_model.spectral_model.integral(e_edges[:-1], e_edges[1:])
elif sed_type == "eflux":
factor = reference_model.spectral_model.energy_flux(e_edges[:-1], e_edges[1:])
elif sed_type == "e2dnde":
factor = e_ref ** 2 * ref_dnde
# to ensure the units are similar
factor = factor.to(maps[sed_type].unit)
data = dict()
data["norm"] = maps[sed_type]/factor[:,np.newaxis, np.newaxis]
for map_type in OPTIONAL_MAPS[sed_type]:
if map_type in maps:
norm_type = map_type.replace(sed_type, "norm")
data[norm_type] = maps[map_type]/factor[:,np.newaxis, np.newaxis]
# We add the remaining maps
for key in maps.keys() - (REQUIRED_MAPS[sed_type] + OPTIONAL_MAPS[sed_type]):
data[key] = maps[key]
return cls(data, reference_model, gti)
|
python
|
from flask import Flask
from driver import get_final_kmeans
app = Flask(__name__)
@app.route("/")
def hello():
    # Root endpoint: run the k-means driver and return its result as the
    # HTTP response body.
    return get_final_kmeans()
|
python
|
from typing import Any, Dict, List, Optional, Union
from interactions.ext.paginator import Paginator
from thefuzz.fuzz import ratio
from interactions import Client, CommandContext, DictSerializerMixin, Embed, Extension
from .settings import AdvancedSettings, PaginatorSettings, TemplateEmbed, typer_dict
class RawHelpCommand(DictSerializerMixin):
    """Fetches an application's commands (slash commands, subcommands,
    subcommand groups and context menus) and flattens them into a list of
    dicts suitable for building a help display (see :meth:`get_commands`).
    """
    __slots__ = (
        "_json",
        "client",
        "scope",
        "sync_commands",
        "_commands",
    )
    _json: Dict[str, Any]
    client: Client  # bot client used for the HTTP requests
    sync_commands: bool  # if True, re-fetch commands on every call
    _commands: List[dict]  # cached flattened command list
    def __init__(
        self,
        client: Client,
        sync_commands: bool = False,
        **kwargs,
    ):
        # DictSerializerMixin presumably assigns the keyword arguments onto
        # the slots — confirm against the interactions library.
        super().__init__(client=client, sync_commands=sync_commands, **kwargs)
        self._commands = []
    async def _get_all_commands(
        self,
        global_commands: bool = True,
        guild_commands: bool = True,
        guild: Optional[int] = None,
    ):
        """Fetch the raw application commands from the HTTP API.

        Global commands are dict entries of the result list; when guild
        commands are requested, the guild's command list is appended as ONE
        nested list (consumers detect it with ``isinstance(command, list)``).

        Raises:
            RuntimeError: if no commands were found at all.
        """
        result = []
        if global_commands:
            result = await self.client._http.get_application_commands(self.client.me.id)
            result = [] if result is None else result
        if guild_commands and guild:
            guild_result = await self.client._http.get_application_commands(
                self.client.me.id, guild
            )
            # Append the guild command list as a single nested element.
            result.append(guild_result) if guild_result is not None else None
            result = list(filter(lambda x: x is not None, result))
        if not result:
            raise RuntimeError("No commands found")
        return result
    async def get_commands(
        self,
        global_commands: bool = True,
        guild_commands: bool = True,
        guild: Optional[int] = None,
    ):
        """Return the flattened command list, cached unless ``sync_commands``
        is set."""
        if self._commands and not self.sync_commands:
            return self._commands
        # get all commands
        all_commands = await self._get_all_commands(global_commands, guild_commands, guild)
        # separate by category
        commands, subcommands, menus = [], [], []
        guild_id_index = None
        # NOTE(review): `guild_id_index` is only rebound inside the helper;
        # the local here is never updated (ints are immutable).
        await self.__sort_all_commands(all_commands, commands, subcommands, menus, guild_id_index)
        master: List[dict] = []
        # Plain slash commands.
        for command in commands:
            command: dict
            # Name of the extension (cog) that owns this command, if any.
            cmd_ext: Optional[Extension] = next(
                (
                    ext_name
                    for ext_name, ext in self.client._extensions.items()
                    if isinstance(ext, Extension)
                    and f'command_{command["name"]}' in ext._commands.keys()
                ),
                None,
            )
            master.append(
                {
                    "name": command["name"],
                    "description": command["description"],
                    "options": command["options"],
                    "type": "slash command",
                    "extension": cmd_ext,
                }
            )
        # Subcommands (option type 1) and subcommand groups (other types).
        for subcommand in subcommands:
            for sub in subcommand["options"]:
                sub: dict
                if sub["type"] == 1:
                    sub["options"] = sub.get("options", [])
                    cmd_ext: Optional[Extension] = next(
                        (
                            ext_name
                            for ext_name, ext in self.client._extensions.items()
                            if isinstance(ext, Extension)
                            and f'command_{subcommand["name"]}' in ext._commands.keys()
                        ),
                        None,
                    )
                    master.append(
                        {
                            "name": f'{subcommand["name"]} {sub["name"]}',
                            "description": sub["description"],
                            "options": sub["options"],
                            "type": "subcommand",
                            "extension": cmd_ext,
                        }
                    )
                else:
                    # Group: only the first nested option is surfaced here.
                    sub["options"][0]["options"] = sub["options"][0].get("options", [])
                    cmd_ext: Optional[Extension] = next(
                        (
                            ext_name
                            for ext_name, ext in self.client._extensions.items()
                            if isinstance(ext, Extension)
                            and f'command_{subcommand["name"]}' in ext._commands.keys()
                        ),
                        None,
                    )
                    master.append(
                        {
                            "name": f'{subcommand["name"]} {sub["name"]} {sub["options"][0]["name"]}',
                            "description": sub["options"][0]["description"],
                            "options": sub["options"][0]["options"],
                            "type": "subcommand group",
                            "extension": cmd_ext,
                        }
                    )
        # Context menus: type 2 is a user menu, anything else a message menu.
        for menu in menus:
            cmd_ext: Optional[Extension] = next(
                (
                    ext_name
                    for ext_name, ext in self.client._extensions.items()
                    if isinstance(ext, Extension)
                    and f'command_{menu["name"]}' in ext._commands.keys()
                ),
                None,
            )
            master.append(
                {
                    "name": menu["name"],
                    "description": None,
                    "type": ("user menu" if menu["type"] == 2 else "message menu"),
                    "extension": cmd_ext,
                }
            )
        # Normalize: every option gets an explicit "required" flag.
        for interaction in master:
            interaction: dict
            if interaction.get("options", None) is not None:
                for option in interaction["options"]:
                    option: dict
                    option["required"] = option.get("required", False)
        self._commands = master
        return master
    async def __sort_all_commands(
        self,
        all_commands: List[Union[List[dict], dict]],
        commands: list,
        subcommands: list,
        menus: list,
        guild_ids_index: Optional[int],
    ):
        """Split the raw commands into ``commands``, ``subcommands`` and
        ``menus`` — the three lists are mutated in place."""
        # first, sort all global commands
        for command in all_commands:
            if isinstance(command, list):
                # A nested list marks the guild command batch
                # (see _get_all_commands).
                guild_ids_index = all_commands.index(command)
                break
            if command["type"] == 1:
                if "options" in command.keys() and command["options"][0]["type"] in (
                    1,
                    2,
                ):
                    subcommands.append(command)
                else:
                    if "options" not in command.keys():
                        command["options"] = []
                    commands.append(command)
            else:
                menus.append(command)
        # next, sort all guild commands if applicable
        if guild_ids_index is not None:
            for command in all_commands[guild_ids_index]:
                if command["type"] == 1:
                    if "options" in command.keys() and command["options"][0]["type"] in (
                        1,
                        2,
                    ):
                        subcommands.append(command)
                    else:
                        if "options" not in command.keys():
                            command["options"] = []
                        commands.append(command)
                else:
                    menus.append(command)
class HelpCommand(RawHelpCommand):
    """User-facing help command: renders the collected commands into
    paginated embeds, optionally filtered by a fuzzy search string."""
    __slots__ = (
        "_json",
        "client",
        "sync_commands",
        "template_embed",
        "paginator_settings",
        "advanced_settings",
    )
    client: Client
    sync_commands: bool
    template_embed: TemplateEmbed  # look & feel of the generated embeds
    paginator_settings: PaginatorSettings  # paginator behaviour
    advanced_settings: AdvancedSettings  # blacklist, search limits, ...
    def __init__(
        self,
        client: Client,
        sync_commands: bool = False,
        template_embed: TemplateEmbed = TemplateEmbed(),
        paginator_settings: PaginatorSettings = PaginatorSettings(),
        advanced_settings: AdvancedSettings = AdvancedSettings(),
    ) -> None:
        # NOTE(review): the settings defaults are shared mutable default
        # arguments — every HelpCommand created without explicit settings
        # shares the same objects.
        super().__init__(
            client=client,
            sync_commands=sync_commands,
            template_embed=template_embed,
            paginator_settings=paginator_settings,
            advanced_settings=advanced_settings,
        )
    async def send_help(
        self,
        ctx: CommandContext,
        search: Optional[str] = None,
        guild_id: Optional[int] = None,
    ):
        """Send the paginated help.

        With ``search`` given, fuzzy-matched extensions/commands are shown;
        otherwise an overview page plus one page per extension is built.
        """
        if guild_id is None:
            guild_id = ctx.guild_id
        await self.get_commands(guild=guild_id)
        data: List[dict] = self._commands.copy()
        if search is not None:
            search: str = search.lower()
            answers: dict = {}  # candidate name -> fuzzy match score
            list_extensions: list = []
            list_commands: list = []
            # extensions
            for interaction in data:
                if self.__ext_in_blacklist(interaction):
                    continue
                percent = ratio(search, interaction["extension"])
                if interaction["extension"] not in answers:
                    answers[interaction["extension"]] = percent
                    list_extensions.append(interaction["extension"])
            # commands
            for interaction in data:
                if self.__cmd_in_blacklist(interaction):
                    continue
                percent = ratio(search, interaction["name"])
                if interaction["name"] not in answers.keys():
                    answers[interaction["name"]] = percent
                    list_commands.append(interaction["name"])
            # Best matches first, capped at max_search_results.
            sorted_data: list = sorted(answers, key=answers.get, reverse=True)[
                : self.advanced_settings.max_search_results
            ]
            embeds: List[Embed] = []
            for i in range(0, len(sorted_data), self.template_embed.fields_per_embed):
                page = Embed(
                    title=f"Search results for `{search}`, {i + 1} - {i + self.template_embed.fields_per_embed}",
                    color=self.template_embed.color,
                )
                for match in sorted_data[i : (i + self.template_embed.fields_per_embed)]:
                    if match in list_extensions:
                        ext: str = None
                        cmds: List[Dict[str, dict]] = []
                        for interaction in data:
                            if match == interaction["extension"]:
                                ext = interaction["extension"]
                                cmds.append({interaction["name"]: interaction})
                        if ext is not None:
                            # NOTE(review): `value` is built but never added
                            # to the embed — extension matches currently
                            # produce no field.
                            value = "Category\nCommands:\n"
                            for cmd in cmds:
                                in_blacklist = False
                                if self.advanced_settings.blacklist:
                                    # NOTE(review): `self.blacklist` is not
                                    # an attribute of this class — this looks
                                    # like it should be
                                    # `self.advanced_settings.blacklist` and
                                    # would raise AttributeError as written.
                                    for black in self.blacklist:
                                        if black in list(cmd.keys())[0]:
                                            in_blacklist = True
                                            break
                                if in_blacklist:
                                    continue
                    elif match in list_commands:
                        # Find the matching command entry (loop variable
                        # `interaction` is reused after the break).
                        for interaction in data:
                            if match == interaction["name"]:
                                break
                        options: str = ""
                        if interaction["type"] in {
                            "slash command",
                            "subcommand",
                            "subcommand group",
                        }:
                            for option in interaction["options"]:
                                the_type = typer_dict(
                                    option["type"],
                                    option["choices"] if "choices" in option.keys() else [],
                                )
                                options += f"[{option['name']}: {'' if option['required'] else 'optional '}{the_type}], "
                        elif "menu" not in interaction["type"]:
                            options += interaction["options"]
                        # Trim the trailing ", " separator.
                        options = options[:-2] if options.endswith(", ") else options
                        how_to_use = f"\nHow to use:\n```\n{f'/' if interaction['type'] in {'slash command', 'subcommand', 'subcommand group'} else ('Right click on a ' + interaction['type'].replace(' menu', '')) if 'menu' in interaction['type'] else '/'}{'' if 'menu' in interaction['type'] else interaction['name']} {options}\n```"
                        page.add_field(
                            name=interaction["name"],
                            value=(
                                ""
                                if interaction["description"] is None
                                else interaction["description"]
                            )
                            + f"\n{interaction['type'].capitalize()}"
                            + how_to_use,
                            inline=False,
                        )
                if self.template_embed.footer is not None:
                    page.set_footer(text=self.template_embed.footer)
                embeds.append(page)
            return await Paginator(
                client=self.client,
                ctx=ctx,
                pages=embeds,
                timeout=self.paginator_settings.timeout,
                author_only=self.paginator_settings.author_only,
                use_select=self.paginator_settings.use_select,
                extended_buttons=self.paginator_settings.extended_buttons,
            ).run()
        else:
            # Overview page listing every extension and its commands.
            first_page = (
                Embed(title="Help", color=self.template_embed.color)
                if self.template_embed.description is None
                else Embed(
                    title="Help",
                    description=self.template_embed.description,
                    color=self.template_embed.color,
                )
            )
            if self.template_embed.footer is not None:
                first_page.set_footer(text=self.template_embed.footer)
            embeds: List[Embed] = [first_page]
            exts: List[dict] = []
            # Collect the distinct extensions.
            for interaction in data:
                if self.__ext_in_blacklist(interaction):
                    continue
                if {
                    "name": interaction["extension"],
                    "interactions": [],
                } not in exts:
                    exts.append(
                        {
                            "name": interaction["extension"],
                            "interactions": [],
                        }
                    )
            # One overview field per extension.
            for ext in exts:
                value = "\n"
                for interaction in data:
                    if self.__cmd_in_blacklist(interaction):
                        continue
                    if interaction["extension"] == ext["name"]:
                        ext["interactions"].append(interaction)
                        value += f"`{'/' if interaction['type'] in ['slash command', 'subcommand', 'subcommand group'] else '' if 'menu' in interaction['type'] else '/'}{interaction['name']}`, "
                value = value[:-2] if value.endswith(", ") else value
                first_page.add_field(
                    name=(
                        self.template_embed.no_category_name if ext["name"] is None else ext["name"]
                    ),
                    value=value,
                    inline=False,
                )
            # Detail pages per extension, chunked by fields_per_embed.
            for ext in exts:
                for i in range(0, len(ext["interactions"]), self.template_embed.fields_per_embed):
                    next_page = Embed(
                        title=f"{self.template_embed.no_category_name if ext['name'] is None else ext['name']} {i + 1} - {i + self.template_embed.fields_per_embed}",
                        color=self.template_embed.color,
                    )
                    for cmd in ext["interactions"][i : (i + self.template_embed.fields_per_embed)]:
                        cmd: dict
                        cmd_name: str = cmd["name"]
                        cmd_desc: str = cmd["description"]
                        cmd_opts: list = cmd.get("options", [])
                        cmd_type: str = cmd["type"]
                        desc = (
                            "No description"
                            if cmd_desc is None or cmd_desc == [] or not cmd_desc
                            else cmd_desc
                        ) + "\nHow to use:"
                        how_to_use = f"\n```\n{f'/{cmd_name}' if 'menu' not in cmd_type else ('Right click on a ' + cmd['type'].replace(' menu', ''))} "
                        if isinstance(cmd_opts, list):
                            for opt in cmd_opts:
                                opt: dict
                                _type = typer_dict(opt["type"], opt.get("choices", []))
                                how_to_use += f"[{opt['name']}: {'optional ' if not opt['required'] else ''}{_type}], "
                        elif cmd_opts is not None:
                            how_to_use += cmd_opts
                        how_to_use = how_to_use[:-2] if how_to_use.endswith(", ") else how_to_use
                        how_to_use += "\n```"
                        next_page.add_field(name=cmd_name, value=desc + how_to_use, inline=False)
                    if self.template_embed.footer is not None:
                        next_page.set_footer(text=self.template_embed.footer)
                    embeds.append(next_page)
            return await Paginator(
                client=self.client,
                ctx=ctx,
                pages=embeds,
                timeout=self.paginator_settings.timeout,
                author_only=self.paginator_settings.author_only,
                use_select=self.paginator_settings.use_select,
                extended_buttons=self.paginator_settings.extended_buttons,
            ).run()
    def __cmd_in_blacklist(self, interaction: dict):
        # True if any blacklist entry occurs in the command name or its
        # extension name.
        return (
            any(
                (black in interaction["name"]) or (black in interaction["extension"])
                for black in self.advanced_settings.blacklist
            )
            if self.advanced_settings.blacklist is not None
            else False
        )
    def __ext_in_blacklist(self, interaction: dict):
        # Commands without an extension are never extension-blacklisted.
        return (
            self.__cmd_in_blacklist(interaction) if interaction["extension"] is not None else False
        )
|
python
|
import numpy as np
from sklearn.cluster import MeanShift
# `sklearn.datasets.samples_generator` was removed in scikit-learn 0.24;
# `make_blobs` lives in `sklearn.datasets`.
from sklearn.datasets import make_blobs
import matplotlib.pyplot as plt

# Two well-separated centers for the synthetic data set.
centers = [[1, 1], [5, 5]]
X, y = make_blobs(n_samples=10000, centers=centers, cluster_std=1)

# Show the raw data first.
plt.scatter(X[:, 0], X[:, 1])
plt.show()

# Fit MeanShift with its default (automatically estimated) bandwidth.
ms = MeanShift()
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_

n_clusters_ = len(np.unique(labels))
print("Number of estimated clusters:", n_clusters_)

# Repeat the color cycle so any label index maps to a style.
colors = 10 * ['r.', 'g.', 'b.', 'c.', 'k.', 'y.', 'm.']
print(colors)
print(labels)

# Plot every point colored by its cluster label (one plot call per point:
# slow but simple), then overlay the cluster centers.
for i in range(len(X)):
    plt.plot(X[i][0], X[i][1], colors[labels[i]], markersize=10)
plt.scatter(cluster_centers[:, 0], cluster_centers[:, 1],
            marker="x", s=150, linewidths=5, zorder=10)
plt.show()
|
python
|
# Copyright 2020 Arthur Coqué, Valentine Aubard, Pôle OFB-INRAE ECLA, UR RECOVER
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains a reader for S2_THEIA products.
This reader is dedicated to extract data from S2_THEIA_L2A.
Example::
reader = S2THEIAReader(**config)
reader.extract_bands()
reader.create_ds()
extracted_dataset = reader.dataset
"""
import warnings
from collections import defaultdict, namedtuple
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
from zipfile import ZipFile
import numpy as np
import rasterio
import xarray as xr
from lxml import etree
from pyproj import CRS
from rasterio.windows import Window
from tqdm import tqdm
from sisppeo.readers.reader import Reader, Inputs
from sisppeo.utils.exceptions import InputError, ProductError
from sisppeo.utils.readers import (get_ij_bbox, decode_data,
resample_band_array,
resize_and_resample_band_array)
# THEIA band GeoTIFFs carry no georeferencing warning worth surfacing here.
warnings.filterwarnings('ignore', category=rasterio.errors.NotGeoreferencedWarning)
# Base reader inputs extended with the S2-THEIA-specific parameters.
S2THEIAInputs = namedtuple('S2THEIAInputs', Inputs._fields
                           + ('out_resolution', 'theia_bands', 'theia_masks'))
def format_zippath(path: Path) -> str:
    """Return *path* resolved and formatted as a ``zip://...!/`` URI."""
    resolved = path.resolve()
    return 'zip://' + str(resolved) + '!/'
class S2THEIAReader(Reader):
    """A reader dedicated to extract data from S2_THEIA_L2A products.

    For more information about THEIA S2 L2A products, please see:
    https://labo.obs-mip.fr/multitemp/sentinel-2/theias-sentinel-2-l2a-product-format/

    Attributes:
        dataset: A dataset containing extracted data.
    """
    def __init__(self,
                 input_product: Path,
                 requested_bands: List[str],
                 geom: Optional[dict] = None,
                 out_resolution: Optional[int] = None,
                 theia_bands: str = 'FRE',
                 theia_masks: Optional[Dict[str, Optional[List[int]]]] = None,
                 **_ignored) -> None:
        """See base class.

        Args:
            out_resolution: The wanted resolution of the output product. Used
                when performing resampling operations.
            theia_bands: The bands to be extracted. Must be either "SRE"
                (for Surface REflectance) or "FRE" (for Flat REflectance).
            theia_masks: A dict whose keys are the names of THEIA's masks to
                extract ("CLM", "MG2" or "SAT") and vals are lists of bits to
                use (e.g., [0, 1, 2] ; if None, all bits will be used for the
                corresponding mask).
        """
        super().__init__(input_product, requested_bands, geom)
        if out_resolution not in (None, 10, 20):
            raise InputError('"out_resolution" must be in (10, 20)')
        if theia_bands not in ('FRE', 'SRE'):
            raise InputError('"theia_bands" must be either "SRE" or "FRE"')
        # Extend the base inputs with the S2-THEIA-specific parameters.
        self._inputs = S2THEIAInputs(*self._inputs, out_resolution,
                                     theia_bands, theia_masks)
    def extract_bands(self) -> None:
        """See base class."""
        # Check if data are compressed
        compressed = False
        if self._inputs.input_product.suffix == '.zip':
            compressed = True
        # Load metadata
        metadata = self._load_metadata_from_MTD(compressed)
        quantification_value, nodata = _get_product_coefs(metadata)
        # Filter bands: build (path, band) pairs for each requested band,
        # either inside the zip archive or on disk.
        if compressed:
            with ZipFile(self._inputs.input_product) as archive:
                root_path = format_zippath(self._inputs.input_product)
                try:
                    requested_bands = [
                        (root_path + [_ for _ in archive.namelist() if _.endswith(
                            f'_{self._inputs.theia_bands}_{band}.tif'
                        )][0], band) for band in self._inputs.requested_bands
                    ]
                except IndexError as no_band:
                    msg = ('One of the requested bands is not found in the '
                           'given product.')
                    raise ProductError(msg) from no_band
        else:
            try:
                requested_bands = [
                    (list(self._inputs.input_product.glob(
                        f'*_{self._inputs.theia_bands}_{band}.tif'
                    ))[0], band) for band in self._inputs.requested_bands
                ]
            except IndexError as no_band:
                msg = ('One of the requested bands is not found in the given '
                       'product.')
                raise ProductError(msg) from no_band
        # B2/B3/B4/B8 are tagged "R1" (10 m, per min_res below); all other
        # bands are tagged "R2" (20 m).
        tmp = ['R1' if band in ('B2', 'B3', 'B4', 'B8') else 'R2'
               for band in self._inputs.requested_bands]
        if 'R1' in tmp and 'R2' in tmp:
            # Mixed resolutions: reverse-sort so R2 bands come first.
            requested_bands = [rband for _, rband
                               in sorted(zip(tmp, requested_bands),
                                         reverse=True)]
        if 'R1' in tmp:
            min_res = 10
        else:
            min_res = 20
        # Set the default resolution
        if self._inputs.out_resolution is None:
            self._inputs = self._inputs._replace(out_resolution=min_res)
        # Extract data
        data = {}
        for path, band in tqdm(requested_bands, unit='bands'):
            with rasterio.open(path) as subdataset:
                if self._intermediate_data['x'] is None:  # 1st extracted_band
                    if ((out_res := self._inputs.out_resolution)
                            > (in_res := subdataset.res[0])):
                        msg = (f'"out_resolution" must be <= {in_res} ; '
                               f'here, out_resolution={out_res}')
                        raise InputError(msg)
                    # Store the CRS
                    self._intermediate_data['crs'] = CRS.from_epsg(
                        subdataset.crs.to_epsg()
                    )
                    # The first band fixes the shared bounding box.
                    band_array, xy_bbox = self._extract_first_band(
                        subdataset, quantification_value, nodata
                    )
                else:
                    band_array = self._extract_nth_band(
                        subdataset, xy_bbox, quantification_value, nodata
                    )
                # Add the leading time axis (length 1).
                data[band] = band_array.reshape(1, *band_array.shape)
        print('')
        # Mask data
        if self._inputs.theia_masks is not None:
            for mask_name in self._inputs.theia_masks:
                # Pick the mask raster matching the output resolution.
                if self._inputs.out_resolution == 10:
                    suffix = '_R1'
                else:
                    suffix = '_R2'
                if compressed:
                    with ZipFile(self._inputs.input_product) as archive:
                        mask_path = (root_path + [
                            _ for _ in archive.namelist()
                            if _.endswith(f'_{mask_name}{suffix}.tif')
                        ][0])
                else:
                    mask_path = list((self._inputs.input_product / 'MASKS'
                                      ).glob(f'*_{mask_name}{suffix}.tif'))[0]
                with rasterio.open(mask_path) as mask:
                    mask_array = self._extract_nth_band(mask, xy_bbox, 1, 1,
                                                        mask=True)
                # None means "use every bit" of the mask byte.
                if self._inputs.theia_masks[mask_name] is None:
                    self._inputs.theia_masks[mask_name] = range(8)
                # Keep mask pixels where any selected bit is set.
                bitmasks = [mask_array & (1 << b)
                            for b in self._inputs.theia_masks[mask_name]]
                mask_array *= np.any(bitmasks, axis=0)
                for band in data:
                    # Masked pixels become NaN in every extracted band.
                    data[band] = np.where(mask_array, np.nan, data[band])
        # Store outputs
        self._intermediate_data['data'] = data
        self._intermediate_data['metadata'] = metadata
    def create_ds(self) -> None:
        """See base class."""
        # Create the dataset
        ds = xr.Dataset(
            {key: (['time', 'y', 'x'], val) for key, val
             in self._intermediate_data['data'].items()},
            coords={
                'x': ('x', self._intermediate_data['x']),
                'y': ('y', self._intermediate_data['y']),
                # Acquisition date without the fractional-seconds part.
                'time': [datetime.fromisoformat(self._intermediate_data[
                    'metadata']['ACQUISITION_DATE'].split('.')[0])]
            }
        )
        crs = self._intermediate_data['crs']
        # Set up coordinate variables
        ds.x.attrs['axis'] = 'X'
        ds.x.attrs['long_name'] = f'x-coordinate ({crs.name})'
        ds.x.attrs['standard_name'] = "projection_x_coordinate"
        ds.x.attrs['units'] = 'm'
        ds.y.attrs['axis'] = 'Y'
        ds.y.attrs['long_name'] = f'y-coordinate ({crs.name})'
        ds.y.attrs['standard_name'] = "projection_y_coordinate"
        ds.y.attrs['units'] = 'm'
        ds.time.attrs['axis'] = 'T'
        ds.time.attrs['long_name'] = 'time'
        # Set up the 'grid mapping variable'
        ds['crs'] = xr.DataArray(name='crs', attrs=crs.to_cf())
        # Store metadata
        ds['product_metadata'] = xr.DataArray()
        for key, val in self._intermediate_data['metadata'].items():
            ds.product_metadata.attrs[key] = val
        ds.attrs['data_type'] = 'rho'
        ds.attrs['theia_bands'] = self._inputs.theia_bands
        if self._inputs.theia_masks is not None:
            # Record which THEIA masks (and bits) were applied.
            ds.attrs['suppl_masks'] = ', '.join(
                f'THEIA_{key} ({"".join([str(b) for b in val])})'
                for key, val in self._inputs.theia_masks.items()
            )
        self.dataset = ds
    def _load_metadata_from_MTD(self, compressed):
        """Parse the product's MTD_ALL.xml metadata into a flat dict."""
        if compressed:
            with ZipFile(self._inputs.input_product) as archive:
                path = [_ for _ in archive.namelist() if _.endswith('MTD_ALL.xml')][0]
                with archive.open(path) as f:
                    tree = etree.parse(f)
        else:
            path = list(self._inputs.input_product.glob('*MTD_ALL.xml'))[0]
            with open(path) as f:
                tree = etree.parse(f)
        root = tree.getroot()
        metadata = defaultdict(dict)
        # Flat scan of the two top levels of the document.
        for elem in root:
            for subelem in elem:
                # NOTE(review): `subelem.text` may be None for empty
                # elements, which would raise AttributeError here — confirm
                # the THEIA schema always provides text.
                if subelem.text.strip():
                    metadata[subelem.tag] = subelem.text
                for att in subelem.attrib:
                    metadata[':'.join([subelem.tag, att])] = subelem.attrib.get(att)
        for elem in root.iter('Horizontal_Coordinate_System'):
            for subelem in elem:
                metadata[subelem.tag] = subelem.text
        for elem in root.iter('SPECIAL_VALUE'):
            metadata[elem.get('name')] = elem.text
        for elem in root.iter('QUALITY_INDEX'):
            metadata[elem.get('name')] = elem.text
        for elem in root.iter('Processing_Information'):
            metadata[elem.find('NAME').text] = elem.find('VALUE').text
        return metadata
    def _compute_x_coords(self, x0, x1):
        """Compute pixel-center x coordinates for the bbox edges [x0, x1]."""
        out_res = self._inputs.out_resolution
        # Shift by half a pixel to get cell centers.
        x_start = x0 + out_res / 2
        x_stop = x1 - out_res / 2
        self._intermediate_data['x'] = np.arange(x_start, x_stop + 1, out_res)
    def _compute_y_coords(self, y0, y1):
        """Compute pixel-center y coordinates (descending) for [y0, y1]."""
        out_res = self._inputs.out_resolution
        y_start = y0 - out_res / 2
        y_stop = y1 + out_res / 2
        self._intermediate_data['y'] = np.arange(y_start, y_stop - 1, -out_res)
    # pylint: disable=too-many-locals
    # False positive.
    def _extract_first_band(self, subdataset, quantification_value, nodata):
        """Read the first band; fixes the shared bbox and coordinate grids."""
        if self._inputs.ROI is not None:
            self._reproject_geom()
            # Clip the read window to the region of interest.
            row_start, col_start, row_stop, col_stop = get_ij_bbox(
                subdataset,
                self._intermediate_data['geom']
            )
            arr = subdataset.read(
                1,
                window=Window.from_slices((row_start, row_stop + 1),
                                          (col_start, col_stop + 1))
            )
            # Update internal coords
            x0, y0 = subdataset.transform * (col_start, row_start)
            x1, y1 = subdataset.transform * (col_stop + 1, row_stop + 1)
        else:
            arr = subdataset.read(1)
            # Update internal coords
            x0, y0 = subdataset.transform * (0, 0)
            x1, y1 = subdataset.transform * (subdataset.width,
                                             subdataset.height)
        # Decode extracted data
        band_array = decode_data(arr, 1 / quantification_value, nodata)
        if (out_res := self._inputs.out_resolution) != subdataset.res[0]:
            band_array = resample_band_array(band_array, subdataset.res[0],
                                             out_res)
        # Compute projected coordinates
        self._compute_x_coords(x0, x1)
        self._compute_y_coords(y0, y1)
        # Update internal coords — shrink the returned bbox by one unit,
        # presumably so `index()` lookups on later bands stay inside the
        # raster; TODO confirm intent.
        x1 -= 1
        y1 += 1
        return band_array, [x0, y0, x1, y1]
    # pylint: disable=too-many-locals
    # More readable if coordinates are explicitly extracted from the bbox.
    def _extract_nth_band(self, subdataset, xy_bbox, quantification_value,
                          nodata, mask=False):
        """Read a subsequent band (or mask raster) over the shared bbox."""
        x0, y0, x1, y1 = xy_bbox
        row_start, col_start = subdataset.index(x0, y0)
        row_stop, col_stop = subdataset.index(x1, y1)
        arr = subdataset.read(
            1,
            window=Window.from_slices(
                (row_start, row_stop + 1),
                (col_start, col_stop + 1)
            )
        )
        if mask:
            # Masks are kept as raw integer bitfields.
            band_array = arr
        else:
            # Decode extracted data
            band_array = decode_data(arr, 1 / quantification_value, nodata)
        ij_bbox = [row_start, col_start, row_stop, col_stop]
        if (out_res := self._inputs.out_resolution) != subdataset.res[0]:
            band_array = resize_and_resample_band_array(band_array, ij_bbox,
                                                        subdataset.res[0],
                                                        out_res)
        return band_array
def _get_product_coefs(metadata):
"""Gets both quantification and nodata values (to compute correct reflectances)"""
quantification_value = float(metadata['REFLECTANCE_QUANTIFICATION_VALUE'])
nodata = float(metadata['nodata'])
return quantification_value, nodata
|
python
|
from tkinter import *
# Fixed-size 225x230 window for the vocabulary quiz menu.
root = Tk()
root.geometry('225x230')
root.resizable(False, False)
root.title('Learning English')
# Show the two mode-selection buttons.
def showMenu():
    menu1.pack()
    menu2.pack()
# Hide the mode-selection buttons (not called anywhere in this script yet).
def hideMenu():
    menu1.pack_forget()
    menu2.pack_forget()
# NOTE(review): Button width/height are in text units, so width=300 is far
# wider than the 225-px window; also neither button has a `command` handler
# yet, so clicking does nothing.
menu1 = Button(text = 'Can you translate?\nENG --> RUS', width = 300, height = 7)
menu2 = Button(text = 'Can you translate?\nRUS --> ENG', width = 300, height = 7)
showMenu()
root.mainloop()
|
python
|
import numpy as np
from time import time
import xorshift
rng = xorshift.Xoroshiro()
rng2 = xorshift.Xorshift128plus()
def output(name, start, end):
    """Print timing results for one benchmarked generator.

    Relies on the module-level globals ``iters`` and ``count`` that are set
    at the bottom of this script; do not call before they exist.

    :param name: label of the generator being benchmarked
    :param start: wall-clock start time (seconds, from time())
    :param end: wall-clock end time (seconds, from time())
    """
    elapsed = (end - start) * 1000      # total wall time in ms
    per_iter = elapsed / iters          # ms per iteration
    per_rv = per_iter / count * 1e6     # ms -> ns per generated value
    # Parenthesised print: works under both Python 2 and Python 3.
    print('%s took %.2f ms/iter, %.2f ns per float' % (name, per_iter, per_rv))
def bench_binomial(iters, count, N, p):
    """Benchmark generating `count` Binomial(N, p) samples, `iters` times,
    for numpy and the two module-level xorshift generators.

    Results are printed via output().
    """
    # Parenthesised prints and range(): compatible with Python 2 and 3
    # (was print-statement / xrange, which is Python-2-only syntax).
    print("Benchmarking generation of %d Bin(%d,%f) RVs, %d iterations" % (
        count, N, p, iters))
    print("------------------------------")
    start = time()
    for i in range(iters):
        np.random.binomial(N, p, count)
    end = time()
    output('numpy', start, end)
    start = time()
    for i in range(iters):
        rng.binomial(N, p, count)
    end = time()
    output('xoroshiro', start, end)
    start = time()
    for i in range(iters):
        rng2.binomial(N, p, count)
    end = time()
    output('xoroshift128plus', start, end)
def bench_uniform(iters, count):
    """Benchmark generating `count` Uniform(0,1) samples, `iters` times,
    for numpy and the two module-level xorshift generators.

    Results are printed via output().
    """
    # Parenthesised prints and range(): compatible with Python 2 and 3
    # (was print-statement / xrange, which is Python-2-only syntax).
    print("Benchmarking generation of %d Uniform(0,1) RVs, %d iterations" % (
        count, iters))
    print("------------------------------")
    start = time()
    for i in range(iters):
        np.random.uniform(size=count)
    end = time()
    output('numpy', start, end)
    start = time()
    for i in range(iters):
        rng.uniform(size=count)
    end = time()
    output('xoroshiro', start, end)
    start = time()
    for i in range(iters):
        rng2.uniform(size=count)
    end = time()
    output('xoroshift128plus', start, end)
# Benchmark parameters; `iters` and `count` are also read as globals
# by output().
iters = 10
count = 131072
N = 50
p = 0.25
bench_binomial(iters, count, N, p)
# Blank separator line; print('') behaves identically on Python 2 and 3
# (a bare `print` statement is Python-2-only syntax).
print('')
bench_uniform(iters, count)
|
python
|
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QWidget, QHBoxLayout, QLabel, QSlider
from traitlets import HasTraits, Unicode, Int, observe
from regexport.views.utils import HasWidget
class LabelledSliderModel(HasTraits):
    """Model for a labelled slider: an integer `value` kept within
    [`min`, `max`]."""

    label = Unicode()
    min = Int(default_value=0)
    max = Int()
    value = Int(default_value=1000)
    # Optional secondary label; may be None.
    label2 = Unicode(allow_none=True)

    def __repr__(self):
        return f"{self.__class__.__name__}(label='{self.label}', min={self.min}, max={self.max}, value={self.value})"

    @observe('value')
    def _clamp_value_to_be_inside_bounded_range(self, change):
        """Clamp `value` into [min, max] whenever it changes.

        Re-assigning self.value re-triggers this observer once; the second
        pass then falls through both branches.
        """
        value = change.new
        # Bug fix: clamp to the lower bound instead of 0 (0 can lie outside
        # the range when min > 0). Leftover debug print(self) removed.
        if value < self.min:
            self.value = self.min
        elif value > self.max:
            self.value = self.max
class LabelledSliderView(HasWidget):
    """Qt view for a LabelledSliderModel: [label | slider | value]."""

    def __init__(self, model: LabelledSliderModel):
        container = QWidget()
        HasWidget.__init__(self, widget=container)
        row = QHBoxLayout()
        container.setLayout(row)

        self.label = QLabel()
        self.slider = QSlider()
        self.value_label = QLabel()

        # Initialise the slider from the model: range first, then value,
        # then orientation (same order as the model state is applied).
        self.slider.setMinimum(model.min)
        self.slider.setMaximum(model.max)
        self.slider.setValue(model.value)
        self.slider.setOrientation(Qt.Horizontal)

        # Left-to-right: text label, slider, numeric readout.
        for widget in (self.label, self.slider, self.value_label):
            row.addWidget(widget)

        # Two-way binding: model changes re-render the view, slider moves
        # update the model.
        self.model = model
        self.model.observe(self.render)
        self.slider.valueChanged.connect(self._update_model_value)
        self.render()

    def render(self, change=None):
        """Sync every widget with the current model state."""
        self.label.setText(self.model.label)
        self.slider.setMinimum(self.model.min)
        self.slider.setMaximum(self.model.max)
        self.value_label.setText(str(self.model.value))

    def _update_model_value(self, value: int):
        # Propagate slider movement back into the model.
        self.model.value = value
|
python
|
'''
Configure web app settings. Updating or removing application settings will cause an app recycle.
'''
from .... pyaz_utils import _call_az
# Name mirrors the `az` subcommand; intentionally shadows builtin list().
def list(name, resource_group, slot=None):
    '''
    Get the details of a web app's settings.

    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`

    Optional Parameters:
    - slot -- the name of the slot. Defaults to the production slot if not specified
    '''
    # NOTE: _call_az forwards this function's locals() as CLI arguments —
    # do not introduce extra local variables in this body.
    return _call_az("az webapp config appsettings list", locals())
# Name mirrors the `az` subcommand; intentionally shadows builtin set().
def set(name, resource_group, settings=None, slot=None, slot_settings=None):
    '''
    Set a web app's settings.

    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`

    Optional Parameters:
    - settings -- space-separated appsettings in a format of `<name>=<value>`
    - slot -- the name of the slot. Defaults to the production slot if not specified
    - slot_settings -- space-separated slot appsettings in a format of either `<name>=<value>` or `@<json_file>`
    '''
    # NOTE: _call_az forwards this function's locals() as CLI arguments —
    # do not introduce extra local variables in this body.
    return _call_az("az webapp config appsettings set", locals())
def delete(name, resource_group, setting_names, slot=None):
    '''
    Delete web app settings.

    Required Parameters:
    - name -- name of the web app. If left unspecified, a name will be randomly generated. You can configure the default using `az configure --defaults web=<name>`
    - resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
    - setting_names -- space-separated appsettings names

    Optional Parameters:
    - slot -- the name of the slot. Defaults to the production slot if not specified
    '''
    # NOTE: _call_az forwards this function's locals() as CLI arguments —
    # do not introduce extra local variables in this body.
    return _call_az("az webapp config appsettings delete", locals())
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bandit Algorithms
This script follows Chapter 2 of Sutton and Barto (2nd) and simply reproduces
figures 2.2 to 2.5.
Author: Gertjan van den Burg
License: MIT
Copyright: (c) 2020, The Alan Turing Institute
"""
import abc
import math
import matplotlib.pyplot as plt
import numpy as np
import random
import tqdm
from matplotlib import ticker
from scipy.special import logsumexp
class TestBed:
    """k-armed bandit test bed (Sutton & Barto, chapter 2).

    On reset, each arm's true value q*(a) is drawn from N(baseline, 1);
    step() then rewards N(q*(a), 1).
    """

    def __init__(self, k=10, baseline=0):
        self.k = k
        self.baseline = baseline
        self._opt_action = None

    @property
    def opt_action(self):
        """Index of the best arm; requires reset() to have been called."""
        if self._opt_action is None:
            raise ValueError("Not initialised properly!")
        return self._opt_action

    def step(self, action):
        # Reward is unit-variance Gaussian noise around the arm's true value.
        return random.gauss(self._qstar[action], 1)

    def reset(self):
        """Redraw all arm values, recompute the optimal action, return self."""
        self._qstar = [random.gauss(self.baseline, 1) for _ in range(self.k)]
        self._opt_action = argmax(lambda a: self._qstar[a], range(self.k))
        return self
class Bandit(metaclass=abc.ABCMeta):
    """Base class for action-value bandit algorithms.

    Tracks per-action value estimates Q and pull counts N. `stepsize` is
    either the string "avg" (sample-average updates) or a constant.
    """

    def __init__(self, k=10, initial_value=0, stepsize="avg"):
        self.k = k
        self.initial_value = initial_value
        self.stepsize = stepsize

    def reset(self):
        """Reset estimates and counts to their initial state."""
        self.Q = dict.fromkeys(range(self.k), self.initial_value)
        self.N = dict.fromkeys(range(self.k), 0)
        if self.stepsize == "avg":
            # Sample-average step size 1/N(a); 1 on the very first pull.
            self.alpha = lambda a: 1 if self.N[a] == 0 else 1.0 / self.N[a]
        else:
            # Constant step size.
            self.alpha = lambda a: self.stepsize

    @abc.abstractmethod
    def get_action(self):
        """ Choose an action to take """

    def record(self, action, reward):
        """Incremental value update (algorithm on page 32 of Sutton & Barto)."""
        A, R = action, reward
        self.N[A] += 1
        self.Q[A] += self.alpha(A) * (R - self.Q[A])
class EpsilonGreedy(Bandit):
    """ε-greedy selection: explore with probability ε, exploit otherwise."""

    def __init__(self, k=10, epsilon=0.1, initial_value=0, stepsize="avg"):
        super().__init__(k=k, initial_value=initial_value, stepsize=stepsize)
        self.epsilon = epsilon

    def get_action(self):
        explore = random.random() <= self.epsilon
        if explore:
            # Uniform random arm.
            return random.randint(0, self.k - 1)
        # Greedy arm with respect to the current estimates.
        return argmax(lambda a: self.Q[a], range(self.k))

    def label(self):
        """Legend label used in the plots."""
        return (
            r"$\varepsilon$-greedy ($\varepsilon = %g$, $Q_1 = %g$, $\alpha = %s$)"
            % (self.epsilon, self.initial_value, self.stepsize)
        )
class UpperConfidence(Bandit):
    """Upper-Confidence-Bound (UCB) action selection."""

    def __init__(self, k=10, c=2.0):
        super().__init__(k=k)
        self.c = c  # exploration coefficient

    def reset(self):
        super().reset()
        self.t = 0  # global time step

    def get_action(self):
        self.t += 1
        # Every arm is tried once first; this also guarantees N[a] > 0
        # before the UCB score divides by it.
        for action in range(self.k):
            if self.N[action] == 0:
                return action
        ucb_score = lambda a: self.Q[a] + self.c * math.sqrt(
            math.log(self.t) / self.N[a]
        )
        return argmax(ucb_score, range(self.k))

    def label(self):
        """Legend label used in the plots."""
        return r"UCB ($c = %g$)" % self.c
class GradientBandit(Bandit):
    """Gradient bandit: soft-max over action preferences (Sutton & Barto §2.8)."""

    def __init__(self, k=10, stepsize="avg", use_baseline=True):
        super().__init__(k=k, stepsize=stepsize)
        self.use_baseline = use_baseline

    def reset(self):
        super().reset()
        # H: action preferences; probs: soft-max probabilities from the
        # last get_action() call; Rtbar: running average reward (baseline);
        # t: time step counter.
        self.H = {a: 0 for a in range(self.k)}
        self.probs, self.Rtbar, self.t = None, 0, 0

    def get_action(self):
        self.t += 1
        # Soft-max over preferences via logsumexp for numerical stability.
        # self.probs is kept for the preference update in record().
        lse = logsumexp(list(self.H.values()))
        self.probs = [math.exp(self.H[a] - lse) for a in range(self.k)]
        a = random.choices(list(range(self.k)), weights=self.probs, k=1)
        return a[0]

    def record(self, action, reward):
        # Preference update (eq. 2.12 in Sutton & Barto).
        # NOTE(review): this override does not call super().record(), so N
        # is never incremented; with stepsize="avg", alpha(a) is therefore
        # always 1 — confirm that a numeric stepsize is always intended here.
        At, Rt = action, reward
        for a in range(self.k):
            self.H[a] += (
                self.alpha(a) * (Rt - self.Rtbar) * ((At == a) - self.probs[a])
            )
        # Note that the choice of baseline is somewhat arbitrary, but the
        # average reward works well in practice. See discussion on page 40 of
        # Sutton & Barto.
        if self.use_baseline:
            self.Rtbar += 1 / self.t * (Rt - self.Rtbar)

    def label(self):
        """Legend label used in the plots."""
        bsln = "with" if self.use_baseline else "without"
        return r"Gradient ($\alpha = %s$, %s baseline)" % (self.stepsize, bsln)
def argmax(func, args):
    """Return the element of `args` maximising `func`.

    Ties go to the earliest element; returns None for an empty `args`.
    """
    best_val = -float("inf")
    best_arg = None
    for candidate in args:
        val = func(candidate)
        if val > best_val:
            best_val, best_arg = val, candidate
    return best_arg
def plot_common(axis, data, bandits):
    """Plot one curve per bandit (rows of `data`) with a legend of labels."""
    # Transpose so matplotlib draws one line per bandit (per column).
    axis.plot(data.T)
    axis.legend([bandit.label() for bandit in bandits])
    axis.set_xlabel("Steps")
def make_reward_plot(axis, avg_rewards, bandits):
    """Draw average-reward-per-step curves, one per bandit, on `axis`."""
    plot_common(axis, avg_rewards, bandits)
    axis.set_ylabel("Average\nreward", rotation="horizontal", ha="center")
def make_optact_plot(axis, avg_optact, bandits):
    """Draw fraction-of-optimal-action curves (as percentages) on `axis`."""
    plot_common(axis, avg_optact, bandits)
    # Fractions in [0, 1], rendered as 0%..100%.
    axis.set_yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
    axis.set_ylim(0, 1)
    axis.yaxis.set_major_formatter(ticker.PercentFormatter(1.0))
    axis.set_ylabel("%\nOptimal\naction", rotation="horizontal", ha="center")
def run_experiment(env, bandits, repeats, steps):
    """Run every bandit on `env` for `repeats` independent runs of `steps`.

    :param env: test bed providing reset(), step(action) and opt_action
    :param bandits: list of Bandit instances; reset before each run
    :param repeats: number of independent runs to average over
    :param steps: number of time steps per run
    :return: (avg_rewards, avg_optact), each shaped (len(bandits), steps)
    """
    B = len(bandits)
    rewards = np.zeros((B, repeats, steps))
    optact = np.zeros((B, repeats, steps))
    for r in tqdm.trange(repeats):
        # Reset the bandits and the environment for a fresh run.
        # (Plain loop: the original used a list comprehension purely for
        # its side effects, which builds a throwaway list.)
        for bandit in bandits:
            bandit.reset()
        env.reset()
        for i in range(steps):
            for b, bandit in enumerate(bandits):
                action = bandit.get_action()
                reward = env.step(action)
                bandit.record(action, reward)
                rewards[b, r, i] = reward
                optact[b, r, i] = action == env.opt_action
    # Average across the repeat axis.
    avg_rewards = rewards.mean(axis=1)
    avg_optact = optact.mean(axis=1)
    return avg_rewards, avg_optact
def figure_2_2(k=10, repeats=2000, steps=1000, epsilons=None):
    """Reproduce figure 2.2: ε-greedy for several values of ε."""
    env = TestBed(k=k)
    if not epsilons:
        epsilons = [0.1, 0.01, 0]
    bandits = [EpsilonGreedy(k=k, epsilon=eps) for eps in epsilons]
    avg_rewards, avg_optact = run_experiment(env, bandits, repeats, steps)
    # Reward curves on top, optimal-action percentage below.
    fig, axes = plt.subplots(2, 1)
    make_reward_plot(axes[0], avg_rewards, bandits)
    make_optact_plot(axes[1], avg_optact, bandits)
    plt.show()
def figure_2_3(k=10, repeats=2000, steps=1000):
    """Reproduce figure 2.3: realistic ε-greedy vs optimistic greedy."""
    env = TestBed(k=k)
    realistic = EpsilonGreedy(k=k, epsilon=0.1, initial_value=0, stepsize=0.1)
    optimistic = EpsilonGreedy(k=k, epsilon=0, initial_value=5, stepsize=0.1)
    bandits = [realistic, optimistic]
    _, avg_optact = run_experiment(env, bandits, repeats, steps)
    fig, axis = plt.subplots(1, 1)
    make_optact_plot(axis, avg_optact, bandits)
    plt.show()
def figure_2_4(k=10, repeats=2000, steps=1000, c=2):
    """Reproduce figure 2.4: UCB versus ε-greedy (average reward only)."""
    env = TestBed(k=k)
    bandits = [EpsilonGreedy(k=k, epsilon=0.1), UpperConfidence(k=k, c=c)]
    avg_rewards, _ = run_experiment(env, bandits, repeats, steps)
    figure, axis = plt.subplots(1, 1)
    make_reward_plot(axis, avg_rewards, bandits)
    plt.show()
def figure_2_5(k=10, repeats=1000, steps=1000):
    """Reproduce figure 2.5: gradient bandit with and without baseline.

    The test bed uses baseline=4 so the reward baseline actually matters.
    """
    env = TestBed(k=k, baseline=4)
    bandits = [
        GradientBandit(k=k, stepsize=0.1),
        GradientBandit(k=k, stepsize=0.4),
        GradientBandit(k=k, stepsize=0.1, use_baseline=False),
        GradientBandit(k=k, stepsize=0.4, use_baseline=False),
    ]
    _, avg_optact = run_experiment(env, bandits, repeats, steps)
    figure, axis = plt.subplots(1, 1)
    make_optact_plot(axis, avg_optact, bandits)
    plt.show()
def playground(k=10, repeats=2000, steps=1000):
    """Sandbox for comparing a hand-picked set of bandits."""
    env = TestBed(k=k)
    bandits = [
        EpsilonGreedy(k=k, epsilon=0.01),
        EpsilonGreedy(k=k, initial_value=5, epsilon=0.1),
        UpperConfidence(k=k, c=2),
        GradientBandit(k=k, stepsize=0.1),
    ]
    avg_reward, avg_optact = run_experiment(env, bandits, repeats, steps)
    # Reward curves on top, optimal-action percentage below.
    figure, axes = plt.subplots(2, 1)
    make_reward_plot(axes[0], avg_reward, bandits)
    make_optact_plot(axes[1], avg_optact, bandits)
    plt.show()
def main():
    """Entry point: enable or disable the figures you want to see."""
    for figure in (figure_2_2, figure_2_3, figure_2_4, figure_2_5):
        figure()
    # playground(repeats=1000, steps=5000)


if __name__ == "__main__":
    main()
|
python
|
import unittest
import pandas as pd
from tests.test_utils import TestUtils
from enda.ml_backends.sklearn_estimator import EndaSklearnEstimator
# scikit-learn is an optional test dependency; fail with a clear message.
try:
    from sklearn.linear_model import LinearRegression, SGDRegressor
    from sklearn.ensemble import AdaBoostRegressor, RandomForestRegressor
    from sklearn.svm import SVR, LinearSVR
    from sklearn.preprocessing import PolynomialFeatures, StandardScaler
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.pipeline import Pipeline
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.feature_selection import SelectFromModel
    from sklearn.neural_network import MLPRegressor
except ImportError as e:
    # Typo fixed ("required is you" -> "required if you"); the original
    # error is chained with `from` instead of passed as a second argument.
    raise ImportError("scikit-learn is required if you want to test enda's EndaSklearnEstimator. "
                      "Try: pip install scikit-learn>=0.24.1") from e
class TestEndaSklearnEstimator(unittest.TestCase):
    """Exercise EndaSklearnEstimator with a variety of scikit-learn models."""

    def test_estimators(self):
        """Train and predict with each estimator, checking the prediction
        keeps the test set's pandas.DatetimeIndex intact."""
        train_set, test_set, target_name = TestUtils.read_example_a_train_test_sets()

        estimators = [
            LinearRegression(),
            AdaBoostRegressor(),
            SVR(),
            Pipeline([('poly', PolynomialFeatures(degree=3)),
                      ('linear', LinearRegression(fit_intercept=False))]),
            Pipeline([('standard_scaler', StandardScaler()),
                      ('sgd_regressor', SGDRegressor())]),
            KNeighborsRegressor(n_neighbors=10),
            GaussianProcessRegressor(),
            Pipeline([('feature_selection', SelectFromModel(LinearSVR())),
                      ('classification', RandomForestRegressor())]),
            Pipeline([
                ('standard_scaler', StandardScaler()),
                ('mlp_regressor', MLPRegressor(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 5), random_state=1))]
            ),
        ]

        for estimator in estimators:
            wrapped = EndaSklearnEstimator(estimator)
            wrapped.train(train_set, target_name)
            prediction = wrapped.predict(test_set, target_name)
            # prediction must preserve the pandas.DatetimeIndex
            self.assertIsInstance(prediction.index, pd.DatetimeIndex)
            self.assertTrue((test_set.index == prediction.index).all())
|
python
|
""" Generate new files from templates. """
import argparse
import sys
from typing import Optional
from contextlib import contextmanager
import os
import shlex
from itertools import chain
from cjrh_template import Template
import biodome
__version__ = '2017.10.3'
@contextmanager
def file_or_stdout(args, filename: Optional[str] = None):
    """Yield a writable handle: stdout with --stdout, else the target file.

    When `filename` is None the output name is the matched template's
    file name with its ".templitz" extension stripped.
    """
    # Locate the requested template; for-else raises when nothing matched.
    for path, hit in all_templates(args):
        if args.template in hit:
            break
    else:
        raise FileNotFoundError('Template not found!')
    if args.stdout:
        handle = sys.stdout
    else:
        # Remove the trailing ".templitz" unless an explicit name was given.
        out_name = filename or hit.rpartition('.')[0]
        handle = open(os.path.join(args.outdir, out_name), 'w+')
    try:
        yield handle
    finally:
        # Never close the interpreter's stdout.
        if handle is not sys.stdout:
            handle.close()
def all_templates(args):
    """Yield (directory, filename) for every *.templitz file on the path.

    Search order: current directory, then TEMPLITZ_PATH entries, then the
    bundled "library" directory next to this module.
    """
    # Fix: this is a list of path entries, not a str — the previous
    # `pathstr: str` annotation was wrong and the name misleading.
    env_paths = biodome.environ.get('TEMPLITZ_PATH', '').split(os.pathsep)
    # Current dir first, and /library of templitz.py dir as last resort
    paths = chain(
        [os.getcwd()],
        env_paths,
        [os.path.join(os.path.dirname(__file__), 'library')]
    )
    for p in paths:
        # Missing directories (e.g. the empty-string entry produced by an
        # unset TEMPLITZ_PATH) are silently skipped.
        if not os.path.exists(p):
            continue
        for fname in os.listdir(p):
            if fname.endswith('.templitz'):
                yield p, fname
def load_template(args):
    """Find `args.template` on the search path and return it as a Template.

    :raises FileNotFoundError: when no template file name matches.
    """
    paths = biodome.environ.get('TEMPLITZ_PATH', '').split(os.pathsep)
    for path, hit in all_templates(args):
        if args.template in hit:
            break
    else:
        msg = (f'Error: template "{args.template}" not found in any of '
               f'the following locations:')
        # Bug fix: start the location list on its own line; previously the
        # first path was glued directly onto the sentence above.
        msg += '\n' + '\n'.join(paths)
        raise FileNotFoundError(msg)
    with open(os.path.join(path, hit)) as f:
        data = f.read()
    return Template(data)
def subs(args):
    """Render the template with the -p key=value params and write it out."""
    tmpl = load_template(args)
    # Parse "key=value" parameters into a mapping for substitution.
    params = {}
    for raw in args.params:
        key, _, value = raw.partition('=')
        params[key] = value
    output = tmpl.safe_substitute(params)
    # Strip out lines starting with "#templitz" and process settings in
    # them.
    settings = {}
    final_lines = []
    for line in output.splitlines(False):
        if not line.startswith('#templitz'):
            final_lines.append(line)
            continue
        data = line.partition('#templitz')[2]
        for item in shlex.split(data):
            key, _, value = item.partition('=')
            # Handle toggles/bools automatically
            settings[key] = value.strip('"') or True
    output = '\n'.join(final_lines)
    with file_or_stdout(args, filename=settings.get('filename')) as f:
        f.write(output)
def info(args):
    """Print the placeholder variables available in the chosen template."""
    tmpl = load_template(args)
    print('The template has the following vars: ')
    print()
    for placeholder in tmpl.placeholders():
        print(' ${%s}' % placeholder)
    print()
def main():
    """Command-line entry point: --info, --list, or render a template."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--template')
    parser.add_argument('-i', '--info', help='Information about the templit.',
                        action='store_true')
    parser.add_argument('-l', '--list', help='List all available templitz.',
                        action='store_true')
    parser.add_argument('-s', '--stdout', action='store_true',
                        help='Write to stdout instead of file.')
    parser.add_argument('-o', '--outdir', help='Output directory.',
                        default=os.getcwd())
    parser.add_argument('-p', '--params', nargs='+', default=[])
    args = parser.parse_args()
    try:
        if args.info:
            info(args)
        elif args.list:
            # Show every template visible on the search path.
            for path, fname in all_templates(args):
                print(path, fname)
        else:
            subs(args)
    except FileNotFoundError as e:
        # Missing templates are reported, not traceback-dumped.
        print(f'Error: {e!s}')


if __name__ == '__main__':
    main()
|
python
|
import os
import paramiko
def get_private_key():
    """Load the client's RSA private key (no passphrase).

    Adjust the path below to point at the key file on your client.
    """
    key_path = os.path.expanduser("/home/ubuntu/.ssh/id_rsa")
    return paramiko.RSAKey.from_private_key_file(key_path, password='')
def get_ssh(myusername, myhostname, myport):
    """Open an SSH connection, trusting unknown hosts automatically."""
    client = paramiko.SSHClient()
    client.load_system_host_keys()
    # Accept host keys we have not seen before.
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # Auth relies on system keys/agent; pass pkey=get_private_key() here
    # to authenticate with the explicit key file instead.
    client.connect(myhostname, username=myusername, port=myport)
    return client
def block_exec(ssh, command):
    """Run `command` over ssh and block until it finishes."""
    _, stdout, _ = ssh.exec_command(command)
    # Waiting on the exit status makes the call synchronous; the status
    # itself is deliberately ignored (tc "del" commands may fail benignly).
    stdout.channel.recv_exit_status()
    return
def clear_bw_config2(ssh, interface):
    """Remove any existing tc qdisc/class/filter config from `interface`."""
    for template in ("sudo tc qdisc del dev %s root",
                     "sudo tc qdisc del dev %s ingress",
                     "sudo tc class del dev %s root",
                     "sudo tc filter del dev %s root"):
        block_exec(ssh, template % interface)
def exec_bw_config2(ssh, interface, bandwidth, ip, subnetmasklength):
    """Install an htb bandwidth cap on `interface` for traffic to ip/masklen.

    `bandwidth` is passed to tc as "%dmbps". Any existing config on the
    interface is cleared first.
    """
    clear_bw_config2(ssh, interface)
    # create a qdisc (queuing discipline), 12 is default class
    cmd1 = "sudo tc qdisc add dev %s root handle 1: htb default 12" % interface
    # Parenthesised prints: compatible with Python 2 and 3
    # (the bare print statements were Python-2-only syntax).
    print(cmd1)
    block_exec(ssh, cmd1)
    # define the performance for default class
    cmd2 = "sudo tc class add dev %s parent 1: classid 1:1 htb rate %dmbps ceil %dmbps" % (interface, bandwidth, bandwidth)
    print(cmd2)
    block_exec(ssh, cmd2)
    # route matching destination traffic into the rate-limited class
    filter_cmd = "sudo tc filter add dev %s protocol ip parent 1:0 prio 1 u32 match ip dst %s/%d flowid 1:1" % (interface, ip, subnetmasklength)
    print(filter_cmd)
    block_exec(ssh, filter_cmd)
def main():
    """Apply the same bandwidth cap to every host in the cluster.

    For each host: open an SSH connection, wipe any previous tc config on
    eth0, then install a 128 mbps cap towards 10.0.0.0/8.
    """
    myhosts = ["10.0.1.193", "10.0.1.192", "10.0.1.191", "10.0.1.190"]
    username = "ubuntu"
    port = 22
    for host in myhosts:
        ssh = get_ssh(username, host, port)
        clear_bw_config2(ssh, "eth0")
        exec_bw_config2(ssh, "eth0", 128, "10.0.0.0", 8)
    return


if __name__ == '__main__':
    main()
|
python
|
import json
from unittest.mock import patch
from ddt import ddt
from django.test import tag
from django.urls import reverse
from requests.exceptions import HTTPError
from rest_framework import status
from .test_setup import TestSetUp
@tag('unit')
@ddt
class ViewTests(TestSetUp):
    """Unit tests for the API views that proxy requests to the XIS.

    Every endpoint is checked twice: once with a mocked 200 response and
    once with the outbound call raising HTTPError. The shared mechanics
    live in the two private helpers below (the original repeated the same
    with-patch block in all eight tests).
    """

    # Error message the views return when the XIS is unreachable.
    XIS_ERROR_MSG = ("error reaching out to configured XIS API; "
                     "please check the XIS logs")

    def _assert_proxy_ok(self, url, client_method, http_method, payload):
        """Mock api.views.requests so `http_method` returns `payload` with
        status 200, call `client_method(url)` and expect HTTP 200."""
        with patch('api.views.requests') as requests:
            http_resp = requests.return_value
            getattr(requests, http_method).return_value = http_resp
            http_resp.json.return_value = payload
            http_resp.status_code = 200
            response = client_method(url)
            self.assertEqual(response.status_code, status.HTTP_200_OK)

    def _assert_proxy_error(self, url, client_method, patch_target):
        """Make the outbound call at `patch_target` raise HTTPError and
        expect a 500 response carrying the standard XIS error message."""
        with patch(patch_target) as outbound:
            outbound.side_effect = [HTTPError]
            response = client_method(url)
            responseDict = json.loads(response.content)
            self.assertEqual(response.status_code,
                             status.HTTP_500_INTERNAL_SERVER_ERROR)
            self.assertEqual(responseDict['message'], self.XIS_ERROR_MSG)

    def test_get_catalogs(self):
        """Test that calling the endpoint /api/catalogs returns a list of
        catalogs"""
        url = reverse('api:catalogs')
        self._assert_proxy_ok(url, self.client.get, 'get', [{"test": "value"}])

    def test_get_catalogs_error(self):
        """Test that calling the endpoint /api/catalogs returns an
        http error if an exception is thrown while reaching out to XIS"""
        url = reverse('api:catalogs')
        self._assert_proxy_error(url, self.client.get,
                                 'api.views.requests.get')

    def test_get_experiences(self):
        """Test that calling /api/experiences returns a list of
        experiences"""
        url = reverse('api:experiences')
        self._assert_proxy_ok(url, self.client.get, 'get', [{"test": "value"}])

    def test_get_experiences_error(self):
        """Test that calling /api/experiences returns an error if the call
        to the XIS throws an http error"""
        url = reverse('api:experiences')
        self._assert_proxy_error(url, self.client.get,
                                 'api.views.requests.get')

    def test_get_experience(self):
        """Test that calling /api/experience/id returns an experience"""
        doc_id = '123456'
        url = reverse('api:experience', args=(doc_id,))
        self._assert_proxy_ok(url, self.client.get, 'get', {"test": "value"})

    def test_get_experience_error(self):
        """Test that calling /api/experience/id returns an error if the call
        to the XIS throws an http error"""
        doc_id = '123456'
        url = reverse('api:experience', args=(doc_id,))
        self._assert_proxy_error(url, self.client.get,
                                 'api.views.requests.get')

    def test_patch_experience(self):
        """Test that calling /api/experience/id updates an experience"""
        doc_id = '123456'
        url = reverse('api:experience', args=(doc_id,))
        self._assert_proxy_ok(url, self.client.patch, 'patch',
                              {"test": "value"})

    def test_patch_experience_error(self):
        """Test that calling /api/experience/id returns an error if the call
        to the XIS throws an http error"""
        doc_id = '123456'
        url = reverse('api:experience', args=(doc_id,))
        self._assert_proxy_error(url, self.client.patch,
                                 'api.views.requests.patch')
|
python
|
import sys
stack = []


def recursion(stack, last):
    """Consume matrioshka tokens from `stack` (popped in reverse order).

    `last` is the closing (positive) size of the current doll; tokens are
    popped until its opener `-last` appears. Returns `last` when the doll
    and everything nested inside it is well-formed and the nested sizes
    sum to strictly less than `last`; returns -1 otherwise.
    """
    if not stack:
        return -1
    current = stack.pop()
    nested_size = 0
    while current != -last:
        # `current` closes an inner doll; validate it recursively.
        inner = recursion(stack, current)
        if inner == -1:
            return -1
        nested_size += inner
        if not stack:
            break
        current = stack.pop()
    # The opener must match, and the inner dolls must fit strictly inside.
    if current != -last or nested_size >= last:
        return -1
    return last
for line in sys.stdin:
    # Tokens arrive with the outermost closer last; consume in reverse.
    tokens = [int(tok) for tok in line.split()]
    outermost = tokens.pop()
    if recursion(tokens, outermost) == -1:
        print(':-( Try again.')
    else:
        print(':-) Matrioshka!')
|
python
|
from chaco.api import ArrayPlotData
from enable.component_editor import ComponentEditor
from traits.api import (
List, Instance, Either, Str, on_trait_change, Tuple, Any,
Property)
from traitsui.api import (
TabularEditor, View, UItem, VGroup, EnumEditor, HGroup, Item)
from traitsui.tabular_adapter import TabularAdapter
from pyfibre.gui.image_tab import ImageTab
class ImageMetricTab(ImageTab):
    """Image tab showing a metrics table plus an x/y scatter plot of any
    two numeric columns of `data`."""

    # Tabular metrics source; used like a pandas DataFrame below
    # (to_records, dtypes, columns) — TODO confirm with callers.
    data = Any

    # Table rows: record tuples derived from `data` (index included).
    _data = Property(List(Tuple), depends_on='data')

    # Table headers: blank first column (record index) + data columns.
    headers = Property(List(Str), depends_on='data')

    tabular_adapter = Instance(TabularAdapter, ())

    # Column names currently plotted on each axis ('' = nothing plotted).
    x_label = Str
    y_label = Str

    # Numeric columns offered in the axis drop-downs.
    _display_cols = Property(List(Str), depends_on='data')

    #: Selected evaluation steps in the table
    _selected_rows = Either(List(Tuple), None)

    def default_traits_view(self):
        """Build the TraitsUI layout: image and plot on top, table below."""
        editor = TabularEditor(
            adapter=self.tabular_adapter,
            show_titles=True,
            selected="_selected_rows",
            auto_update=False,
            multi_select=True,
            editable=False,
        )
        return View(
            VGroup(
                HGroup(
                    VGroup(
                        UItem('selected_label',
                              style='simple'),
                        UItem('image_plot',
                              editor=ComponentEditor(),
                              show_label=False),
                    ),
                    VGroup(
                        # Axis selectors restricted to numeric columns.
                        HGroup(
                            Item("x_label",
                                 editor=EnumEditor(name="_display_cols")),
                            Item("y_label",
                                 editor=EnumEditor(name="_display_cols")),
                        ),
                        UItem('component',
                              editor=ComponentEditor(),
                              show_label=False),
                    ),
                ),
                UItem("_data", editor=editor),
                layout="split"
            )
        )

    def _plot_data_default(self):
        # Start with empty x/y series; filled by the update handlers below.
        plot_data = ArrayPlotData()
        for data in ['x', 'y']:
            plot_data.set_data(data, [])
        return plot_data

    def _get__data(self):
        # Rows as plain tuples, or empty when no data is loaded.
        if self.data is None:
            return []
        fibre_data = self.data.to_records()
        return fibre_data.tolist()

    def _get__display_cols(self):
        # Only numeric columns make sense on the scatter-plot axes.
        if self.data is None:
            return []
        return [
            name for dtype, name in zip(
                self.data.dtypes, self.data.columns)
            if dtype in ["int64", "float64"]
        ]

    def _get_headers(self):
        # Leading blank header corresponds to the record-index column.
        if self.data is None:
            return []
        return [''] + list(self.data.columns)

    def customise_plot(self, plot):
        """Render the selected columns as a blue scatter plot."""
        plot.plot(("x", "y"), type="scatter", color="blue")

    def _tabular_adapter_default(self):
        return TabularAdapter(columns=self.headers)

    @on_trait_change("headers")
    def _update_adapter(self):
        # Keep the table columns in sync with the data columns.
        self.tabular_adapter.columns = self.headers

    @on_trait_change("data")
    def _update_data(self):
        # New data: refresh both plotted series.
        self._update_plot_x_data()
        self._update_plot_y_data()

    @on_trait_change("x_label")
    def _update_plot_x_data(self):
        """ Update data points displayed by the x axis.

        This method is called when the `x` axis is changed.
        """
        if self.x_label == "":
            self.plot_data.set_data("x", [])
        else:
            self.plot.x_axis.title = self.x_label
            # Column position in the row tuples matches its header position.
            index = self.headers.index(self.x_label)
            x_data = [row[index] for row in self._data]
            self.plot_data.set_data("x", x_data)

    @on_trait_change("y_label")
    def _update_plot_y_data(self):
        """ Update data points displayed by the y axis.

        This method is called when the `y` axis is changed.
        """
        if self.y_label == "":
            self.plot_data.set_data("y", [])
        else:
            self.plot.y_axis.title = self.y_label
            # Column position in the row tuples matches its header position.
            index = self.headers.index(self.y_label)
            y_data = [row[index] for row in self._data]
            self.plot_data.set_data("y", y_data)

    def reset_tab(self):
        # Clearing `data` also triggers the update handlers above.
        super().reset_tab()
        self.data = None
|
python
|
from .device import (ORTDeviceInfo, get_available_devices_info,
get_cpu_device_info)
from .InferenceSession import InferenceSession_with_device
|
python
|
#!/usr/bin/env python3
import sys
import numpy as np
# Input grid: one line per row, '#' marks an active cell.
with open(sys.argv[1]) as infile:
    rows = [a.strip() for a in infile]
def do_round(cube):
    """Apply one step of the 3D Conway-cubes rules; return the new grid.

    The 3x3x3 neighbourhood count here includes the cell itself, so an
    active cell survives with 3..4 occupied cells in its box (i.e. 2..3
    true neighbours) and an inactive cell activates with exactly 3.
    """
    dim_x, dim_y, dim_z = cube.shape
    result = np.zeros(cube.shape, dtype=bool)
    for i in range(dim_x):
        x_slice = slice(max(i - 1, 0), min(i + 2, dim_x))
        for j in range(dim_y):
            y_slice = slice(max(j - 1, 0), min(j + 2, dim_y))
            for k in range(dim_z):
                # Neighbourhood clipped at the grid borders.
                box = cube[x_slice, y_slice, max(k - 1, 0):min(k + 2, dim_z)]
                occupied = np.count_nonzero(box)
                if cube[i, j, k]:
                    # one higher than specs since we're counting this too
                    result[i, j, k] = 3 <= occupied <= 4
                else:
                    result[i, j, k] = occupied == 3
    return result
# Pad the grid by 6 on every side so six rounds can grow without
# touching the borders (z starts as a single active layer).
square_size = 12 + len(rows)
start_cube = np.zeros((square_size, square_size, 13), dtype=bool)
for row_idx, row in enumerate(rows):
    for col_idx, cell in enumerate(row):
        if cell == '#':
            start_cube[row_idx + 6, col_idx + 6, 6] = True
for round_idx in range(6):
    cube = do_round(start_cube)
    print("Round {}: {} active".format(round_idx + 1, np.count_nonzero(cube)))
    start_cube = cube
|
python
|
"""bsw URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from django.conf.urls import url, include
from .views import RingTimeViewSet, DefaultRingTimeViewSet, CallForwarding, RemoveCallForwarding, TestingMDNViewSet, QueryCallForwarding, QueryRingTime
from rest_framework import routers
# One DefaultRouter per endpoint so each viewset keeps its own API root.
ring_time_router = routers.DefaultRouter()
ring_time_router.register(r'set-ring-time', RingTimeViewSet)
default_ring_time_router = routers.DefaultRouter()
default_ring_time_router.register(r'default-ring-time', DefaultRingTimeViewSet)
call_forwarding_router = routers.DefaultRouter()
call_forwarding_router.register(r'set-forwarding-number', CallForwarding)
remove_forwarding_router = routers.DefaultRouter()
remove_forwarding_router.register(r'remove-forwarding-number', RemoveCallForwarding)
add_testing_mdn = routers.DefaultRouter()
add_testing_mdn.register(r'add-mdn', TestingMDNViewSet)
query_forwarding_router = routers.DefaultRouter()
query_forwarding_router.register(r'query-forwarding-number', QueryCallForwarding)
query_ring_time_router = routers.DefaultRouter()
query_ring_time_router.register(r'query-ring-time', QueryRingTime)
# mock up documentation for SI from the raw data from bsw/callforwarding/api/set-ring-time
urlpatterns = [
    # Consistency fix: every pattern and register prefix is now a raw
    # string (the first url() used a plain string; the value is identical
    # since '^api/' contains no escapes).
    url(r'^api/', include(ring_time_router.urls)),
    url(r'^api/', include(default_ring_time_router.urls)),
    url(r'^api/', include(call_forwarding_router.urls)),
    url(r'^api/', include(remove_forwarding_router.urls)),
    url(r'^api/', include(add_testing_mdn.urls)),
    url(r'^api/', include(query_forwarding_router.urls)),
    url(r'^api/', include(query_ring_time_router.urls)),
]
|
python
|
from pandas import DataFrame
from typing import List, Tuple, Dict
from models.team_stats import TeamSkeletonStats
def get_opponents_in_given_fixture_list(team_id: int, fixtures: DataFrame) -> List[int]:
    """Return opponent ids for `team_id` over `fixtures`.

    Opponents from fixtures where the team appears as 'team_a' come first
    (the 'team_h' side it faced), then those where it appears as 'team_h'.
    """
    as_team_a = fixtures['team_a'] == team_id
    as_team_h = fixtures['team_h'] == team_id
    faced_as_a = fixtures.loc[as_team_a, 'team_h']
    faced_as_h = fixtures.loc[as_team_h, 'team_a']
    return list(faced_as_a.values) + list(faced_as_h.values)


def get_prev_and_next_opponents(
        team_id: int,
        number_of_opponents_to_get: int,
        fixtures: DataFrame
) -> Tuple[List[int], List[int]]:
    """Return (last N opponents faced, next N opponents) for `team_id`.

    Fixtures are split on the boolean 'finished' column; N is
    `number_of_opponents_to_get` at each end.
    """
    involves_team = (fixtures['team_a'] == team_id) | (fixtures['team_h'] == team_id)
    team_fixtures = fixtures[involves_team]
    played = team_fixtures[team_fixtures['finished'] == True]
    remaining = team_fixtures[team_fixtures['finished'] == False]
    recent = played.tail(number_of_opponents_to_get)
    upcoming = remaining.head(number_of_opponents_to_get)
    return (
        get_opponents_in_given_fixture_list(team_id, recent),
        get_opponents_in_given_fixture_list(team_id, upcoming),
    )
def get_current_team_stats(team: str, team_id: int, teams_data: Dict[str, DataFrame]) -> TeamSkeletonStats:
    """Aggregate one team's per-match rows into a season TeamSkeletonStats.

    *teams_data* maps a team name to a DataFrame of its matches; the frame
    is assumed to carry xG/xGA/npxG/npxGA/scored/missed columns (as provided
    by the upstream data source -- TODO confirm against the loader).
    """
    match_rows = teams_data[team]
    games_played = len(match_rows.index)
    # Season totals, summed over every match row.
    xg_total = match_rows.xG.sum()
    xga_total = match_rows.xGA.sum()
    npxg_total = match_rows.npxG.sum()
    npxga_total = match_rows.npxGA.sum()
    scored_total = match_rows.scored.sum()
    conceded_total = match_rows.missed.sum()
    return TeamSkeletonStats(
        xg_total=xg_total,
        xga_total=xga_total,
        npxg_total=npxg_total,
        npxga_total=npxga_total,
        g_total=scored_total,
        ga_total=conceded_total,
        games_played=games_played,
        # Per-game averages; raises ZeroDivisionError for an empty frame,
        # exactly like the original implementation.
        xg_avg=xg_total / games_played,
        xga_avg=xga_total / games_played,
        goals_scored_avg=scored_total / games_played,
        goals_conceded_avg=conceded_total / games_played,
        name=team,
        team_id=team_id,
    )
|
python
|
#!/usr/bin/env python3
# coding:utf-8
"""Print the pickled banner stored alongside this script."""
import pickle

# NOTE(review): unpickling executes arbitrary code; this is only safe
# because banner.p ships with the project itself.
with open("banner.p", "rb") as banner_file:
    print(pickle.load(banner_file))
|
python
|
#!/usr/bin/env python
import re
from setuptools import setup
def get_version(filename):
    """Extract the ``__version__`` string assigned in *filename*.

    Raises AttributeError if no ``__version__ = '...'`` assignment is found
    (``re.search`` returns None in that case).
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original `open(filename).read()` leaked it until GC.
    with open(filename) as f:
        contents = f.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", contents).group(1)
version = get_version('flake8_assertive.py')
# Long description is the README followed by the changelog.  Read both with
# context managers so the file handles are closed promptly (the original
# chained open(...).read() calls leaked them until GC).
with open('README.rst') as readme:
    description = readme.read()
with open('CHANGELOG.rst') as changelog:
    description += "\n\n" + changelog.read()
github_url = 'https://github.com/jparise/flake8-assertive'
setup(
    name='flake8-assertive',
    version=version,
    description='Flake8 unittest assert method checker',
    long_description=description,
    author='Jon Parise',
    author_email='[email protected]',
    keywords='flake8 testing unittest assert',
    url=github_url,
    download_url=github_url + '/tarball/' + version,
    license='MIT',
    py_modules=['flake8_assertive'],
    entry_points={
        # Registers the checker with flake8 under the "A50" error-code prefix.
        'flake8.extension': ['A50 = flake8_assertive:Checker'],
    },
    install_requires=['flake8'],
    tests_require=['flake8>=3.0.0'],
    test_suite='tests',
    zip_safe=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Framework :: Flake8',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Quality Assurance',
        'Topic :: Software Development :: Testing',
        'Topic :: Software Development :: Testing :: Unit',
    ],
)
|
python
|
import modules.weapon as weapon

# Pre-built weapon instances for the game world.
# Positional arguments follow modules.weapon.Weapon's constructor; from these
# call sites they appear to be: name, description, damage, then two numeric
# stats (meaning not visible here -- TODO confirm against modules/weapon.py),
# attack type, a count/slot value, pick-up message, and drop message.
basic_sword = weapon.Weapon("Sword", "A sword you found somewhere.", 10, 0.5, 10, "slash", 1 , "You took the sword.", "You dropped the sword.")
big_axe = weapon.Weapon("Axe", "A big axe you found somewhere.", 20, 0.5, 10, "slash", 1 , "You took the axe.", "You dropped the axe.")
|
python
|
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, redirect, render
from django.views.decorators.cache import cache_page
from .forms import CommentForm, PostForm
from .models import Follow, Group, Post, User
from .settings import CACHE_TIME, PAGINATOR_NUM_PAGES
def get_paginator_page(request, items):
    """Return the page of *items* selected by the request's ?page= parameter."""
    requested_page = request.GET.get('page')
    return Paginator(items, PAGINATOR_NUM_PAGES).get_page(requested_page)
@cache_page(CACHE_TIME)
def index(request):
    """Front page: a cached, paginated feed of every post."""
    page = get_paginator_page(request, Post.objects.all())
    return render(request, 'posts/index.html', {'page_obj': page})
def group_posts(request, slug):
    """Paginated listing of the posts that belong to one group (404 if unknown)."""
    group = get_object_or_404(Group, slug=slug)
    context = {
        'group': group,
        'page_obj': get_paginator_page(request, group.posts.all()),
        'is_group': True,
    }
    return render(request, 'posts/group_list.html', context)
def profile(request, username):
    """An author's profile page: their posts plus the viewer's follow status.

    Uses get_object_or_404 (consistent with the other views in this module)
    so an unknown username yields a 404 instead of an unhandled
    User.DoesNotExist (500).
    """
    author = get_object_or_404(User, username=username)
    # Only a logged-in user who is not the author themselves can be
    # "following"; the final clause checks the actual Follow relation.
    is_following = (
        request.user.is_authenticated
        and author != request.user
        and author.following.filter(user=request.user).exists()
    )
    return render(request, 'posts/profile.html', {
        'page_obj': get_paginator_page(request, author.posts.all()),
        'author': author,
        'following': is_following
    })
def post_detail(request, post_id):
    """Single-post page with an (unbound or pre-filled) comment form."""
    context = {
        'post': get_object_or_404(Post, pk=post_id),
        'form': CommentForm(request.POST or None),
        'is_post_detail': True,
    }
    return render(request, 'posts/post_detail.html', context)
@login_required
def post_create(request):
    """Create a new post authored by the current user.

    Re-renders the form on validation failure; redirects to the author's
    profile on success.
    """
    form = PostForm(request.POST or None, files=request.FILES or None)
    if form.is_valid():
        post = form.save(commit=False)
        # The author is always the logged-in user, never form input.
        post.author = request.user
        post.save()
        return redirect('posts:profile', username=request.user)
    return render(request, 'posts/create_post.html', {'form': form})
@login_required
def post_edit(request, post_id):
    """Edit an existing post; only its author may do so.

    FIX: added @login_required for consistency with the other mutating
    views -- anonymous visitors are now sent to the login page instead of
    being bounced to the post detail.
    """
    post = get_object_or_404(Post, pk=post_id)
    # Non-authors are silently redirected back to the post.
    if post.author != request.user:
        return redirect('posts:post_detail', post_id)
    form = PostForm(
        request.POST or None,
        files=request.FILES or None,
        instance=post
    )
    if not form.is_valid():
        return render(request, 'posts/create_post.html', {
            'is_edit': True,
            'form': form
        })
    form.save()
    return redirect('posts:post_detail', post_id=post_id)
@login_required
def add_comment(request, post_id):
    """Attach a validated comment (by the current user) to a post."""
    post = get_object_or_404(Post, pk=post_id)
    form = CommentForm(request.POST or None)
    if form.is_valid():
        new_comment = form.save(commit=False)
        # Author and target post come from the request context, not the form.
        new_comment.author = request.user
        new_comment.post = post
        new_comment.save()
    # Invalid forms fall through: we simply return to the post page.
    return redirect('posts:post_detail', post_id=post_id)
@login_required
def follow_index(request):
    """Feed of posts written by the authors the current user follows."""
    followed_posts = Post.objects.filter(author__following__user=request.user)
    page = get_paginator_page(request, followed_posts)
    return render(request, 'posts/follow.html', {'page_obj': page})
@login_required
def profile_follow(request, username):
    """Start following *username* (no-op for self-follow or duplicates).

    Uses get_object_or_404 (consistent with the other views) so an unknown
    username yields a 404 instead of an unhandled User.DoesNotExist.
    """
    author = get_object_or_404(User, username=username)
    # Guard against following yourself and against duplicate Follow rows.
    if not (author == request.user
            or author.following.filter(user=request.user).exists()):
        Follow.objects.create(
            user=request.user,
            author=author
        )
    return redirect('posts:profile', username)
@login_required
def profile_unfollow(request, username):
    """Stop following *username*; 404s when no follow relation exists."""
    follow = get_object_or_404(
        Follow, user=request.user,
        author__username=username
    )
    follow.delete()
    return redirect('posts:profile', username)
|
python
|
from numpy import random
from impl.distribution import distribution
class triangular(distribution):
    """Integer-valued triangular distribution over [mini, maxi] with a mode."""

    def __init__(self, mini, mode, maxi):
        # Normalise all three bounds to float once, up front.
        self.mini, self.mode, self.maxi = (float(v) for v in (mini, mode, maxi))

    def generate(self):
        # numpy yields a float sample; truncate toward zero as before.
        sample = random.triangular(self.mini, self.mode, self.maxi)
        return int(sample)
|
python
|
# Package facade: re-export the RLlib-compatible particle environment under
# a shorter public name.
from .multiagent_particle_env import RLlibMultiAgentParticleEnv as MultiAgentParticleEnv

# Explicit public API of this package.
__all__ = [
    "MultiAgentParticleEnv"
]
|
python
|
from flask import abort, Flask, jsonify, request
import os
import asyncio
import pyjuicenet
import aiohttp
from prettytable import PrettyTable
from pytz import timezone
import datetime
import requests
import database_helper
import html_renderer
# Single module-level Flask application; the route handlers below attach to it.
app = Flask(__name__)
@app.route("/")
def show_all_chargers():
    """Refresh charger state from the backend, then render every charger."""
    database_helper.update_chargers()
    page = html_renderer.print_chargers()
    return page
@app.route("/sql")
def show_sql_chargers():
    """Refresh charger state, then render only the '980' charger group."""
    database_helper.update_chargers()
    page = html_renderer.print_chargers(['980'])
    return page
@app.route("/qry")
def show_qry_chargers():
    """Refresh charger state, then render the two named site groups."""
    database_helper.update_chargers()
    page = html_renderer.print_chargers(['Penta-Taj', 'Joby Heights'])
    return page
@app.route("/render-qr-codes")
def render_qr_codes():
    """Generate a QR code for every charger in the database and render them."""
    base_url = request.url_root
    html_renderer.generate_and_save_qr_code(base_url)
    return html_renderer.qr_codes_to_html()
@app.route("/get-assign-charger-command/<charger_id>")
def get_assign_charger_command(charger_id):
    """Return the Slack command text for claiming *charger_id*.

    This is the landing endpoint after scanning a charger's QR code.
    """
    return html_renderer.generate_assign_charger_text(charger_id)
# Slack handler
# Slack handler
@app.route("/slack/assign-charger", methods=['POST'])
def assign_charger():
    """Slack slash-command endpoint: assign a charger to the posting user.

    Always answers with an ephemeral message; a generic error text is
    returned whenever validation or assignment fails.
    """
    team_id = request.form.get("team_id")
    token = request.form.get("token")
    response = "Hmm...something went wrong."
    if validate_request(team_id, token):
        user = request.form.get("user_name")
        charger_id = request.form.get("text")
        if user and charger_id:
            success, charger_name = database_helper.assign_driver(user, charger_id)
            if success:
                response = f"@{user}, you have been assigned {charger_name}. Find more information about other Joby chargers here: {request.url_root}!"
    return jsonify(
        response_type='ephemeral',
        text=response)
def validate_request(team_id, token):
    """Return True iff *team_id*/*token* match the configured Slack workspace."""
    expected = (os.environ['TEAM_ID'], os.environ['SLACK_VERIFICATION_TOKEN'])
    if (team_id, token) == expected:
        return True
    # Log rejected requests for debugging; no secrets beyond the bad token.
    print(f"Request with Team ID: {team_id} and token {token} is not valid.")
    return False
# Start the development server only when executed directly -- not when the
# module is imported (e.g. by a WSGI server or a test), which previously
# blocked on import.
if __name__ == "__main__":
    app.run()
|
python
|
import pytest
import networkx as nx
from ..pyfastg import add_node_to_digraph
def test_basic():
    """End-to-end check of add_node_to_digraph: adds, implicit neighbors,
    and later back-filling of an initially attribute-less node."""
    # Re-usable attribute checks, so we can assert nothing was clobbered
    # after each subsequent mutation of g.
    def check_asdf(g):
        assert "asdf" in g.nodes
        assert g.nodes["asdf"]["cov"] == 5.2
        # "gc" is derived from seq "ATCGCC": 4 G/C bases out of 6.
        assert g.nodes["asdf"]["gc"] == 4 / 6.0
        assert g.nodes["asdf"]["length"] == 6
    # NOTE(review): helper is named check_ghjasdf but the node is "ghjasd".
    def check_ghjasdf(g):
        assert "ghjasd" in g.nodes
        assert g.nodes["ghjasd"]["cov"] == 100
        # seq "CAT" has 1 G/C base out of 3.
        assert g.nodes["ghjasd"]["gc"] == 1 / 3.0
        assert g.nodes["ghjasd"]["length"] == 3
    g = nx.DiGraph()
    # 1. Add node "asdf" to g
    add_node_to_digraph(
        g, {"name": "asdf", "cov": 5.2, "seq": "ATCGCC", "length": 6}
    )
    check_asdf(g)
    # 2. Add node "ghjasd" to g
    add_node_to_digraph(
        g,
        {
            "name": "ghjasd",
            "cov": 100,
            "seq": "CAT",
            "length": 3,
            "outgoing_node_names": ["asdf", "qwerty", "hamborgar"],
        },
    )
    # This should have added three new nodes (ghjasdf, qwerty, hamborgar)
    # qwerty and hamborgar, however, don't have any attributes (yet)
    # Double-check that asdf's attributes were not somehow lost
    check_asdf(g)
    check_ghjasdf(g)
    assert "qwerty" in g.nodes
    assert "hamborgar" in g.nodes
    # Edges must point from ghjasd to each declared outgoing neighbor.
    assert ("ghjasd", "asdf") in g.edges
    assert ("ghjasd", "qwerty") in g.edges
    assert ("ghjasd", "hamborgar") in g.edges
    # 3. Add node "hamborgar" to g (it's already in there but is "empty")
    add_node_to_digraph(
        g, {"name": "hamborgar", "cov": 33.3, "seq": "AAAA", "length": 4}
    )
    # Again, check that prior nodes' attributes are ok
    check_asdf(g)
    check_ghjasdf(g)
    assert "qwerty" in g.nodes
    assert "hamborgar" in g.nodes
    assert ("ghjasd", "asdf") in g.edges
    assert ("ghjasd", "qwerty") in g.edges
    assert ("ghjasd", "hamborgar") in g.edges
    assert g.nodes["hamborgar"]["cov"] == 33.3
    # seq "AAAA" contains no G/C bases.
    assert g.nodes["hamborgar"]["gc"] == 0
    assert g.nodes["hamborgar"]["length"] == 4
def test_insufficient_attrs():
    """Each required node attribute must be reported missing, in order."""
    g = nx.DiGraph()
    attrs = {}
    # Build the attribute dict up one key at a time; before each addition,
    # the call must fail complaining about exactly that missing key.
    for key, value in (
        ("name", "123"),
        ("length", 2),
        ("cov", 6.3),
        ("seq", "AG"),
    ):
        with pytest.raises(ValueError) as exc_info:
            add_node_to_digraph(g, dict(attrs))
        assert "%s not present for all nodes" % key in str(exc_info.value)
        attrs[key] = value
    # Finally, with all four attributes supplied, this should work
    add_node_to_digraph(g, attrs)
    assert "123" in g.nodes
def test_length_mismatch():
    """A declared length that disagrees with len(seq) must be rejected."""
    g = nx.DiGraph()
    bad_node = {"name": "asdf", "cov": 5.2, "seq": "A", "length": 6}
    with pytest.raises(ValueError) as exc_info:
        add_node_to_digraph(g, bad_node)
    expected = "Length given vs. actual seq. length differs for node asdf"
    assert expected in str(exc_info.value)
|
python
|
from rest_framework import serializers
from care.facility.api.serializers import TIMESTAMP_FIELDS
from care.facility.api.serializers.facility import FacilityBasicInfoSerializer
from care.facility.models import PatientConsultation, PatientRegistration, Facility
from care.facility.models.prescription_supplier import PrescriptionSupplier
from care.utils.serializer.external_id_field import ExternalIdSerializerField
from config.serializers import ChoiceField
class MinimalPatientSerializer(serializers.ModelSerializer):
    """Compact patient representation: external id plus contact basics."""

    # Expose the opaque external_id as the public "id".
    id = serializers.CharField(source="external_id")

    class Meta:
        model = PatientRegistration
        fields = ("id", "name", "phone_number", "address")
class PrescriptionSupplierConsultationSerializer(serializers.ModelSerializer):
    """Consultation summary embedded in supplier payloads (read-oriented)."""

    # Public id is the external_id; never writable through this serializer.
    id = serializers.CharField(source="external_id", read_only=True)
    # Nested, read-only patient snapshot.
    patient = MinimalPatientSerializer(read_only=True)

    class Meta:
        model = PatientConsultation
        fields = ("id", "prescriptions", "discharge_advice", "patient")
class PrescriptionSupplierSerializer(serializers.ModelSerializer):
    """Full read/write serializer for PrescriptionSupplier.

    Writable ``consultation``/``facility`` accept external ids; the
    ``*_object`` fields provide read-only nested expansions of the same
    relations.  ``updated_user`` is stamped from the request on every
    create/update.
    """

    id = serializers.CharField(source="external_id", read_only=True)
    scheme = ChoiceField(choices=PrescriptionSupplier.SchemeChoices)
    status = ChoiceField(choices=PrescriptionSupplier.StatusChoices)
    consultation_object = PrescriptionSupplierConsultationSerializer(source="consultation", read_only=True)
    facility_object = FacilityBasicInfoSerializer(source="facility", read_only=True)
    consultation = ExternalIdSerializerField(required=True, queryset=PatientConsultation.objects.all())
    facility = ExternalIdSerializerField(required=True, queryset=Facility.objects.all())

    class Meta:
        model = PrescriptionSupplier
        exclude = ("deleted", "external_id")
        read_only_fields = TIMESTAMP_FIELDS

    def create(self, validated_data):
        """Create the instance and stamp the requesting user on it."""
        instance = super().create(validated_data)
        instance.updated_user = self.context["request"].user
        instance.save()
        # BUG FIX: DRF requires create() to return the created instance;
        # the original returned None, breaking serializer.data after save().
        return instance

    def update(self, instance, validated_data):
        """Update the instance and stamp the requesting user on it."""
        instance = super().update(instance, validated_data)
        instance.updated_user = self.context["request"].user
        instance.save()
        return instance
|
python
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from azure_devtools.perfstress_tests import PerfStressTest
from azure.identity import ClientSecretCredential, TokenCachePersistenceOptions
from azure.identity.aio import ClientSecretCredential as AsyncClientSecretCredential
try:
from dotenv import load_dotenv
load_dotenv()
except ImportError:
pass
class PersistentCacheRead(PerfStressTest):
    """Perf test measuring token reads served from the persistent cache.

    global_setup primes the cache once; run_sync/run_async then repeatedly
    fetch the (cached) token through sync and async credentials.
    """

    def __init__(self, arguments):
        super().__init__(arguments)
        # Service-principal settings come from the environment / .env file.
        client_id = self.get_from_env("AZURE_CLIENT_ID")
        tenant_id = self.get_from_env("AZURE_TENANT_ID")
        secret = self.get_from_env("AZURE_CLIENT_SECRET")
        # allow_unencrypted_storage lets the test run on hosts without an
        # OS keyring (tokens are then persisted in plaintext).
        cache_options = TokenCachePersistenceOptions(allow_unencrypted_storage=True)
        self.credential = ClientSecretCredential(tenant_id, client_id, secret, cache_persistence_options=cache_options)
        self.async_credential = AsyncClientSecretCredential(
            tenant_id, client_id, secret, cache_persistence_options=cache_options
        )
        self.scope = "https://vault.azure.net/.default"

    async def global_setup(self):
        """Cache an access token"""
        await super().global_setup()
        # Prime both credentials so the timed runs hit the cache, not AAD.
        self.credential.get_token(self.scope)
        await self.async_credential.get_token(self.scope)

    def run_sync(self):
        # Timed operation: token retrieval through the sync credential.
        self.credential.get_token(self.scope)

    async def run_async(self):
        # Timed operation: token retrieval through the async credential.
        await self.async_credential.get_token(self.scope)

    async def close(self):
        await self.async_credential.close()
        await super().close()
|
python
|
import re, hashlib, random, json, csv, sys
from datetime import datetime, timedelta, tzinfo
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.cache import caches
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.core.files.uploadhandler import MemoryFileUploadHandler
from django.core.validators import validate_email
from django.db.models import ProtectedError
from django.forms import ValidationError
from django.forms.models import modelformset_factory, inlineformset_factory
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.shortcuts import render_to_response, get_object_or_404, redirect, render
from django.template import RequestContext
from django.utils.datastructures import MultiValueDictKeyError
from django.views.defaults import page_not_found, permission_denied, bad_request
from itertools import chain
from polls import models
from polls.includes import forms, email_messages
from pprint import pprint
#################################################
#### CACHE-BASED MESSAGE AND PARAMETER PASSING ####
#################################################
# Creates a one-shot message that will be shown on the next page.
def set_cache_message(user, msg_type, msg):
    """Store a per-user flash message of *msg_type* in the default cache."""
    if not user.is_authenticated():
        return
    # Map the message type onto its cache-key suffix; anything unknown
    # falls back to an informational message.
    suffix_by_type = {
        'error': 'error_msg',
        'warning': 'warning_msg',
        'success': 'success_msg',
    }
    key = suffix_by_type.get(msg_type, 'info_msg')
    hashed_key = hashlib.sha256(('%d_%s' % (user.pk, key)).encode('utf-8')).hexdigest()
    caches['default'].set(hashed_key, msg)
# Reads (and clears) any pending flash messages for the user.
def caches_messages(user):
    """Return and clear the (error, warning, success, info) messages for *user*."""
    if not user.is_authenticated():
        return
    cache = caches['default']
    collected = []
    for suffix in ('error_msg', 'warning_msg', 'success_msg', 'info_msg'):
        key = hashlib.sha256(('%d_%s' % (user.pk, suffix)).encode('utf-8')).hexdigest()
        collected.append(cache.get(key, None))
        # One-shot semantics: clear the slot so each message shows only once.
        cache.set(key, None)
    return tuple(collected)
def set_cache_param(user, name, value):
    """Stash *value* under a per-user key so a later view can pick it up."""
    if not user.is_authenticated():
        return
    digest = hashlib.sha256(('%d_%s' % (user.pk, name)).encode('utf-8')).hexdigest()
    caches['default'].set(digest, value)
def caches_param(user, name):
    """Return and clear the cached per-user parameter *name* (None if absent)."""
    if not user.is_authenticated():
        return
    cache = caches['default']
    digest = hashlib.sha256(('%d_%s' % (user.pk, name)).encode('utf-8')).hexdigest()
    value = cache.get(digest, None)
    # One-shot semantics: clear after reading.
    cache.set(digest, None)
    return value
#################################################
#################################################
def login_view(request):
    """Combined login + registration page.

    GET renders both forms; POST dispatches on the hidden 'wichform' field:
    'registration' creates an inactive user and emails an activation link,
    anything else attempts authentication.  Already-active users are
    redirected straight to ?next= (or the home page).
    """
    login_active = "active"
    login_form = forms.LoginForm()
    reg_form = forms.RegisterForm()
    # Default JS hides the registration modal; it is re-shown on reg errors.
    js_actions = "$('#registerForm').modal('hide')"
    error_msg = ''
    register_error = ''
    info_msg = ''
    # Already-authenticated users skip the form entirely.
    if request.user is not None and request.user.is_active:
        try:
            redir = request.GET['next'];
        except MultiValueDictKeyError:
            redir = '/polls/home/';
        return HttpResponseRedirect(redir)
    if (request.method == 'POST'):
        if (request.POST['wichform'] == 'registration'):
            reg_form = forms.RegisterForm(request.POST)
            if reg_form.is_valid():
                password = request.POST['password']
                first_name = request.POST['first_name']
                last_name = request.POST['last_name']
                email = request.POST['email']
                # The email doubles as the username; accounts start inactive
                # until the activation link is followed.
                new_user = User.objects.create_user(username=email, password=password, first_name=first_name, last_name=last_name, email=email)
                new_user.is_active = False
                new_user.save()
                # Send activation email
                salt = hashlib.sha256(str(random.getrandbits(256)).encode('utf-8')).hexdigest()[:5]
                activation_key = hashlib.sha256((salt+email).encode('utf-8')).hexdigest()
                # Activation key is valid for 2 days.
                key_expires = datetime.now() + timedelta(2)
                new_user_profile = models.UserProfile(user=new_user, activation_key=activation_key, key_expires=key_expires)
                new_user_profile.save()
                new_user_profile.send_activation_email()
                reg_form = forms.RegisterForm()
                info_msg = "Thank you for your registration. You will now receive an activation email. Please activate your account within the next 48 hours."
            else:
                # Keep the modal open so the user sees the form errors.
                js_actions = "$('#registerForm').modal('show')"
        else:
            login_form = forms.LoginForm(request.POST)
            email = request.POST['email']
            password = request.POST['password']
            user = authenticate(username=email, password=password)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    try:
                        redir = request.GET['next'];
                    except MultiValueDictKeyError:
                        redir = '/polls/home/';
                    return HttpResponseRedirect(redir)
                else:
                    info_msg = 'Your user has not been activated yet. If the problem persist, please contact us.'
            else:
                error_msg = 'Wrong username or password. Please try again.'
    return render(
        request,
        'polls/login.html',
        context={
            'login_form': login_form,
            'reg_form': reg_form,
            'error_msg': error_msg,
            'info_msg': info_msg,
            'js_actions': js_actions,
            'login_active': login_active,
            'register_error': register_error
        }
    )
def logout_view(request):
    """Terminate the session and send the user back to the login page."""
    logout(request)
    return redirect('/polls/login/')
def activate_account(request, activation_key):
    """Activate the account matching *activation_key* and report the outcome.

    FIX: corrected typos in the user-facing success message
    ("Congratulatins" -> "Congratulations", "succesfully" -> "successfully").
    """
    msg = ''
    user_profile = None
    status = False
    try:
        user_profile = models.UserProfile.objects.get(activation_key=activation_key)
        # activate_account() returns False when the key has expired.
        status = user_profile.activate_account(activation_key)
        if not status:
            msg = 'Sorry, your activation link has expired. Please register again.'
        else:
            msg = 'Congratulations! You have activated your account successfully. You can now login into BBPolls.'
    except ObjectDoesNotExist:
        msg = "Sorry, your account could not be found or you have already activated your account."
    return render(request, 'polls/activate_account.html',
        {'user_profile':user_profile, 'msg':msg, 'status':status});
@login_required(login_url='/polls/login')
def polls_index(request):
    """Dashboard of the current pollster's polls, split by status.

    Non-members of the "sys_pollsters" group are bounced to the home page
    with a cached error message.
    """
    mypolls_active = 'active'
    js_file = "polls_index.js"
    # Checking pollster permission via group membership.
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    # One queryset per status tab, each with its own natural ordering.
    published_polls = models.Poll.objects.filter(user=request.user, poll_status=models.Poll.ST_PUBLISHED).order_by("publish_date")
    draft_polls = models.Poll.objects.filter(user=request.user, poll_status=models.Poll.ST_DRAFT).order_by("-last_modified")
    archived_polls = models.Poll.objects.filter(user=request.user, poll_status=models.Poll.ST_ARCHIVED).order_by("-archive_date")
    # send_poll_form = forms.SendPollForm()
    # Pull (and clear) any one-shot flash messages left by a previous view.
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    return render(request, 'polls/polls_index.html',
        {'published_polls':published_polls,
        'username':request.user.username,
        'draft_polls':draft_polls,
        'archived_polls':archived_polls,
        'error_msg':error_msg,
        'warning_msg':warning_msg,
        'success_msg':success_msg,
        'info_msg':info_msg,
        'js_file':js_file,
        'send_poll_form':forms.SendPollForm(),
        'is_pollster':is_pollster,
        'mypolls_active':mypolls_active});
@login_required(login_url='/polls/login')
def send_poll(request, poll_id):
    """Email poll invitations to the addresses submitted via POST.

    Accepts addresses separated by newlines, commas, semicolons and/or
    whitespace -- in any combination.  Invalid entries are silently
    dropped, duplicates are removed, and the view always redirects back
    to the poll index.

    FIX: the original chose a single delimiter per line (comma, else
    semicolon, else space), so lines mixing delimiters silently lost
    addresses; a single regex split handles every combination.
    """
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id)
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry! Poll not found")
        return HttpResponseRedirect("/polls/my-polls/")
    if (request.method == 'POST'):
        emails_text = request.POST["emails"]
        emails = []
        # Split on any run of whitespace (incl. newlines), commas or
        # semicolons, then keep only syntactically valid addresses.
        for candidate in re.split(r'[\s,;]+', emails_text):
            if candidate == "":
                continue
            try:
                validate_email(candidate)
                emails.append(candidate)
            except ValidationError:
                continue
        # De-duplicate (order is irrelevant to Poll.send_poll).
        emails = list(set(emails))
        if not emails:
            set_cache_message(request.user, "warning", "No emails were found")
        else:
            poll.send_poll(emails)
            set_cache_message(request.user, "success", "Invitations sent!")
    return HttpResponseRedirect("/polls/my-polls/")
@login_required(login_url='/polls/login')
def publish_poll(request, poll_id):
    """Move a draft poll to the published state.

    Archived polls and polls with unanswerable questions are rejected with
    a cached error message.  FIX: repaired the garbled user-facing error
    message ("Is not possible ... cannot be proeprly answered").
    """
    # Checking pollster permission.
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id)
        if (poll.poll_status == models.Poll.ST_ARCHIVED):
            set_cache_message(request.user, "error", "Sorry! An archived poll cannot be unarchived")
            return HttpResponseRedirect("/polls/my-polls/")
        elif (not poll.is_doable):
            set_cache_message(request.user, "error", "Sorry! It is not possible to publish this poll. At least one question in this poll cannot be properly answered")
            return HttpResponseRedirect("/polls/my-polls/")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry! Poll not found")
        return HttpResponseRedirect("/polls/my-polls/")
    pprint("PUBLISH Current status: %s" % poll.poll_status, sys.stdout)
    poll.poll_status = models.Poll.ST_PUBLISHED
    poll.publish_date = datetime.now()
    poll.save()
    pprint("PUBLISH Current status: %s" % poll.poll_status, sys.stdout)
    return HttpResponseRedirect('/polls/my-polls/')
@login_required(login_url='/polls/login')
def archive_poll(request, poll_id):
    """Move a published poll to the archived state.

    Drafts cannot be archived; incomplete responses are purged first.
    """
    # Checking pollster permission.
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id)
        if (poll.poll_status == models.Poll.ST_DRAFT):
            set_cache_message(request.user, "error", "Sorry! Only published polls may be archived")
            return HttpResponseRedirect("/polls/my-polls/")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry! Poll not found")
        return HttpResponseRedirect("/polls/my-polls/")
    # Discard half-finished responses before freezing the poll.
    models.Response.objects.filter(poll=poll, is_completed=False).delete()
    pprint("ARCHIVE Current status: %s" % poll.poll_status, sys.stdout)
    poll.poll_status = models.Poll.ST_ARCHIVED
    poll.archive_date = datetime.now()
    poll.save()
    pprint("ARCHIVE Current status: %s" % poll.poll_status, sys.stdout)
    return HttpResponseRedirect('/polls/my-polls/')
@login_required(login_url='/polls/login')
def unpublish_poll(request, poll_id):
    """Move a published poll back to draft, if nobody has answered it yet.

    FIX: repaired the user-facing message ("cannot be unpublish" ->
    "cannot be unpublished") and used queryset .exists() instead of
    truthiness, which avoids fetching the rows just to test emptiness.
    """
    # Checking pollster permission.
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id)
        if (poll.poll_status == models.Poll.ST_ARCHIVED):
            set_cache_message(request.user, "error", "Sorry! An archived poll cannot be unarchived")
            return HttpResponseRedirect("/polls/my-polls/")
    except ObjectDoesNotExist:
        set_cache_message(request.user, "error", "Sorry! Poll not found")
        return HttpResponseRedirect("/polls/my-polls/")
    if (models.Response.objects.filter(poll=poll, is_completed=True).exists()):
        set_cache_message(request.user, "error", "Sorry! This poll has already been answered and cannot be unpublished.")
        return HttpResponseRedirect("/polls/my-polls/")
    # Drop half-finished responses before reverting to draft.
    models.Response.objects.filter(poll=poll, is_completed=False).delete()
    pprint("UNPUBLISH Current status: %s" % poll.poll_status, sys.stdout)
    poll.poll_status = models.Poll.ST_DRAFT
    poll.save()
    pprint("UNPUBLISH New status: %s" % poll.poll_status, sys.stdout)
    return HttpResponseRedirect('/polls/my-polls/')
@login_required(login_url='/polls/login')
def create_poll(request):
    """Create a new poll, either from scratch or by importing a JSON export.

    The POST dispatches on which submit button was used ('create' vs
    'import'); both paths redirect to the new poll's manage page on success.
    """
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    mypolls_active = 'active'
    js_actions = "$('[data-toggle=\"tooltip\"]').tooltip({html: true})"
    create_form = forms.PollCreateForm(request.POST or None, prefix="create");
    import_form = forms.PollImportForm(request.POST or None, request.FILES or None, prefix="import")
    if(request.method == 'POST'): # Create
        if ('create' in request.POST):
            if create_form.is_valid():
                poll_name = request.POST['create-name'];
                p = models.Poll(name=poll_name, user=request.user)
                p.save();
                return HttpResponseRedirect('/polls/manage-poll/%d/' % p.pk)
        elif ('import' in request.POST): # Import
            if (import_form.is_valid()):
                # Check size
                # Assemble the uploaded file chunk by chunk before parsing.
                data = b''
                for chunk in request.FILES['import-import_file'].chunks():
                    data+=chunk
                json_data = json.loads(data)
                try:
                    poll = models.Poll.import_poll(json_data, request.user)
                    return HttpResponseRedirect('/polls/manage-poll/%d/' % poll.pk)
                except ValidationError as ve:
                    # Surface the import error on the form field itself.
                    import_form.errors["import_file"] = [ve.messages[0]]
    return render(request, 'polls/create-poll.html',
        {'create_form':create_form,
        'username':request.user.username,
        'import_form':import_form,
        'js_actions':js_actions,
        'is_pollster':is_pollster,
        'mypolls_active':mypolls_active});
@login_required(login_url='/polls/login')
def manage_poll(request, poll_id):
    """Edit page for one poll: poll metadata form plus its ordered questions.

    Editing is only allowed while the poll is a draft; otherwise the form
    is rendered disabled.  A cached 'scroll' parameter (set by question
    views) restores the previous scroll position via injected JS.
    """
    mypolls_active = 'active'
    js_file = "manage-poll.js"
    js_actions = "$('[data-toggle=\"tooltip\"]').tooltip({html: true});"
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    # One-shot scroll offset left behind by a previous request, if any.
    scroll = caches_param(request.user, "scroll")
    pprint(scroll, sys.stderr)
    if scroll:
        js_actions += "$('body').scrollTop(%s)" % scroll
    try:
        # Only the poll's owner may manage it.
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        poll_form = forms.PollForm(instance = poll)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The poll you are trying to access does not exist anymore.");
        return HttpResponseRedirect("/polls/my-polls/")
    can_edit = poll.poll_status == models.Poll.ST_DRAFT;
    if not can_edit:
        poll_form.disable()
    question_queryset = models.Question.objects.filter(poll=poll).order_by('order');
    if (request.method == 'POST' and can_edit):
        poll_form = forms.PollForm(request.POST, instance=poll)
        if poll_form.is_valid():
            poll_form.save()
            return HttpResponseRedirect('/polls/my-polls/')
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    return render(request, 'polls/manage-poll.html',
        {'poll_form':poll_form,
        'username':request.user.username,
        'question_queryset':question_queryset,
        'poll':poll,
        'js_file':js_file,
        'js_actions':js_actions,
        'error_msg':error_msg,
        'warning_msg':warning_msg,
        'success_msg':success_msg,
        'info_msg':info_msg,
        'mypolls_active':mypolls_active,
        'is_pollster':is_pollster,
        'can_edit':can_edit});
@login_required(login_url='/polls/login')
def add_question(request, poll_id):
    """Create a new question (with its inline choices) inside one of the user's polls.

    GET renders an empty question form plus a three-row choice formset.
    POST either saves the question ('Save' / 'Save and add new') or, for the
    "number-choices" button, re-renders the page with a different number of
    blank choice rows.  Polls that already have a completed response are
    locked against editing.
    """
    mypolls_active = 'active'
    js_actions = "$('[data-toggle=\"tooltip\"]').tooltip({html: true})"
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "The poll you are trying to create a question within, does not exist anymore.")
        return HttpResponseRedirect("/polls/my-polls/")
    # Bug fix: the old get()-based existence check raised an uncaught
    # MultipleObjectsReturned as soon as the poll had more than one
    # completed response; exists() handles any number of rows.
    if models.Response.objects.filter(poll=poll, is_completed=True).exists():
        set_cache_message(request.user, 'error', "Sorry! The poll has been already answered and cannot be edited.")
        return HttpResponseRedirect("/polls/my-polls/")
    question_form = forms.AddQuestionForm(request.POST or None)
    BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=3, can_delete=False)
    choice_formset = BaseChoiceFormset()
    if request.method == 'POST':
        submit = request.POST['submit']
        if submit in ('Save', 'Save and add new'):
            # Both save actions share the same save logic; only the redirect
            # target differs.  (The old code duplicated this block verbatim.)
            if question_form.is_valid():
                new_question = question_form.save(commit=False)
                new_question.poll = poll
                new_question.save()
                choice_formset = BaseChoiceFormset(request.POST, instance=new_question)
                # NOTE(review): as in the original code, an invalid choice
                # formset is silently dropped -- the question is still saved
                # and reported as created.  Confirm this is intended.
                if choice_formset.is_valid():
                    choice_formset.save()
                set_cache_message(request.user, 'success', 'New question created')
                if submit == 'Save':
                    return HttpResponseRedirect('/polls/manage-poll/%s/' % poll_id)
                return HttpResponseRedirect('/polls/manage-poll/%s/add-question/' % poll_id)
        else:
            # "number-choices" button: re-render with extra blank choice rows.
            more_choices = request.POST['number-choices']
            if not more_choices:
                more_choices = 0
            try:
                more_choices = int(more_choices)
                if more_choices < 0:
                    more_choices = 0
                BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=more_choices+3, can_delete=False)
            except ValueError:
                # Non-numeric input: fall back to the default three blank rows.
                BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=3, can_delete=False)
            choice_formset = BaseChoiceFormset()
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    return render(request, 'polls/manage-question.html',
                  {'question_form':question_form,
                   'username':request.user.username,
                   'poll':poll, 'choice_formset':choice_formset,
                   'question_index':poll.number_questions+1,
                   'create_question':True,
                   'js_actions':js_actions,
                   'error_msg':error_msg,
                   'warning_msg':warning_msg,
                   'success_msg':success_msg,
                   'info_msg':info_msg,
                   'is_pollster':is_pollster,
                   'mypolls_active':mypolls_active});
@login_required(login_url='/polls/login')
def increase_question_order(request, poll_id, question_id, scroll):
    """Move a question one position up in its poll, then return to the poll page.

    The scroll offset is cached so manage_poll can restore the view position.
    """
    # Pollster permission gate: non-pollsters are bounced to the home page.
    if not request.user.groups.filter(name="sys_pollsters").exists():
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        owned_poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        target = models.Question.objects.get(pk=question_id, poll=owned_poll)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The question you are trying to increase order to does not exist anymore");
        return HttpResponseRedirect("/polls/manage-poll/%s/" % poll_id)
    # Debug trace of the scroll offset (kept from the original).
    pprint(scroll, sys.stderr)
    if scroll:
        set_cache_param(request.user, "scroll", scroll)
    target.increase_order()
    return HttpResponseRedirect("/polls/manage-poll/%s/" % poll_id)
@login_required(login_url='/polls/login')
def decrease_question_order(request, poll_id, question_id, scroll):
    """Move a question one position down in its poll, then return to the poll page.

    The scroll offset is cached so manage_poll can restore the view position.
    """
    # Pollster permission gate: non-pollsters are bounced to the home page.
    if not request.user.groups.filter(name="sys_pollsters").exists():
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        owned_poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        target = models.Question.objects.get(pk=question_id, poll=owned_poll)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The question you are trying to decrease order to does not exist anymore");
        return HttpResponseRedirect("/polls/manage-poll/%s/" % poll_id)
    # Debug trace of the scroll offset (kept from the original).
    pprint(scroll, sys.stderr)
    if scroll:
        set_cache_param(request.user, "scroll", scroll)
    target.decrease_order()
    return HttpResponseRedirect('/polls/manage-poll/%s/' % poll_id)
@login_required(login_url='/polls/login')
def manage_question(request, poll_id, question_id):
    # View: edit a single question (text, inline choices) and list its
    # multimedia sources.  Editing is disabled once the poll leaves draft
    # status; the forms are then rendered read-only.
    mypolls_active = 'active'
    manage_only = 'manage-only'
    js_file = "manage-question.js"
    js_actions = "$('[data-toggle=\"tooltip\"]').tooltip({html: true})"
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        question = models.Question.objects.get(pk=question_id, poll=poll)
    except ObjectDoesNotExist:
        # NOTE(review): message says "delete" but this is the edit view
        # (looks copy-pasted from remove_question).
        set_cache_message(request.user, 'error', "The question you are trying to delete does not exist anymore.")
        return HttpResponseRedirect("/polls/manage-poll/%s/" % poll_id)
    can_edit = poll.poll_status == models.Poll.ST_DRAFT;
    # 1-based position of this question within the poll (for display only).
    i = 0;
    for q in models.Question.objects.filter(poll=poll):
        i +=1;
        if (q.pk == question.pk):
            break;
    BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=0)
    multimedia_sources = models.MultimediaSource.objects.filter(question=question).order_by('media_type')
    choice_formset = BaseChoiceFormset(request.POST or None, instance=question)
    if (request.method == 'POST' and can_edit):
        if (request.POST['submit'] == 'Save'):
            question_form = forms.QuestionForm(request.POST, instance=question)
            if question_form.is_valid():
                question = question_form.save()
                if choice_formset.is_valid():
                    choice_formset.save();
                return HttpResponseRedirect('/polls/manage-poll/%s/' % poll_id)
            # NOTE(review): when the form is invalid, it is rebuilt from the
            # instance below, so the user's validation errors are discarded.
        else:
            # "number-choices" button: rebuild the formset with the requested
            # number of extra blank choice rows (0 on bad input).
            more_choices = request.POST['number-choices']
            if not more_choices:
                more_choices=0
            try:
                BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=int(more_choices))
            except ValueError:
                BaseChoiceFormset = inlineformset_factory(models.Question, models.Choice, form=forms.ChoiceForm, extra=0)
    question_form = forms.QuestionForm(instance=question)
    choice_formset = BaseChoiceFormset(instance=question)
    if not can_edit:
        # Published polls are read-only: disable every form on the page.
        question_form.disable()
        for choice_form in choice_formset:
            choice_form.disable()
    # Per-media-type availability banners for the template (green when at
    # least one source of that type exists, red otherwise).
    video_message = "You have %d video sources available" % question.number_video_srcs
    if (question.number_video_srcs > 0):
        video_class = "alert-success"
    else:
        video_class = "alert-danger"
    audio_message = "You have %d audio sources available" % question.number_audio_srcs
    if (question.number_audio_srcs > 0):
        audio_class = "alert-success"
    else:
        audio_class = "alert-danger"
    image_message = "You have %d image sources available" % question.number_image_srcs
    if (question.number_image_srcs > 0):
        image_class = "alert-success"
    else:
        image_class = "alert-danger"
    iframe_message = "You have %d iframe sources available" % question.number_iframe_srcs
    if (question.number_iframe_srcs > 0):
        iframe_class = "alert-success"
    else:
        iframe_class = "alert-danger"
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    return render(request, 'polls/manage-question.html',
                  {'question_form':question_form,
                   'username':request.user.username,
                   'poll':poll,
                   'question_index':i,
                   'question_pk':question_id,
                   'choice_formset':choice_formset,
                   'multimedia_sources':multimedia_sources,
                   'manage_only':manage_only,
                   'mypolls_active':mypolls_active,
                   'create_question':False,
                   'error_msg':error_msg,
                   'warning_msg':warning_msg,
                   'success_msg':success_msg,
                   'info_msg':info_msg,
                   'image_message': image_message,
                   'image_class': image_class,
                   'audio_message': audio_message,
                   'audio_class': audio_class,
                   'video_message': video_message,
                   'video_class': video_class,
                   'iframe_message': iframe_message,
                   'iframe_class': iframe_class,
                   'js_file': js_file,
                   'js_actions' : js_actions,
                   'is_pollster':is_pollster,
                   'can_edit':can_edit});
@login_required(login_url='/polls/login')
def clone_poll(request, poll_id):
    """Duplicate one of the current user's polls, then return to 'My polls'."""
    # Pollster permission gate: non-pollsters are bounced to the home page.
    if not request.user.groups.filter(name="sys_pollsters").exists():
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        models.Poll.objects.get(pk=poll_id, user=request.user).clone()
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The poll you are trying to clone does not exist anymore.")
    # Always land back on the poll list, whether or not the clone succeeded.
    return HttpResponseRedirect('/polls/my-polls/')
@login_required(login_url='/polls/login')
def remove_poll(request, poll_id):
    """Delete one of the user's polls, then return to the 'My polls' list.

    Polls that already have responses are protected at the database level
    (ProtectedError) and cannot be removed.
    """
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        poll.delete()
    except ObjectDoesNotExist:
        # Bug fix: the message used to say "clone" (copy-pasted from clone_poll).
        set_cache_message(request.user, 'error', "Sorry! The poll you are trying to remove does not exist anymore.")
    except ProtectedError:
        set_cache_message(request.user, 'error', "Sorry! The poll has been already answered and cannot be removed.")
    return HttpResponseRedirect('/polls/my-polls/')
@login_required(login_url='/polls/login')
def remove_question(request, poll_id, question_id):
    """Delete a question from one of the user's polls.

    The question is only removed while the poll has no completed responses;
    otherwise the poll is locked and the user is redirected with an error.
    """
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        question = models.Question.objects.get(pk=question_id, poll=poll)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "The question you are trying to delete does not exist anymore.")
        return HttpResponseRedirect('/polls/manage-poll/%s/' % poll_id)
    # Bug fix: the answered-poll check used to run *after* question.delete(),
    # so locked questions were deleted anyway.  Check first, and use exists()
    # so multiple completed responses cannot raise MultipleObjectsReturned.
    # (Message wording also aligned with the identical check in add_question.)
    if models.Response.objects.filter(poll=poll, is_completed=True).exists():
        set_cache_message(request.user, 'error', "Sorry! The poll has been already answered and cannot be edited.")
        return HttpResponseRedirect('/polls/my-polls/')
    question.delete()
    set_cache_message(request.user, 'success', "Question successfully removed")
    return HttpResponseRedirect('/polls/manage-poll/%s/' % poll.pk)
@login_required(login_url='/polls/login')
def add_multimedia_source(request, poll_id, question_id, source='url'):
    """Attach a multimedia source to a question (URL-based sources only).

    The 'file' source kind is a placeholder; any non-'url' kind just renders
    the empty URL form.  Polls with a completed response are locked.
    """
    mypolls_active = "active"
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        question = models.Question.objects.get(pk=question_id, poll=poll)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "The question you are trying to delete does not exist anymore.")
        return HttpResponseRedirect('/polls/manage-poll/%s/' % poll_id)
    # Bug fix: exists() instead of get() -- get() raised an uncaught
    # MultipleObjectsReturned once the poll had several completed responses.
    if models.Response.objects.filter(poll=poll, is_completed=True).exists():
        set_cache_message(request.user, 'error', "Sorry! The poll has been already answered and cannot be edited.")
        return HttpResponseRedirect("/polls/my-polls/")
    # 1-based position of the question inside its poll (for display only).
    i = 0
    for q in models.Question.objects.filter(poll=poll):
        i += 1
        if q.pk == question.pk:
            break
    # Bug fix: multimedia_form was left unbound (NameError at render time)
    # for source kinds other than 'url'; default to an empty URL form.
    multimedia_form = forms.MultimediaSourceFormURL()
    if source == 'url' and request.method == 'POST':
        multimedia_form = forms.MultimediaSourceFormURL(request.POST)
        if multimedia_form.is_valid():
            try:
                mmsrc = multimedia_form.save(commit=False)
                mmsrc.question = question
                # Raises ValidationError when the URL's MIME type is unsupported.
                mmsrc.validate_mime_type()
                mmsrc.save()
                set_cache_message(request.user, 'success', "Multimedia source successfully created")
                return HttpResponseRedirect('/polls/manage-poll/%s/manage-question/%s/' % (poll.pk, question.pk))
            except ValidationError as ve:
                # Re-render the bound form with the MIME validation error attached.
                multimedia_form = forms.MultimediaSourceFormURL(request.POST)
                multimedia_form.errors["url_source"] = [ve.messages[0]]
    return render(request, 'polls/add-multimedia-source.html',
                  {'multimedia_form':multimedia_form,
                   'username':request.user.username,
                   'poll':poll,
                   'question':question,
                   'question_index':i,
                   'mypolls_active':mypolls_active})
@login_required(login_url='/polls/login')
def remove_multimedia_source(request, poll_id, question_id, mmsrc_id):
    """Delete a multimedia source from one of the user's questions."""
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        question = models.Question.objects.get(pk=question_id, poll=poll)
        mmsrc = models.MultimediaSource.objects.get(pk=mmsrc_id, question=question)
        mmsrc.delete()
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "The source you are trying to delete does not exist anymore.")
        # Bug fix: redirect with the URL parameters -- 'poll'/'question' are
        # unbound (NameError) when the Poll or Question lookup itself failed.
        return HttpResponseRedirect("/polls/manage-poll/%s/manage-question/%s/" % (poll_id, question_id))
    set_cache_message(request.user, 'success', "Multimedia source successfully removed")
    return HttpResponseRedirect("/polls/manage-poll/%s/manage-question/%s/" % (poll.pk, question.pk))
def do_survey(request, poll_id, try_poll=False, invitation_key=None):
    # Serve a poll to a respondent and record submitted answers.
    #
    # Access modes:
    #   * invitation_key set  -> anonymous respondent via AnonymousInvitation
    #   * authenticated user  -> normal respondent (access type is checked)
    #   * neither             -> redirect to login
    # try_poll=True renders a preview for the owner without saving answers.
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
    try:
        poll = models.Poll.objects.get(pk=poll_id)
    except ObjectDoesNotExist:
        if try_poll:
            set_cache_message(request.user, 'error', "Poll not found.")
            return HttpResponseRedirect('/polls/my-polls/')
        else:
            set_cache_message(request.user, 'error', "Poll not found.")
            return HttpResponseRedirect('/polls/home/')
    # Check that the user has permission to access the poll.
    if (invitation_key is not None):
        try:
            poll_invitation = models.AnonymousInvitation.objects.get(poll=poll, key=invitation_key)
            anonymous_poll = True
            # An invitation whose response is already completed may not vote again.
            if (poll_invitation.response is not None and poll_invitation.response.is_completed):
                return HttpResponseRedirect('/polls/login/')
        except ObjectDoesNotExist:
            return HttpResponseRedirect('/polls/login/?next=/polls/do-poll/%d/' % poll.pk)
    elif (request.user.is_authenticated()):
        print("auth user")
        if (poll.access_type != models.Poll.AT_PUBLIC):
            # NOTE(review): "request.user.groups not in poll.allowed_groups.all()"
            # compares the related *manager* against a Group queryset, not the
            # user's groups, so the group-membership clause looks like it can
            # never match -- confirm the intended check.
            if ((request.user not in poll.allowed_users.all()
                and request.user.groups not in poll.allowed_groups.all())
                and (request.user != poll.user and not try_poll)):
                print("not allowed user")
                set_cache_message(request.user, 'error', "Sorry! You don't have permission to access this poll.")
                return HttpResponseRedirect('/polls/home/')
        anonymous_poll = False;
    else:
        print("neither invitation_key, neither allowed_user")
        return HttpResponseRedirect('/polls/login/?next=%s' % request.path)
    # Question order: shuffled per render when the poll asks for it.
    if (poll.randomize_questions):
        questions = models.Question.objects.filter(poll=poll).order_by('?')
    else:
        questions = models.Question.objects.filter(poll=poll).order_by('order')
    choices = models.Choice.objects.filter(question__in=questions)
    # Locate any previous (possibly partial) response for this respondent.
    if (not anonymous_poll):
        try:
            response = models.Response.objects.get(poll=poll, user=request.user)
        except ObjectDoesNotExist:
            response = None
    else:
        response = poll_invitation.response
    error_msg = None;
    if (request.method == 'POST') and not try_poll:
        # Resubmission replaces any previously stored answers wholesale.
        if response is not None:
            response.choices.clear()
            models.Verbatim.objects.filter(response=response).delete()
        else:
            try:
                response = models.Response(poll=poll, user=request.user)
            except ValueError:
                # request.user cannot be assigned (anonymous session): fall
                # back to a user-less response tied to the invitation.
                if anonymous_poll:
                    response = models.Response(poll=poll, user=None)
                    poll_invitation.response = response
                    poll_invitation.save()
                else:
                    set_cache_message(request.user, 'error', "Unexpected error occurred when attempting to save your response. Please contact the administrator.")
                    return HttpResponseRedirect(request.path)
        response.save()
        # Collect the posted "qN_choiceM" fields and attach each selected choice.
        for field, value in request.POST.items():
            if re.match('^q\d*_choice\d*$', field) == None:
                continue
            try:
                choice = models.Choice.objects.get(pk=int(value), question__poll=poll)
                response.choices.add(choice)
                # Free-text choices carry a companion "<pk>_verbatim" field.
                if not choice.is_fixed:
                    v = models.Verbatim(response=response, choice=choice, verbatim=request.POST['%s_verbatim' % choice.pk])
                    v.save()
            except (ObjectDoesNotExist, ValueError):
                error_msg = "Corrupted data, please try again."
                break
        if error_msg:
            # Corrupted submission: discard the whole response.
            set_cache_message(request.user, "error", error_msg)
            response.delete();
        else:
            if request.user.is_authenticated():
                error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
            # The poll counts as complete when every question has at least
            # one chosen choice (choices are peeled off question by question).
            cs = response.choices.all()
            completed = True
            for q in models.Question.objects.filter(poll=poll):
                if not cs.exists():
                    completed = False
                    break
                cs = cs.exclude(question=q)
            if completed: # Complete also saves the Response
                set_cache_message(request.user, "success", "You have successfully completed the poll. Thank you!")
                response.set_complete()
            else:
                set_cache_message(request.user, "info", "The poll has not been completed. You may finish it in the \"Ongoing Polls\" section at the home page.")
            return HttpResponseRedirect('/polls/home/')
    elif request.user.is_authenticated():
        error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    # Anonymous respondents get the non-authenticated template variant.
    if anonymous_poll:
        template = "non-auth-do_survey.html"
        username = None
    else:
        template = "do_survey.html"
        username = request.user.username
    return render(request, 'polls/%s' % template,
                  {'poll':poll,
                   'username':username,
                   'questions':questions,
                   'choices':choices,
                   'response':response,
                   'try_poll':try_poll,
                   'error_msg':error_msg,
                   'anonymous_poll':anonymous_poll,
                   'is_pollster':is_pollster});
@login_required(login_url='/polls/login')
def review_survey(request, poll_id):
    """Read-only review of the current user's completed response to a poll."""
    # Pollster flag only drives navbar rendering here.
    is_pollster = request.user.groups.filter(name="sys_pollsters").exists()
    try:
        poll = models.Poll.objects.get(pk=poll_id)
        response = models.Response.objects.get(poll=poll, user=request.user)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "You have not completed this poll yet.")
        return HttpResponseRedirect('/polls/home/')
    # A partial (uncompleted) response cannot be reviewed either.
    if not response.is_completed:
        set_cache_message(request.user, 'error', "You have not completed this poll yet.")
        return HttpResponseRedirect('/polls/home/')
    questions = models.Question.objects.filter(poll=poll)
    choices = models.Choice.objects.filter(question__in=questions)
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    context = {
        'response': response,
        'username': request.user.username,
        'choices': choices,
        'questions': questions,
        'poll': poll,
        'error_msg': error_msg,
        'is_pollster': is_pollster,
    }
    return render(request, 'polls/review_survey.html', context)
@login_required(login_url='/polls/login')
def remove_response(request, poll_id):
    """Delete the current user's response to the given poll, then go home."""
    try:
        models.Response.objects.get(poll__pk=poll_id, user=request.user).delete()
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "You have not completed this poll yet.")
    # Always land back on the home page, whether or not a response existed.
    return HttpResponseRedirect('/polls/home/')
@login_required(login_url='/polls/login')
def home(request):
    # Home dashboard: polls the user may answer, plus their completed and
    # ongoing responses.
    home_active = 'active'
    # Checking pollster permission
    try:
        g = request.user.groups.get(name='sys_pollsters')
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
    # Published, unfinished polls the user can access, split by access type;
    # the user's own polls are excluded everywhere.
    public_polls = models.Poll.objects.filter(poll_status=models.Poll.ST_PUBLISHED, access_type=models.Poll.AT_PUBLIC, is_finished=False).exclude(user=request.user)
    restricted_polls = models.Poll.objects.filter(poll_status=models.Poll.ST_PUBLISHED, access_type=models.Poll.AT_RESTRICTED, allowed_groups__in=request.user.groups.all(), is_finished=False).exclude(user=request.user)
    private_polls = models.Poll.objects.filter(poll_status=models.Poll.ST_PUBLISHED, access_type=models.Poll.AT_PRIVATE, allowed_users=request.user, is_finished=False).exclude(user=request.user)
    if (public_polls or restricted_polls or private_polls):
        available_polls = list(chain(public_polls, private_polls, restricted_polls))
    else:
        available_polls = None
    responses = models.Response.objects.filter(user=request.user)
    completed_polls = responses.exclude(is_completed=False)
    # NOTE(review): exclude() with two kwargs only drops responses matching
    # BOTH conditions, so "ongoing" also keeps completed responses whose poll
    # is finished -- confirm whether filter(is_completed=False,
    # poll__is_finished=False) was intended.
    ongoing_polls = responses.exclude(is_completed=True, poll__is_finished=False)
    # Leftover debug traces to stderr.
    pprint("available_polls before: ", stream=sys.stderr)
    pprint(available_polls, stream=sys.stderr)
    # Drop polls the user has already started or answered from the list.
    if responses.exists() and available_polls is not None:
        for response in responses:
            if response.poll in available_polls:
                available_polls.remove(response.poll)
    pprint("available_polls after: ", stream=sys.stderr)
    pprint(available_polls, stream=sys.stderr)
    error_msg, warning_msg, success_msg, info_msg = caches_messages(request.user)
    return render(request, 'polls/home.html',
                  {'available_polls':available_polls,
                   'username':request.user.username,
                   'completed_polls':completed_polls,
                   'ongoing_polls':ongoing_polls,
                   'error_msg':error_msg,
                   'warning_msg':warning_msg,
                   'success_msg':success_msg,
                   'info_msg':info_msg,
                   'home_active':home_active,
                   'is_pollster':is_pollster});
@login_required(login_url='/polls/login')
def view_stats(request, poll_id):
    """Show response statistics for a poll owned by the requesting user."""
    mypolls_active = "active"
    css_file = "view_stats.css"
    js_file = "view_stats.js"
    # Pollster permission gate: non-pollsters are bounced to the home page.
    if not request.user.groups.filter(name="sys_pollsters").exists():
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    is_pollster = True
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The poll you are trying to see the statistics from, does not exists anymore.")
        return HttpResponseRedirect("/polls/manage-poll/%s/" % poll_id)
    questions = models.Question.objects.filter(poll=poll)
    choices = models.Choice.objects.filter(question__in=questions)
    verbatims = models.Verbatim.objects.filter(choice__in=choices)
    # Debug counters (kept from the original implementation).
    print("Preguntas: %d" % questions.count())
    print("Opciones: %d" % choices.count())
    print("Verbatims: %d" % verbatims.count())
    context = {
        'poll': poll,
        'username': request.user.username,
        'questions': questions,
        'choices': choices,
        'verbatims': verbatims,
        'css_file': css_file,
        'mypolls_active': mypolls_active,
        'is_pollster': is_pollster,
        'js_file': js_file,
    }
    return render(request, 'polls/view_stats.html', context)
@login_required(login_url='/polls/login')
def account(request):
    # Account page: a profile edit form plus a password-change form; the
    # POSTed 'submit' value distinguishes which one was submitted.
    account_active = 'active'
    password_error = None
    # user_error is never assigned below; kept for template compatibility.
    user_error = None
    user_form = forms.UserProfileForm(instance=request.user)
    password_form = forms.PasswordChangeForm()
    # Checking pollster permission
    try:
        g = request.user.groups.get(name="sys_pollsters")
        is_pollster = True
    except ObjectDoesNotExist:
        is_pollster = False
    if (request.method == 'POST'):
        if (request.POST['submit'] == 'Save'):
            # Profile-update branch.
            user_form = forms.UserProfileForm(request.POST, instance=request.user)
            if user_form.is_valid():
                user_form.save()
        else:
            # Password-change branch: verify the old password and that both
            # new-password fields match before saving.
            password_form = forms.PasswordChangeForm(request.POST)
            if password_form.is_valid():
                old_password = password_form.cleaned_data['old_password']
                password = password_form.cleaned_data['password']
                cpassword = password_form.cleaned_data['confirm_password']
                if not (request.user.check_password(old_password)):
                    password_error = 'Wrong password. Please try again.'
                elif (password != cpassword):
                    password_error = "New passwords don't match. Please try again."
                else:
                    # NOTE(review): on modern Django, set_password() logs the
                    # user out unless update_session_auth_hash() is also
                    # called -- confirm against the project's Django version.
                    request.user.set_password(password)
                    request.user.save()
    return render(request, 'polls/account.html',
                  {'user_form':user_form,
                   'password_form': password_form,
                   'account_active':account_active,
                   'username':request.user.username,
                   'password_error':password_error,
                   'user_error':user_error,
                   'is_pollster':is_pollster});
def about(request):
    """Render the About page; the template depends on authentication state."""
    about_active = 'active'
    is_pollster = False
    if request.user.is_authenticated():
        template = 'polls/about.html'
        username = request.user.username
        # The pollster flag only affects the authenticated navbar.
        is_pollster = request.user.groups.filter(name="sys_pollsters").exists()
    else:
        username = ''
        template = 'polls/non-auth-about.html'
    return render(request, template,
                  {'about_active': about_active,
                   'is_pollster': is_pollster,
                   'username': username})
def contact(request):
    """Render the Contact page; the template depends on authentication state."""
    contact_active = 'active'
    is_pollster = False
    if request.user.is_authenticated():
        template = 'polls/contact.html'
        username = request.user.username
        # The pollster flag only affects the authenticated navbar.
        is_pollster = request.user.groups.filter(name="sys_pollsters").exists()
    else:
        template = 'polls/non-auth-contact.html'
        username = ''
    return render(request, template,
                  {'contact_active': contact_active,
                   'is_pollster': is_pollster,
                   'username': username})
@login_required(login_url='/polls/login')
def export_poll(request, poll_id):
    """Download one of the user's polls as a JSON file attachment."""
    # Pollster permission gate: non-pollsters are bounced to the home page.
    if not request.user.groups.filter(name="sys_pollsters").exists():
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
    except ObjectDoesNotExist:
        set_cache_message(request.user, 'error', "Sorry! The poll you are trying to export does not exist anymore.")
        return HttpResponseRedirect("/polls/my-polls/%s/" % poll_id)
    # safe=False: the serialized poll may be a non-dict JSON value.
    json_response = JsonResponse(poll.get_json(), safe=False)
    json_response['Content-Disposition'] = 'attachment; filename=%s.json' % poll.name
    return json_response
@login_required(login_url='/polls/login')
def get_csv_stats(request, poll_id, delimiter=','):
    """Download a poll's responses as a CSV attachment (404 if not found)."""
    # Pollster permission gate: non-pollsters are bounced to the home page.
    if not request.user.groups.filter(name="sys_pollsters").exists():
        set_cache_message(request.user, "error", "Sorry, you don't have permission to access this area. Redirecting to home page...")
        return HttpResponseRedirect('/polls/home/')
    try:
        poll = models.Poll.objects.get(pk=poll_id, user=request.user)
        rows = poll.get_responses_csv()
        csv_response = HttpResponse(content_type='text/csv')
        csv_response['Content-Disposition'] = 'attachment; filename=%s_stats.csv' % poll.name
        # Write straight into the response body (it is file-like).
        csv.writer(csv_response, delimiter=delimiter).writerows(rows)
        return csv_response
    except ObjectDoesNotExist:
        return page_not_found(request)
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-05-21 08:19
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11) schema migration for the photos app:
    # drops the 'insta' and 'user_id' fields from 'followers' and stores
    # follower/user as short CharFields instead.
    dependencies = [
        ('photos', '0005_photolikes'),
    ]
    operations = [
        # Remove the old fields...
        migrations.RemoveField(
            model_name='followers',
            name='insta',
        ),
        migrations.RemoveField(
            model_name='followers',
            name='user_id',
        ),
        # ...and replace them with plain character columns.
        migrations.AddField(
            model_name='followers',
            name='follower',
            field=models.CharField(default='', max_length=20),
        ),
        migrations.AlterField(
            model_name='followers',
            name='user',
            field=models.CharField(default='', max_length=20),
        ),
    ]
|
python
|
class Record:
    """Fixed-width record file reader/writer.

    The record layout is looked up by name in a ``field.def`` file whose
    lines have the form ``NAME = <width> AS <field>X, <width> AS <field>X``
    (``#`` starts a comment; the trailing character of each field name is a
    terminator and is dropped by the parser).  Each parsed record is a dict
    mapping field name -> the fixed-width slice of the line.
    """

    # Class-level defaults, kept for backward compatibility.  The
    # constructor sets per-instance copies so instances do not share state.
    vdict = dict()    # field name -> [start_col, end_col]
    count = 0         # number of records read so far
    record = dict()   # most recently read/written record
    reclen = 0        # physical record length incl. line terminator (0 = unknown)
    fd = None         # open data-file handle

    # The constructor opens the Record Definition File and sets
    # the record definition
    def __init__(self, recName, fileName, mode="r", encoding="Latin-1"):
        defstr = self.recordDef(recName)
        self.vdict = self.vardict(defstr)
        # Bug fix: 'record' used to be mutated as a *class* attribute, so
        # all instances shared (and overwrote) the same dict.
        self.record = dict()
        self.count = 0
        self.reclen = 0
        self.fd = self.openfile(fileName, mode, encoding)

    def getreclen(self, add=0):
        """Return the physical record length, measuring it lazily.

        The length of the first line (including its newline) plus *add* is
        cached in self.reclen; the file is rewound afterwards.
        """
        if self.reclen == 0:
            try:
                reclen = len(self.fd.readline()) + add
            except Exception:
                reclen = 0
            self.reclen = reclen
            self.rewind()
            return reclen
        else:
            return self.reclen

    def Change_fielddef(self, recName):
        """Switch to another record definition and re-measure the record length."""
        defstr = self.recordDef(recName)
        self.vdict = self.vardict(defstr)
        try:
            self.reclen = len(self.fd.readline()) + 1
        except Exception:
            self.reclen = 0
        return self.vdict

    def openfile(self, fileName, mode, encoding):
        """Open the data file, printing the error and exiting on failure."""
        try:
            # Bug fix: the caller-supplied 'encoding' parameter was ignored
            # (the call hard-coded encoding="Latin-1").
            fd = open(fileName, mode, encoding=encoding)
        except Exception as e:
            print(e)
            quit()
        else:
            return fd

    # Read the record definition from the "field.def" file
    def recordDef(self, recName):
        """Look up *recName* in field.def and return its definition string.

        Prints a message and exits the program when the name is not found
        (behaviour kept from the original implementation).
        """
        # 'with' fixes the original file-handle leak.
        with open("field.def", "r", encoding="Latin-1") as fx:
            for line in fx:
                line = line.split("#", 1)[0]
                # Bug fix: the lstrip() result used to be discarded, so
                # whitespace-only lines crashed the unpack below.
                line = line.lstrip()
                if len(line) < 3:
                    continue
                name, defstr = line.split("=")
                if name.strip() == recName.strip():
                    return defstr
        print(recName, ": Record Definition not found")
        quit()

    # Create a dict with each name="field Name"
    # and Value = a list consisting of two elements
    # 1) The Start Character Position and 2) End Char position
    def vardict(self, defstr):
        """Parse a definition string into {field_name: [start_col, end_col]}."""
        col = 0
        recdict = dict()
        nv = (item.split(" AS ") for item in defstr.split(","))
        for item in nv:
            num = int(item[0].strip())
            # [0:-1] drops the trailing terminator character of each name
            # (format quirk kept from the original parser).
            recdict[(item[1]).strip()[0:-1]] = [col, col + num]
            col = col + num
        return recdict

    def getline(self):
        """Read one raw line; returns "" on read error (and at EOF)."""
        line = ""  # bug fix: 'line' was unbound when readline() raised
        try:
            line = self.fd.readline()
        except Exception:
            print("Error Reading Line", self.count)
        return line

    # Parse a line of data using the record definition
    # and create an easily accessible record.
    def parseline(self, line):
        """Slice *line* into a {field: value} dict using the current layout."""
        recdef = self.vdict
        recdict = {}
        for name in recdef:
            recdict[name] = line[recdef[name][0]:recdef[name][1]]
        return recdict

    # Read each line and call parseline
    def readrec(self):
        """Read and parse the next record; returns False at EOF/blank line."""
        line = self.getline()
        # rstrip("\r\n") strips any trailing CR/LF combination in one pass
        # (the original chained three redundant rstrip calls).
        line = line.rstrip("\r\n")
        if not line:
            return False
        rec = self.parseline(line)
        self.count = self.count + 1
        self.setrec(rec)
        return rec

    # Copy a record to this Object (self.record)
    def setrec(self, rec):
        """Copy *rec*'s fields into this instance's current record."""
        for item in rec:
            self.record[item] = rec[item]

    # Write a Record as an output line
    def writerec(self, rec):
        """Write *rec* as one fixed-width, CR-terminated output line."""
        self.setrec(rec)
        line = ""
        for field in self.vdict:
            self.sizeadjust(field, self.record)
            line = line + self.record[field]
        line = line + "\r"
        self.fd.write(line)

    # Some fields in the record may be longer or shorter;
    # the size is adjusted to the correct width.
    def sizeadjust(self, field, rec):
        """Pad (with spaces) or truncate rec[field] to its defined width in place."""
        length = self.vdict[field][1] - self.vdict[field][0]
        length = 0 if (length < 0) else length
        value = str(rec[field])
        if len(value) < length:
            rec[field] = value + ' ' * (length - len(value))
        elif len(value) > length:
            rec[field] = rec[field][:length]

    # Rewind to the beginning of the file
    def rewind(self):
        self.fd.seek(0)

    # Get a specific record
    def getrec(self, rec_no):
        """Random-access read of record *rec_no* (0-based; reclen must be set)."""
        self.fd.seek(rec_no * self.reclen)
        return self.readrec()

    # Put a specific record
    def putrec(self, rec, rec_no):
        """Random-access write of *rec* at slot *rec_no* (reclen must be set)."""
        self.fd.seek(rec_no * self.reclen)
        self.writerec(rec)

    # Get maximum number of records in an open file
    def getmaxrecs(self):
        """Return the number of whole records in the open file."""
        here = self.fd.tell()
        self.fd.seek(0, 2)  # seek to EOF to measure the file size
        # Bug fix: integer division -- '/' returned a float record count.
        nos = self.fd.tell() // self.reclen
        self.fd.seek(here, 0)
        return nos
|
python
|
import os
import posixpath
import sys
import docker
import json
from unittest import TestCase, skipUnless
from unittest.mock import Mock, call, patch, ANY
from pathlib import Path, WindowsPath
from parameterized import parameterized
from samcli.lib.build.build_graph import FunctionBuildDefinition, LayerBuildDefinition
from samcli.lib.providers.provider import ResourcesToBuildCollector
from samcli.lib.build.app_builder import (
ApplicationBuilder,
UnsupportedBuilderLibraryVersionError,
BuildError,
LambdaBuilderError,
ContainerBuildNotSupported,
BuildInsideContainerError,
DockerfileOutSideOfContext,
DockerBuildFailed,
DockerConnectionError,
)
from samcli.lib.utils.packagetype import IMAGE, ZIP
from samcli.lib.utils import osutils
from tests.unit.lib.build_module.test_build_graph import generate_function
class TestApplicationBuilder_build(TestCase):
    """Tests for ApplicationBuilder.build().

    Covers: iteration over functions and layers, use of get_build_dir()
    to determine artifact directories, build-graph generation,
    de-duplication of identical function builds, and selection of the
    default / cached / parallel build strategies.
    """
    def setUp(self):
        # Two ZIP functions, one IMAGE function and two layers (all Mocks)
        # are registered with a ResourcesToBuildCollector for the builder
        # under test.  full_path values mimic a nested stack ("StackJ/...").
        self.build_dir = "builddir"
        self.func1 = Mock()
        self.func1.packagetype = ZIP
        self.func1.name = "function_name1"
        self.func1.full_path = posixpath.join("StackJ", "function_name1")
        self.func1.get_build_dir = Mock()
        self.func1.inlinecode = None
        self.func2 = Mock()
        self.func2.packagetype = ZIP
        self.func2.name = "function_name2"
        self.func2.full_path = posixpath.join("StackJ", "function_name2")
        self.func2.get_build_dir = Mock()
        self.func2.inlinecode = None
        self.imageFunc1 = Mock()
        self.imageFunc1.name = "function_name3"
        self.imageFunc1.full_path = posixpath.join("StackJ", "function_name3")
        self.imageFunc1.get_build_dir = Mock()
        self.imageFunc1.inlinecode = None
        self.layer1 = Mock()
        self.layer2 = Mock()
        self.imageFunc1.packagetype = IMAGE
        self.layer1.build_method = "build_method"
        self.layer1.full_path = os.path.join("StackJ", "layer_name1")
        self.layer1.get_build_dir = Mock()
        self.layer2.build_method = "build_method"
        self.layer2.full_path = os.path.join("StackJ", "layer_name2")
        self.layer2.get_build_dir = Mock()
        resources_to_build_collector = ResourcesToBuildCollector()
        resources_to_build_collector.add_functions([self.func1, self.func2, self.imageFunc1])
        resources_to_build_collector.add_layers([self.layer1, self.layer2])
        # NOTE(review): "builddir" is duplicated here instead of reusing
        # self.build_dir — same value, but easy to let drift apart.
        self.builder = ApplicationBuilder(resources_to_build_collector, "builddir", "basedir", "cachedir")
    @patch("samcli.lib.build.build_graph.BuildGraph._write")
    def test_must_iterate_on_functions_and_layers(self, persist_mock):
        """build() must invoke the right per-resource build method for every
        function (ZIP and IMAGE) and every layer, and collect the results
        keyed by full_path."""
        build_function_mock = Mock()
        build_image_function_mock = Mock()
        build_image_function_mock_return = Mock()
        build_layer_mock = Mock()
        # Echo back "<layer_name>_location" so each layer's result is
        # distinguishable in the assertion below.
        def build_layer_return(
            layer_name, layer_codeuri, layer_build_method, layer_compatible_runtimes, artifact_dir, layer_env_vars
        ):
            return f"{layer_name}_location"
        build_layer_mock.side_effect = build_layer_return
        self.builder._build_function = build_function_mock
        self.builder._build_lambda_image = build_image_function_mock
        self.builder._build_layer = build_layer_mock
        build_function_mock.side_effect = [
            os.path.join(self.build_dir, "StackJ", "function_name1"),
            os.path.join(self.build_dir, "StackJ", "function_name2"),
            build_image_function_mock_return,
        ]
        result = self.builder.build()
        self.maxDiff = None
        self.assertEqual(
            result,
            {
                self.func1.full_path: os.path.join("builddir", "StackJ", "function_name1"),
                self.func2.full_path: os.path.join("builddir", "StackJ", "function_name2"),
                self.imageFunc1.full_path: build_image_function_mock_return,
                self.layer1.full_path: f"{self.layer1.name}_location",
                self.layer2.full_path: f"{self.layer2.name}_location",
            },
        )
        # Functions must be built in declaration order (any_order=False).
        build_function_mock.assert_has_calls(
            [
                call(
                    self.func1.name,
                    self.func1.codeuri,
                    ZIP,
                    self.func1.runtime,
                    self.func1.handler,
                    ANY,
                    self.func1.metadata,
                    ANY,
                ),
                call(
                    self.func2.name,
                    self.func2.codeuri,
                    ZIP,
                    self.func2.runtime,
                    self.func2.handler,
                    ANY,
                    self.func2.metadata,
                    ANY,
                ),
                call(
                    self.imageFunc1.name,
                    self.imageFunc1.codeuri,
                    IMAGE,
                    self.imageFunc1.runtime,
                    self.imageFunc1.handler,
                    ANY,
                    self.imageFunc1.metadata,
                    ANY,
                ),
            ],
            any_order=False,
        )
        build_layer_mock.assert_has_calls(
            [
                call(
                    self.layer1.name,
                    self.layer1.codeuri,
                    self.layer1.build_method,
                    self.layer1.compatible_runtimes,
                    ANY,
                    ANY,
                ),
                call(
                    self.layer2.name,
                    self.layer2.codeuri,
                    self.layer2.build_method,
                    self.layer2.compatible_runtimes,
                    ANY,
                    ANY,
                ),
            ]
        )
    @patch("samcli.lib.build.build_graph.BuildGraph._write")
    def test_should_use_function_or_layer_get_build_dir_to_determine_artifact_dir(self, persist_mock):
        """Whatever a resource's get_build_dir() returns must be passed as the
        artifact_dir argument (6th for functions, 5th for layers)."""
        def get_func_call_with_artifact_dir(artifact_dir):
            return call(ANY, ANY, ANY, ANY, ANY, artifact_dir, ANY, ANY)
        def get_layer_call_with_artifact_dir(artifact_dir):
            return call(ANY, ANY, ANY, ANY, artifact_dir, ANY)
        build_function_mock = Mock()
        build_layer_mock = Mock()
        self.builder._build_function = build_function_mock
        self.builder._build_layer = build_layer_mock
        self.builder.build()
        # make sure function/layer's get_build_dir() is called with correct directory
        self.func1.get_build_dir.assert_called_with(self.build_dir)
        self.func2.get_build_dir.assert_called_with(self.build_dir)
        self.imageFunc1.get_build_dir.assert_called_with(self.build_dir)
        self.layer1.get_build_dir.assert_called_with(self.build_dir)
        self.layer2.get_build_dir.assert_called_with(self.build_dir)
        # make sure whatever is returned from .get_build_dir() is used for build function/layer
        build_function_mock.assert_has_calls(
            [
                get_func_call_with_artifact_dir(self.func1.get_build_dir()),
                get_func_call_with_artifact_dir(self.func2.get_build_dir()),
                get_func_call_with_artifact_dir(self.imageFunc1.get_build_dir()),
            ]
        )
        build_layer_mock.assert_has_calls(
            [
                get_layer_call_with_artifact_dir(self.layer1.get_build_dir()),
                get_layer_call_with_artifact_dir(self.layer2.get_build_dir()),
            ]
        )
    @patch("samcli.lib.build.build_graph.BuildGraph._write")
    def test_should_generate_build_graph(self, persist_mock):
        """_get_build_graph() should contain every registered function."""
        build_graph = self.builder._get_build_graph()
        # NOTE(review): assertTrue(x, 2) only checks truthiness of the first
        # argument — the ", 2" is treated as the failure message.  The intended
        # assertion was probably assertEqual(len(...), 2).
        self.assertTrue(len(build_graph.get_function_build_definitions()), 2)
        all_functions_in_build_graph = []
        for build_definition in build_graph.get_function_build_definitions():
            for function in build_definition.functions:
                all_functions_in_build_graph.append(function)
        self.assertTrue(self.func1 in all_functions_in_build_graph)
        self.assertTrue(self.func2 in all_functions_in_build_graph)
    @patch("samcli.lib.build.build_graph.BuildGraph._write")
    @patch("samcli.lib.build.build_graph.BuildGraph._read")
    @patch("samcli.lib.build.build_strategy.osutils")
    def test_should_run_build_for_only_unique_builds(self, persist_mock, read_mock, osutils_mock):
        """Functions with identical build definitions must be built only once;
        the shared artifact is reused for each duplicate."""
        # NOTE(review): @patch decorators inject mocks bottom-up, so the first
        # parameter actually receives the build_strategy.osutils patch and the
        # last receives BuildGraph._write — the parameter names here are
        # swapped.  Harmless (none of the mocks are used), but misleading.
        build_function_mock = Mock()
        # create 3 function resources where 2 of them would have same codeuri, runtime and metadata
        function1_1 = generate_function("function1_1")
        function1_2 = generate_function("function1_2")
        function2 = generate_function("function2", runtime="different_runtime")
        resources_to_build_collector = ResourcesToBuildCollector()
        resources_to_build_collector.add_functions([function1_1, function1_2, function2])
        build_dir = "builddir"
        # instantiate the builder and run build method
        builder = ApplicationBuilder(resources_to_build_collector, "builddir", "basedir", "cachedir")
        builder._build_function = build_function_mock
        build_function_mock.side_effect = [
            function1_1.get_build_dir(build_dir),
            function1_2.get_build_dir(build_dir),
            function1_2.get_build_dir(build_dir),
        ]
        result = builder.build()
        # result should contain all 3 functions as expected
        self.assertEqual(
            result,
            {
                function1_1.full_path: function1_1.get_build_dir(build_dir),
                function1_2.full_path: function1_2.get_build_dir(build_dir),
                function2.full_path: function1_2.get_build_dir(build_dir),
            },
        )
        # actual build should only be called twice since only 2 of the functions have unique build
        build_function_mock.assert_has_calls(
            [
                call(
                    function1_1.name,
                    function1_1.codeuri,
                    ZIP,
                    function1_1.runtime,
                    function1_1.handler,
                    ANY,
                    function1_1.metadata,
                    ANY,
                ),
                call(
                    function2.name,
                    function2.codeuri,
                    ZIP,
                    function2.runtime,
                    function2.handler,
                    ANY,
                    function2.metadata,
                    ANY,
                ),
            ],
            any_order=True,
        )
    @patch("samcli.lib.build.app_builder.DefaultBuildStrategy")
    def test_default_run_should_pick_default_strategy(self, mock_default_build_strategy_class):
        """With no flags, build() delegates to DefaultBuildStrategy."""
        mock_default_build_strategy = Mock()
        mock_default_build_strategy_class.return_value = mock_default_build_strategy
        build_graph_mock = Mock()
        get_build_graph_mock = Mock(return_value=build_graph_mock)
        builder = ApplicationBuilder(Mock(), "builddir", "basedir", "cachedir")
        builder._get_build_graph = get_build_graph_mock
        result = builder.build()
        mock_default_build_strategy.build.assert_called_once()
        self.assertEqual(result, mock_default_build_strategy.build())
    @patch("samcli.lib.build.app_builder.CachedBuildStrategy")
    def test_cached_run_should_pick_cached_strategy(self, mock_cached_build_strategy_class):
        """cached=True selects CachedBuildStrategy."""
        mock_cached_build_strategy = Mock()
        mock_cached_build_strategy_class.return_value = mock_cached_build_strategy
        build_graph_mock = Mock()
        get_build_graph_mock = Mock(return_value=build_graph_mock)
        builder = ApplicationBuilder(Mock(), "builddir", "basedir", "cachedir", cached=True)
        builder._get_build_graph = get_build_graph_mock
        result = builder.build()
        mock_cached_build_strategy.build.assert_called_once()
        self.assertEqual(result, mock_cached_build_strategy.build())
    @patch("samcli.lib.build.app_builder.ParallelBuildStrategy")
    def test_parallel_run_should_pick_parallel_strategy(self, mock_parallel_build_strategy_class):
        """parallel=True selects ParallelBuildStrategy."""
        mock_parallel_build_strategy = Mock()
        mock_parallel_build_strategy_class.return_value = mock_parallel_build_strategy
        build_graph_mock = Mock()
        get_build_graph_mock = Mock(return_value=build_graph_mock)
        builder = ApplicationBuilder(Mock(), "builddir", "basedir", "cachedir", parallel=True)
        builder._get_build_graph = get_build_graph_mock
        result = builder.build()
        mock_parallel_build_strategy.build.assert_called_once()
        self.assertEqual(result, mock_parallel_build_strategy.build())
    @patch("samcli.lib.build.app_builder.ParallelBuildStrategy")
    @patch("samcli.lib.build.app_builder.CachedBuildStrategy")
    def test_parallel_and_cached_run_should_pick_parallel_with_cached_strategy(
        self, mock_cached_build_strategy_class, mock_parallel_build_strategy_class
    ):
        """parallel=True still routes through ParallelBuildStrategy; the
        cached strategy is patched so its construction cannot interfere.
        NOTE(review): despite the test name, only parallel=True is passed —
        the cached=True combination is not actually exercised here."""
        mock_parallel_build_strategy = Mock()
        mock_parallel_build_strategy_class.return_value = mock_parallel_build_strategy
        mock_cached_build_strategy = Mock()
        mock_cached_build_strategy_class.return_value = mock_cached_build_strategy
        build_graph_mock = Mock()
        get_build_graph_mock = Mock(return_value=build_graph_mock)
        builder = ApplicationBuilder(Mock(), "builddir", "basedir", "cachedir", parallel=True)
        builder._get_build_graph = get_build_graph_mock
        result = builder.build()
        mock_parallel_build_strategy.build.assert_called_once()
        self.assertEqual(result, mock_parallel_build_strategy.build())
class PathValidator:
    """Equality helper for mock-call assertions: compares equal to any
    string that ends with the expected path suffix.  Comparing against
    None is true only when the expected path itself is None."""
    def __init__(self, path):
        self._path = path
    def __eq__(self, other):
        if other is None:
            return self._path is None
        return other.endswith(self._path)
class TestApplicationBuilderForLayerBuild(TestCase):
    """Tests for ApplicationBuilder._build_layer(): in-process vs in-container
    builds, and build-image selection (global default vs per-layer)."""
    def setUp(self):
        # Two mock layers registered with the collector; the container
        # manager is only attached in the in-container tests.
        self.layer1 = Mock()
        self.layer2 = Mock()
        self.container_manager = Mock()
        resources_to_build_collector = ResourcesToBuildCollector()
        resources_to_build_collector.add_layers([self.layer1, self.layer2])
        self.builder = ApplicationBuilder(resources_to_build_collector, "builddir", "basedir", "cachedir")
    @patch("samcli.lib.build.app_builder.get_workflow_config")
    @patch("samcli.lib.build.app_builder.osutils")
    @patch("samcli.lib.build.app_builder.get_layer_subfolder")
    def test_must_build_layer_in_process(self, get_layer_subfolder_mock, osutils_mock, get_workflow_config_mock):
        """Without a container manager, _build_layer delegates to
        _build_function_in_process with the layer subfolder as target."""
        get_layer_subfolder_mock.return_value = "python"
        config_mock = Mock()
        config_mock.manifest_name = "manifest_name"
        scratch_dir = "scratch"
        # mkdir_temp is used as a context manager; fake __enter__/__exit__.
        osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=scratch_dir)
        osutils_mock.mkdir_temp.return_value.__exit__ = Mock()
        get_workflow_config_mock.return_value = config_mock
        build_function_in_process_mock = Mock()
        self.builder._build_function_in_process = build_function_in_process_mock
        self.builder._build_layer("layer_name", "code_uri", "python3.8", ["python3.8"], "full_path")
        build_function_in_process_mock.assert_called_once_with(
            config_mock,
            PathValidator("code_uri"),
            PathValidator("python"),
            "scratch",
            PathValidator("manifest_name"),
            "python3.8",
            None,
        )
    @patch("samcli.lib.build.app_builder.get_workflow_config")
    @patch("samcli.lib.build.app_builder.osutils")
    @patch("samcli.lib.build.app_builder.get_layer_subfolder")
    def test_must_build_layer_in_container(self, get_layer_subfolder_mock, osutils_mock, get_workflow_config_mock):
        """With a container manager set, _build_layer delegates to
        _build_function_on_container (no build image configured => None)."""
        self.builder._container_manager = self.container_manager
        get_layer_subfolder_mock.return_value = "python"
        config_mock = Mock()
        config_mock.manifest_name = "manifest_name"
        scratch_dir = "scratch"
        osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=scratch_dir)
        osutils_mock.mkdir_temp.return_value.__exit__ = Mock()
        get_workflow_config_mock.return_value = config_mock
        build_function_on_container_mock = Mock()
        self.builder._build_function_on_container = build_function_on_container_mock
        self.builder._build_layer("layer_name", "code_uri", "python3.8", ["python3.8"], "full_path")
        build_function_on_container_mock.assert_called_once_with(
            config_mock,
            PathValidator("code_uri"),
            PathValidator("python"),
            PathValidator("manifest_name"),
            "python3.8",
            None,
            None,
            None,
        )
    @patch("samcli.lib.build.app_builder.get_workflow_config")
    @patch("samcli.lib.build.app_builder.osutils")
    @patch("samcli.lib.build.app_builder.get_layer_subfolder")
    def test_must_build_layer_in_container_with_global_build_image(
        self, get_layer_subfolder_mock, osutils_mock, get_workflow_config_mock
    ):
        """A build image registered under the None key acts as the global
        default and must be forwarded for any layer."""
        self.builder._container_manager = self.container_manager
        get_layer_subfolder_mock.return_value = "python"
        config_mock = Mock()
        config_mock.manifest_name = "manifest_name"
        scratch_dir = "scratch"
        osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=scratch_dir)
        osutils_mock.mkdir_temp.return_value.__exit__ = Mock()
        get_workflow_config_mock.return_value = config_mock
        build_function_on_container_mock = Mock()
        build_images = {None: "test_image"}
        self.builder._build_images = build_images
        self.builder._build_function_on_container = build_function_on_container_mock
        self.builder._build_layer("layer_name", "code_uri", "python3.8", ["python3.8"], "full_path")
        build_function_on_container_mock.assert_called_once_with(
            config_mock,
            PathValidator("code_uri"),
            PathValidator("python"),
            PathValidator("manifest_name"),
            "python3.8",
            None,
            None,
            "test_image",
        )
    @patch("samcli.lib.build.app_builder.get_workflow_config")
    @patch("samcli.lib.build.app_builder.osutils")
    @patch("samcli.lib.build.app_builder.get_layer_subfolder")
    def test_must_build_layer_in_container_with_specific_build_image(
        self, get_layer_subfolder_mock, osutils_mock, get_workflow_config_mock
    ):
        """A build image keyed by the layer name takes effect for that layer."""
        self.builder._container_manager = self.container_manager
        get_layer_subfolder_mock.return_value = "python"
        config_mock = Mock()
        config_mock.manifest_name = "manifest_name"
        scratch_dir = "scratch"
        osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=scratch_dir)
        osutils_mock.mkdir_temp.return_value.__exit__ = Mock()
        get_workflow_config_mock.return_value = config_mock
        build_function_on_container_mock = Mock()
        build_images = {"layer_name": "test_image"}
        self.builder._build_images = build_images
        self.builder._build_function_on_container = build_function_on_container_mock
        self.builder._build_layer("layer_name", "code_uri", "python3.8", ["python3.8"], "full_path")
        build_function_on_container_mock.assert_called_once_with(
            config_mock,
            PathValidator("code_uri"),
            PathValidator("python"),
            PathValidator("manifest_name"),
            "python3.8",
            None,
            None,
            "test_image",
        )
class TestApplicationBuilder_update_template(TestCase):
    """Tests for ApplicationBuilder.update_template(): rewriting resource
    code locations (CodeUri/Code/Location/TemplateURL) to point at the
    built artifacts, for single- and multi-stack templates."""
    def make_root_template(self, resource_type, location_property_name):
        """Build a minimal root template with one function and one nested
        stack of the given resource type / location property."""
        return {
            "Resources": {
                "MyFunction1": {"Type": "AWS::Serverless::Function", "Properties": {"CodeUri": "oldvalue"}},
                "ChildStackXXX": {"Type": resource_type, "Properties": {location_property_name: "./child.yaml"}},
            }
        }
    def setUp(self):
        # Template mixing buildable resources (Serverless/Lambda functions,
        # one Image-packaged function) with resources update_template must
        # leave untouched (Glue job, Lambda version).
        self.builder = ApplicationBuilder(Mock(), "builddir", "basedir", "cachedir")
        self.template_dict = {
            "Resources": {
                "MyFunction1": {"Type": "AWS::Serverless::Function", "Properties": {"CodeUri": "oldvalue"}},
                "MyFunction2": {"Type": "AWS::Lambda::Function", "Properties": {"Code": "oldvalue"}},
                "GlueResource": {"Type": "AWS::Glue::Job", "Properties": {"Command": {"ScriptLocation": "something"}}},
                "OtherResource": {"Type": "AWS::Lambda::Version", "Properties": {"CodeUri": "something"}},
                "MyImageFunction1": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"PackageType": "Image"},
                    "Metadata": {"Dockerfile": "Dockerfile", "DockerContext": "DockerContext", "DockerTag": "Tag"},
                },
            }
        }
    def test_must_update_resources_with_build_artifacts(self):
        """ZIP artifacts become paths relative to the template location;
        image artifacts become the image URI; other resources unchanged."""
        self.maxDiff = None
        # NOTE(review): "tempate.txt" is a typo for "template.txt" — harmless
        # here since only the parent directory is used for relativization.
        original_template_path = "/path/to/tempate.txt"
        built_artifacts = {
            "MyFunction1": "/path/to/build/MyFunction1",
            "MyFunction2": "/path/to/build/MyFunction2",
            "MyImageFunction1": "myimagefunction1:Tag",
        }
        expected_result = {
            "Resources": {
                "MyFunction1": {
                    "Type": "AWS::Serverless::Function",
                    "Properties": {"CodeUri": os.path.join("build", "MyFunction1")},
                },
                "MyFunction2": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": os.path.join("build", "MyFunction2")},
                },
                "GlueResource": {"Type": "AWS::Glue::Job", "Properties": {"Command": {"ScriptLocation": "something"}}},
                "OtherResource": {"Type": "AWS::Lambda::Version", "Properties": {"CodeUri": "something"}},
                "MyImageFunction1": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": "myimagefunction1:Tag", "PackageType": IMAGE},
                    "Metadata": {"Dockerfile": "Dockerfile", "DockerContext": "DockerContext", "DockerTag": "Tag"},
                },
            }
        }
        stack = Mock(stack_path="", template_dict=self.template_dict, location=original_template_path)
        actual = self.builder.update_template(stack, built_artifacts, {})
        self.assertEqual(actual, expected_result)
    @parameterized.expand([("AWS::Serverless::Application", "Location"), ("AWS::CloudFormation::Stack", "TemplateURL")])
    def test_must_update_resources_with_build_artifacts_and_template_paths_in_multi_stack(
        self, resource_type, location_property_name
    ):
        """In a nested-stack setup both the root and the child template get
        rewritten; artifact keys are prefixed with the child stack path."""
        self.maxDiff = None
        original_child_template_path = "/path/to/child.yaml"
        original_root_template_path = "/path/to/template.yaml"
        built_artifacts = {
            "MyFunction1": "/path/to/build/MyFunction1",
            "ChildStackXXX/MyFunction1": "/path/to/build/ChildStackXXX/MyFunction1",
            "ChildStackXXX/MyFunction2": "/path/to/build/ChildStackXXX/MyFunction2",
            "ChildStackXXX/MyImageFunction1": "myimagefunction1:Tag",
        }
        stack_output_paths = {
            "": "/path/to/build/template.yaml",
            "ChildStackXXX": "/path/to/build/ChildStackXXX/template.yaml",
        }
        expected_child = {
            "Resources": {
                "MyFunction1": {
                    "Type": "AWS::Serverless::Function",
                    "Properties": {"CodeUri": os.path.join("build", "ChildStackXXX", "MyFunction1")},
                },
                "MyFunction2": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": os.path.join("build", "ChildStackXXX", "MyFunction2")},
                },
                "GlueResource": {"Type": "AWS::Glue::Job", "Properties": {"Command": {"ScriptLocation": "something"}}},
                "OtherResource": {"Type": "AWS::Lambda::Version", "Properties": {"CodeUri": "something"}},
                "MyImageFunction1": {
                    "Type": "AWS::Lambda::Function",
                    "Properties": {"Code": "myimagefunction1:Tag", "PackageType": IMAGE},
                    "Metadata": {"Dockerfile": "Dockerfile", "DockerContext": "DockerContext", "DockerTag": "Tag"},
                },
            }
        }
        expected_root = {
            "Resources": {
                "MyFunction1": {
                    "Type": "AWS::Serverless::Function",
                    "Properties": {"CodeUri": os.path.join("build", "MyFunction1")},
                },
                "ChildStackXXX": {
                    "Type": resource_type,
                    "Properties": {
                        location_property_name: os.path.join("build", "ChildStackXXX", "template.yaml"),
                    },
                },
            }
        }
        stack_root = Mock(
            stack_path="",
            template_dict=self.make_root_template(resource_type, location_property_name),
            location=original_root_template_path,
        )
        actual_root = self.builder.update_template(stack_root, built_artifacts, stack_output_paths)
        stack_child = Mock(
            stack_path="ChildStackXXX",
            template_dict=self.template_dict,
            location=original_child_template_path,
        )
        actual_child = self.builder.update_template(stack_child, built_artifacts, stack_output_paths)
        self.assertEqual(expected_root, actual_root)
        self.assertEqual(expected_child, actual_child)
    def test_must_skip_if_no_artifacts(self):
        """With no built artifacts the template must pass through unchanged."""
        built_artifacts = {}
        stack = Mock(stack_path="", template_dict=self.template_dict, location="/foo/bar/template.txt")
        actual = self.builder.update_template(stack, built_artifacts, {})
        self.assertEqual(actual, self.template_dict)
class TestApplicationBuilder_update_template_windows(TestCase):
    """Windows-path behaviour of update_template(): artifacts on a different
    drive must be written as absolute paths, same-drive artifacts as
    relative paths.  Runs on any OS by forcing os.path to ntpath."""
    def setUp(self):
        self.builder = ApplicationBuilder(Mock(), "builddir", "basedir", "cachedir")
        self.template_dict = {
            "Resources": {
                "MyFunction1": {"Type": "AWS::Serverless::Function", "Properties": {"CodeUri": "oldvalue"}},
                "MyFunction2": {"Type": "AWS::Lambda::Function", "Properties": {"Code": "oldvalue"}},
                "GlueResource": {"Type": "AWS::Glue::Job", "Properties": {"Command": {"ScriptLocation": "something"}}},
                "OtherResource": {"Type": "AWS::Lambda::Version", "Properties": {"CodeUri": "something"}},
                "ChildStack1": {"Type": "AWS::Serverless::Application", "Properties": {"Location": "oldvalue"}},
                "ChildStack2": {"Type": "AWS::CloudFormation::Stack", "Properties": {"TemplateURL": "oldvalue"}},
            }
        }
        # Force os.path to be ntpath instead of posixpath on unix systems.
        # The original module object is saved so tearDown can restore it;
        # code under test that reads os.path (not os.path members bound
        # earlier) will use Windows semantics for the duration of the test.
        import ntpath
        self.saved_os_path_module = sys.modules["os.path"]
        os.path = sys.modules["ntpath"]
    def test_must_write_absolute_path_for_different_drives(self):
        # pathlib.Path must also behave like WindowsPath; __new__ and
        # resolve() are patched so Path() constructs WindowsPath objects
        # without touching the real filesystem.
        def mock_new(cls, *args, **kwargs):
            cls = WindowsPath
            self = cls._from_parts(args, init=False)
            self._init()
            return self
        def mock_resolve(self):
            return self
        with patch("pathlib.Path.__new__", new=mock_new):
            with patch("pathlib.Path.resolve", new=mock_resolve):
                original_template_path = "C:\\path\\to\\template.txt"
                function_1_path = "D:\\path\\to\\build\\MyFunction1"     # different drive -> absolute
                function_2_path = "C:\\path2\\to\\build\\MyFunction2"    # same drive -> relative
                built_artifacts = {"MyFunction1": function_1_path, "MyFunction2": function_2_path}
                child_1_path = "D:\\path\\to\\build\\ChildStack1\\template.yaml"
                child_2_path = "C:\\path2\\to\\build\\ChildStack2\\template.yaml"
                output_template_paths = {"ChildStack1": child_1_path, "ChildStack2": child_2_path}
                expected_result = {
                    "Resources": {
                        "MyFunction1": {
                            "Type": "AWS::Serverless::Function",
                            "Properties": {"CodeUri": function_1_path},
                        },
                        "MyFunction2": {
                            "Type": "AWS::Lambda::Function",
                            "Properties": {"Code": "..\\..\\path2\\to\\build\\MyFunction2"},
                        },
                        "GlueResource": {
                            "Type": "AWS::Glue::Job",
                            "Properties": {"Command": {"ScriptLocation": "something"}},
                        },
                        "OtherResource": {"Type": "AWS::Lambda::Version", "Properties": {"CodeUri": "something"}},
                        "ChildStack1": {
                            "Type": "AWS::Serverless::Application",
                            "Properties": {"Location": child_1_path},
                        },
                        "ChildStack2": {
                            "Type": "AWS::CloudFormation::Stack",
                            "Properties": {"TemplateURL": "..\\..\\path2\\to\\build\\ChildStack2\\template.yaml"},
                        },
                    }
                }
                stack = Mock()
                stack.stack_path = ""
                stack.template_dict = self.template_dict
                stack.location = original_template_path
                actual = self.builder.update_template(stack, built_artifacts, output_template_paths)
                self.assertEqual(actual, expected_result)
    def tearDown(self):
        # Restore the real platform os.path module saved in setUp.
        os.path = self.saved_os_path_module
class TestApplicationBuilder_build_lambda_image_function(TestCase):
    """Tests for ApplicationBuilder._build_lambda_image(): docker error
    translation, tag generation (explicit / default / debug-mode), and the
    arguments forwarded to docker's low-level build API."""
    def setUp(self):
        # The docker client is fully mocked; no daemon is contacted.
        self.stream_mock = Mock()
        self.docker_client_mock = Mock()
        self.builder = ApplicationBuilder(
            Mock(),
            "/build/dir",
            "/base/dir",
            "/cached/dir",
            stream_writer=self.stream_mock,
            docker_client=self.docker_client_mock,
        )
    def test_docker_build_raises_docker_unavailable(self):
        """A failing docker ping must surface as DockerConnectionError."""
        with self.assertRaises(DockerConnectionError):
            metadata = {
                "Dockerfile": "Dockerfile",
                "DockerContext": "context",
                "DockerTag": "Tag",
                "DockerBuildArgs": {"a": "b"},
            }
            self.docker_client_mock.ping.side_effect = docker.errors.APIError(message="Mock Error")
            self.builder._build_lambda_image("Name", metadata)
    def test_docker_build_raises_DockerBuildFailed_when_error_in_buildlog_stream(self):
        """An 'error' entry in the build log stream => DockerBuildFailed."""
        with self.assertRaises(DockerBuildFailed):
            metadata = {
                "Dockerfile": "Dockerfile",
                "DockerContext": "context",
                "DockerTag": "Tag",
                "DockerBuildArgs": {"a": "b"},
            }
            self.docker_client_mock.api.build.return_value = [{"error": "Function building failed"}]
            self.builder._build_lambda_image("Name", metadata)
    def test_dockerfile_not_in_dockercontext(self):
        """A 500 APIError explaining the Dockerfile cannot be located must be
        translated to DockerfileOutSideOfContext."""
        with self.assertRaises(DockerfileOutSideOfContext):
            metadata = {
                "Dockerfile": "Dockerfile",
                "DockerContext": "context",
                "DockerTag": "Tag",
                "DockerBuildArgs": {"a": "b"},
            }
            response_mock = Mock()
            response_mock.status_code = 500
            error_mock = Mock()
            error_mock.side_effect = docker.errors.APIError(
                "Bad Request", response=response_mock, explanation="Cannot locate specified Dockerfile"
            )
            self.builder._stream_lambda_image_build_logs = error_mock
            self.docker_client_mock.api.build.return_value = []
            self.builder._build_lambda_image("Name", metadata)
    def test_error_rerasises(self):
        """Any other APIError must propagate unchanged."""
        with self.assertRaises(docker.errors.APIError):
            metadata = {
                "Dockerfile": "Dockerfile",
                "DockerContext": "context",
                "DockerTag": "Tag",
                "DockerBuildArgs": {"a": "b"},
            }
            error_mock = Mock()
            error_mock.side_effect = docker.errors.APIError("Bad Request", explanation="Some explanation")
            self.builder._stream_lambda_image_build_logs = error_mock
            self.docker_client_mock.api.build.return_value = []
            self.builder._build_lambda_image("Name", metadata)
    def test_can_build_image_function(self):
        """Happy path: function name is lower-cased and joined with the tag."""
        metadata = {
            "Dockerfile": "Dockerfile",
            "DockerContext": "context",
            "DockerTag": "Tag",
            "DockerBuildArgs": {"a": "b"},
        }
        self.docker_client_mock.api.build.return_value = []
        result = self.builder._build_lambda_image("Name", metadata)
        self.assertEqual(result, "name:Tag")
    def test_can_build_image_function_without_tag(self):
        """Missing DockerTag falls back to 'latest'."""
        metadata = {"Dockerfile": "Dockerfile", "DockerContext": "context", "DockerBuildArgs": {"a": "b"}}
        self.docker_client_mock.api.build.return_value = []
        result = self.builder._build_lambda_image("Name", metadata)
        self.assertEqual(result, "name:latest")
    @patch("samcli.lib.build.app_builder.os")
    def test_can_build_image_function_under_debug(self, mock_os):
        """Debug build mode appends '-debug' to the tag and injects
        SAM_BUILD_MODE into the docker build args."""
        mock_os.environ.get.return_value = "debug"
        metadata = {
            "Dockerfile": "Dockerfile",
            "DockerContext": "context",
            "DockerTag": "Tag",
            "DockerBuildArgs": {"a": "b"},
        }
        self.docker_client_mock.api.build.return_value = []
        result = self.builder._build_lambda_image("Name", metadata)
        self.assertEqual(result, "name:Tag-debug")
        self.assertEqual(
            self.docker_client_mock.api.build.call_args,
            # NOTE (sriram-mv): path set to ANY to handle platform differences.
            call(
                path=ANY,
                dockerfile="Dockerfile",
                tag="name:Tag-debug",
                buildargs={"a": "b", "SAM_BUILD_MODE": "debug"},
                decode=True,
            ),
        )
    @patch("samcli.lib.build.app_builder.os")
    def test_can_build_image_function_under_debug_with_target(self, mock_os):
        """DockerBuildTarget must be forwarded as the build 'target' stage."""
        mock_os.environ.get.return_value = "debug"
        metadata = {
            "Dockerfile": "Dockerfile",
            "DockerContext": "context",
            "DockerTag": "Tag",
            "DockerBuildArgs": {"a": "b"},
            "DockerBuildTarget": "stage",
        }
        self.docker_client_mock.api.build.return_value = []
        result = self.builder._build_lambda_image("Name", metadata)
        self.assertEqual(result, "name:Tag-debug")
        self.assertEqual(
            self.docker_client_mock.api.build.call_args,
            call(
                path=ANY,
                dockerfile="Dockerfile",
                tag="name:Tag-debug",
                buildargs={"a": "b", "SAM_BUILD_MODE": "debug"},
                decode=True,
                target="stage",
            ),
        )
class TestApplicationBuilder_build_function(TestCase):
def setUp(self):
self.builder = ApplicationBuilder(Mock(), "/build/dir", "/base/dir", "cachedir")
@patch("samcli.lib.build.app_builder.get_workflow_config")
@patch("samcli.lib.build.app_builder.osutils")
def test_must_build_in_process(self, osutils_mock, get_workflow_config_mock):
function_name = "function_name"
codeuri = "path/to/source"
packagetype = ZIP
runtime = "runtime"
scratch_dir = "scratch"
handler = "handler.handle"
config_mock = get_workflow_config_mock.return_value = Mock()
config_mock.manifest_name = "manifest_name"
osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=scratch_dir)
osutils_mock.mkdir_temp.return_value.__exit__ = Mock()
self.builder._build_function_in_process = Mock()
code_dir = str(Path("/base/dir/path/to/source").resolve())
artifacts_dir = str(Path("/build/dir/function_full_path"))
manifest_path = str(Path(os.path.join(code_dir, config_mock.manifest_name)).resolve())
self.builder._build_function(function_name, codeuri, ZIP, runtime, handler, artifacts_dir)
self.builder._build_function_in_process.assert_called_with(
config_mock, code_dir, artifacts_dir, scratch_dir, manifest_path, runtime, None
)
@patch("samcli.lib.build.app_builder.get_workflow_config")
@patch("samcli.lib.build.app_builder.osutils")
def test_must_build_in_process_with_metadata(self, osutils_mock, get_workflow_config_mock):
function_name = "function_name"
codeuri = "path/to/source"
runtime = "runtime"
packagetype = ZIP
scratch_dir = "scratch"
handler = "handler.handle"
config_mock = get_workflow_config_mock.return_value = Mock()
config_mock.manifest_name = "manifest_name"
osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=scratch_dir)
osutils_mock.mkdir_temp.return_value.__exit__ = Mock()
self.builder._build_function_in_process = Mock()
code_dir = str(Path("/base/dir/path/to/source").resolve())
artifacts_dir = str(Path("/build/dir/function_full_path"))
manifest_path = str(Path(os.path.join(code_dir, config_mock.manifest_name)).resolve())
self.builder._build_function(
function_name, codeuri, packagetype, runtime, handler, artifacts_dir, metadata={"BuildMethod": "Workflow"}
)
get_workflow_config_mock.assert_called_with(
runtime, code_dir, self.builder._base_dir, specified_workflow="Workflow"
)
self.builder._build_function_in_process.assert_called_with(
config_mock, code_dir, artifacts_dir, scratch_dir, manifest_path, runtime, None
)
@patch("samcli.lib.build.app_builder.get_workflow_config")
@patch("samcli.lib.build.app_builder.osutils")
def test_must_build_in_container(self, osutils_mock, get_workflow_config_mock):
function_name = "function_name"
codeuri = "path/to/source"
runtime = "runtime"
packagetype = ZIP
scratch_dir = "scratch"
handler = "handler.handle"
config_mock = get_workflow_config_mock.return_value = Mock()
config_mock.manifest_name = "manifest_name"
osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=scratch_dir)
osutils_mock.mkdir_temp.return_value.__exit__ = Mock()
self.builder._build_function_on_container = Mock()
code_dir = str(Path("/base/dir/path/to/source").resolve())
artifacts_dir = str(Path("/build/dir/function_full_path"))
manifest_path = str(Path(os.path.join(code_dir, config_mock.manifest_name)).resolve())
# Settting the container manager will make us use the container
self.builder._container_manager = Mock()
self.builder._build_function(function_name, codeuri, packagetype, runtime, handler, artifacts_dir)
self.builder._build_function_on_container.assert_called_with(
config_mock, code_dir, artifacts_dir, manifest_path, runtime, None, None, None
)
@patch("samcli.lib.build.app_builder.get_workflow_config")
@patch("samcli.lib.build.app_builder.osutils")
def test_must_build_in_container_with_env_vars(self, osutils_mock, get_workflow_config_mock):
function_name = "function_name"
codeuri = "path/to/source"
runtime = "runtime"
packagetype = ZIP
scratch_dir = "scratch"
handler = "handler.handle"
config_mock = get_workflow_config_mock.return_value = Mock()
config_mock.manifest_name = "manifest_name"
env_vars = {"TEST": "test"}
osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=scratch_dir)
osutils_mock.mkdir_temp.return_value.__exit__ = Mock()
self.builder._build_function_on_container = Mock()
code_dir = str(Path("/base/dir/path/to/source").resolve())
artifacts_dir = str(Path("/build/dir/function_name"))
manifest_path = str(Path(os.path.join(code_dir, config_mock.manifest_name)).resolve())
# Settting the container manager will make us use the container
self.builder._container_manager = Mock()
self.builder._build_function(
function_name, codeuri, packagetype, runtime, handler, artifacts_dir, container_env_vars=env_vars
)
self.builder._build_function_on_container.assert_called_with(
config_mock, code_dir, artifacts_dir, manifest_path, runtime, None, {"TEST": "test"}, None
)
@patch("samcli.lib.build.app_builder.get_workflow_config")
@patch("samcli.lib.build.app_builder.osutils")
def test_must_build_in_container_with_custom_specified_build_image(self, osutils_mock, get_workflow_config_mock):
    """A per-function entry in _build_images must be used as the container image."""
    function_name = "function_name"
    codeuri = "path/to/source"
    runtime = "runtime"
    packagetype = ZIP
    scratch_dir = "scratch"
    handler = "handler.handle"
    image_uri = "image uri"
    # Image keyed by this specific function's name
    build_images = {function_name: image_uri}
    config_mock = get_workflow_config_mock.return_value = Mock()
    config_mock.manifest_name = "manifest_name"

    # Stub mkdir_temp's context-manager protocol
    osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=scratch_dir)
    osutils_mock.mkdir_temp.return_value.__exit__ = Mock()

    self.builder._build_function_on_container = Mock()

    code_dir = str(Path("/base/dir/path/to/source").resolve())
    artifacts_dir = str(Path("/build/dir/function_name"))
    manifest_path = str(Path(os.path.join(code_dir, config_mock.manifest_name)).resolve())

    # Setting the container manager will make us use the container
    self.builder._container_manager = Mock()
    self.builder._build_images = build_images

    self.builder._build_function(
        function_name, codeuri, packagetype, runtime, handler, artifacts_dir, container_env_vars=None
    )

    # Last positional argument is the resolved container image
    self.builder._build_function_on_container.assert_called_with(
        config_mock, code_dir, artifacts_dir, manifest_path, runtime, None, None, image_uri
    )
@patch("samcli.lib.build.app_builder.get_workflow_config")
@patch("samcli.lib.build.app_builder.osutils")
def test_must_build_in_container_with_custom_default_build_image(self, osutils_mock, get_workflow_config_mock):
    """When no per-function image matches, the None-keyed default image must be used."""
    function_name = "function_name"
    codeuri = "path/to/source"
    runtime = "runtime"
    packagetype = ZIP
    scratch_dir = "scratch"
    handler = "handler.handle"
    image_uri = "image uri"
    # "abc" does not match the function; None acts as the fallback/default key
    build_images = {"abc": "efg", None: image_uri}
    config_mock = get_workflow_config_mock.return_value = Mock()
    config_mock.manifest_name = "manifest_name"

    # Stub mkdir_temp's context-manager protocol
    osutils_mock.mkdir_temp.return_value.__enter__ = Mock(return_value=scratch_dir)
    osutils_mock.mkdir_temp.return_value.__exit__ = Mock()

    self.builder._build_function_on_container = Mock()

    code_dir = str(Path("/base/dir/path/to/source").resolve())
    artifacts_dir = str(Path("/build/dir/function_name"))
    manifest_path = str(Path(os.path.join(code_dir, config_mock.manifest_name)).resolve())

    # Setting the container manager will make us use the container
    self.builder._container_manager = Mock()
    self.builder._build_images = build_images

    self.builder._build_function(
        function_name, codeuri, packagetype, runtime, handler, artifacts_dir, container_env_vars=None
    )

    self.builder._build_function_on_container.assert_called_with(
        config_mock, code_dir, artifacts_dir, manifest_path, runtime, None, None, image_uri
    )
class TestApplicationBuilder_build_function_in_process(TestCase):
    """Tests for ApplicationBuilder._build_function_in_process (local, non-container builds)."""

    def setUp(self):
        self.builder = ApplicationBuilder(Mock(), "/build/dir", "/base/dir", "/cache/dir", mode="mode")

    @patch("samcli.lib.build.app_builder.LambdaBuilder")
    def test_must_use_lambda_builder(self, lambda_builder_mock):
        """LambdaBuilder must be constructed from the workflow config and invoked with all dirs."""
        config_mock = Mock()
        builder_instance_mock = lambda_builder_mock.return_value = Mock()

        result = self.builder._build_function_in_process(
            config_mock, "source_dir", "artifacts_dir", "scratch_dir", "manifest_path", "runtime", None
        )
        # The artifacts directory is passed through as the result
        self.assertEqual(result, "artifacts_dir")

        lambda_builder_mock.assert_called_with(
            language=config_mock.language,
            dependency_manager=config_mock.dependency_manager,
            application_framework=config_mock.application_framework,
        )

        builder_instance_mock.build.assert_called_with(
            "source_dir",
            "artifacts_dir",
            "scratch_dir",
            "manifest_path",
            runtime="runtime",
            executable_search_paths=config_mock.executable_search_paths,
            mode="mode",
            options=None,
        )

    @patch("samcli.lib.build.app_builder.LambdaBuilder")
    def test_must_raise_on_error(self, lambda_builder_mock):
        """LambdaBuilderError from the library must surface as a samcli BuildError."""
        config_mock = Mock()
        builder_instance_mock = lambda_builder_mock.return_value = Mock()
        builder_instance_mock.build.side_effect = LambdaBuilderError()
        self.builder._get_build_options = Mock(return_value=None)

        with self.assertRaises(BuildError):
            self.builder._build_function_in_process(
                config_mock, "source_dir", "artifacts_dir", "scratch_dir", "manifest_path", "runtime", None
            )
class TestApplicationBuilder_build_function_on_container(TestCase):
    """Tests for ApplicationBuilder._build_function_on_container (builds inside a Docker container)."""

    def setUp(self):
        self.container_manager = Mock()
        self.builder = ApplicationBuilder(
            Mock(), "/build/dir", "/base/dir", "/cache/dir", container_manager=self.container_manager, mode="mode"
        )
        self.builder._parse_builder_response = Mock()

    @patch("samcli.lib.build.app_builder.LambdaBuildContainer")
    @patch("samcli.lib.build.app_builder.lambda_builders_protocol_version")
    @patch("samcli.lib.build.app_builder.LOG")
    @patch("samcli.lib.build.app_builder.osutils")
    def test_must_build_in_container(self, osutils_mock, LOGMock, protocol_version_mock, LambdaBuildContainerMock):
        """Happy path: container is created, run, its stdout parsed, artifacts copied out, container stopped."""
        config = Mock()
        log_level = LOGMock.getEffectiveLevel.return_value = "foo"
        stdout_data = "container stdout response data"
        response = {"result": {"artifacts_dir": "/some/dir"}}

        # Simulate the container writing its JSON-RPC reply to stdout
        def mock_wait_for_logs(stdout, stderr):
            stdout.write(stdout_data.encode("utf-8"))

        # Wire all mocks correctly
        container_mock = LambdaBuildContainerMock.return_value = Mock()
        container_mock.wait_for_logs = mock_wait_for_logs
        self.builder._parse_builder_response.return_value = response

        result = self.builder._build_function_on_container(
            config, "source_dir", "artifacts_dir", "manifest_path", "runtime", None
        )
        self.assertEqual(result, "artifacts_dir")

        LambdaBuildContainerMock.assert_called_once_with(
            protocol_version_mock,
            config.language,
            config.dependency_manager,
            config.application_framework,
            "source_dir",
            "manifest_path",
            "runtime",
            image=None,
            log_level=log_level,
            optimizations=None,
            options=None,
            executable_search_paths=config.executable_search_paths,
            mode="mode",
            env_vars={},
        )

        self.container_manager.run.assert_called_with(container_mock)
        self.builder._parse_builder_response.assert_called_once_with(stdout_data, container_mock.image)
        # Artifacts are copied from the path reported by the builder, with "/." so
        # the directory *contents* (not the directory itself) land in artifacts_dir
        container_mock.copy.assert_called_with(response["result"]["artifacts_dir"] + "/.", "artifacts_dir")
        self.container_manager.stop.assert_called_with(container_mock)

    @patch("samcli.lib.build.app_builder.LambdaBuildContainer")
    def test_must_raise_on_unsupported_container(self, LambdaBuildContainerMock):
        """A missing lambda-builders executable in the image maps to UnsupportedBuilderLibraryVersionError."""
        config = Mock()
        container_mock = LambdaBuildContainerMock.return_value = Mock()
        container_mock.image = "image name"
        container_mock.executable_name = "myexecutable"

        self.container_manager.run.side_effect = docker.errors.APIError(
            "Bad Request: 'lambda-builders' " "executable file not found in $PATH"
        )

        # NOTE(review): this call passes "scratch_dir" as an extra positional argument
        # compared with the happy-path test above — confirm against the current
        # _build_function_on_container signature.
        with self.assertRaises(UnsupportedBuilderLibraryVersionError) as ctx:
            self.builder._build_function_on_container(
                config, "source_dir", "artifacts_dir", "scratch_dir", "manifest_path", "runtime", {}
            )

        msg = (
            "You are running an outdated version of Docker container 'image name' that is not compatible with"
            "this version of SAM CLI. Please upgrade to continue to continue with build. "
            "Reason: 'myexecutable executable not found in container'"
        )
        self.assertEqual(str(ctx.exception), msg)
        # Even on failure the container must be stopped
        self.container_manager.stop.assert_called_with(container_mock)

    def test_must_raise_on_docker_not_running(self):
        """An unreachable Docker daemon must fail fast with BuildInsideContainerError."""
        config = Mock()
        self.container_manager.is_docker_reachable = False

        with self.assertRaises(BuildInsideContainerError) as ctx:
            self.builder._build_function_on_container(
                config, "source_dir", "artifacts_dir", "scratch_dir", "manifest_path", "runtime", {}
            )

        self.assertEqual(
            str(ctx.exception), "Docker is unreachable. Docker needs to be running to build inside a container."
        )

    @patch("samcli.lib.build.app_builder.supports_build_in_container")
    def test_must_raise_on_unsupported_container_build(self, supports_build_in_container_mock):
        """Workflows that cannot build in a container must raise ContainerBuildNotSupported with the reason."""
        config = Mock()
        reason = "my reason"
        supports_build_in_container_mock.return_value = (False, reason)

        with self.assertRaises(ContainerBuildNotSupported) as ctx:
            self.builder._build_function_on_container(
                config, "source_dir", "artifacts_dir", "scratch_dir", "manifest_path", "runtime", {}
            )
        self.assertEqual(str(ctx.exception), reason)
class TestApplicationBuilder_parse_builder_response(TestCase):
    """_parse_builder_response must decode builder JSON replies and map error codes to exceptions."""

    def setUp(self):
        self.image_name = "name"
        self.builder = ApplicationBuilder(Mock(), "/build/dir", "/base/dir", "/cache/dir")

    def _parse(self, payload):
        # Serialize and parse in one step; all tests use the same image name.
        return self.builder._parse_builder_response(json.dumps(payload), self.image_name)

    def test_must_parse_json(self):
        payload = {"valid": "json"}
        self.assertEqual(self._parse(payload), payload)

    def test_must_fail_on_invalid_json(self):
        with self.assertRaises(ValueError):
            self.builder._parse_builder_response("{invalid: json}", self.image_name)

    def test_must_raise_on_user_error(self):
        message = "invalid params"
        with self.assertRaises(BuildInsideContainerError) as ctx:
            self._parse({"error": {"code": 488, "message": message}})
        self.assertEqual(str(ctx.exception), message)

    def test_must_raise_on_version_mismatch(self):
        message = "invalid params"
        with self.assertRaises(UnsupportedBuilderLibraryVersionError) as ctx:
            self._parse({"error": {"code": 505, "message": message}})
        expected = str(UnsupportedBuilderLibraryVersionError(self.image_name, message))
        self.assertEqual(str(ctx.exception), expected)

    def test_must_raise_on_method_not_found(self):
        message = "invalid method"
        with self.assertRaises(UnsupportedBuilderLibraryVersionError) as ctx:
            self._parse({"error": {"code": -32601, "message": message}})
        expected = str(UnsupportedBuilderLibraryVersionError(self.image_name, message))
        self.assertEqual(str(ctx.exception), expected)

    def test_must_raise_on_all_other_codes(self):
        message = "builder crashed"
        with self.assertRaises(ValueError) as ctx:
            self._parse({"error": {"code": 1, "message": message}})
        self.assertEqual(str(ctx.exception), message)
class TestApplicationBuilder_make_env_vars(TestCase):
    """_make_env_vars must merge file-provided and inline env vars with correct precedence."""

    def _make(self, file_env_vars, inline_env_vars):
        # All cases target "Function1"; "Function2" entries must never leak in.
        return ApplicationBuilder._make_env_vars(
            generate_function("Function1"), file_env_vars, inline_env_vars
        )

    def test_make_env_vars_with_env_file(self):
        from_file = {
            "Parameters": {"ENV_VAR1": "1"},
            "Function1": {"ENV_VAR2": "2"},
            "Function2": {"ENV_VAR3": "3"},
        }
        self.assertEqual(self._make(from_file, {}), {"ENV_VAR1": "1", "ENV_VAR2": "2"})

    def test_make_env_vars_with_function_precedence(self):
        # A function-specific entry overrides the global Parameters entry.
        from_file = {
            "Parameters": {"ENV_VAR1": "1"},
            "Function1": {"ENV_VAR1": "2"},
            "Function2": {"ENV_VAR3": "3"},
        }
        self.assertEqual(self._make(from_file, {}), {"ENV_VAR1": "2"})

    def test_make_env_vars_with_inline_env(self):
        inline = {
            "Parameters": {"ENV_VAR1": "1"},
            "Function1": {"ENV_VAR2": "2"},
            "Function2": {"ENV_VAR3": "3"},
        }
        self.assertEqual(self._make({}, inline), {"ENV_VAR1": "1", "ENV_VAR2": "2"})

    def test_make_env_vars_with_both(self):
        # Inline values win over values loaded from the env-vars file.
        from_file = {
            "Parameters": {"ENV_VAR1": "1"},
            "Function1": {"ENV_VAR2": "2"},
            "Function2": {"ENV_VAR3": "3"},
        }
        inline = {
            "Parameters": {"ENV_VAR1": "2"},
            "Function1": {"ENV_VAR2": "3"},
            "Function2": {"ENV_VAR3": "3"},
        }
        self.assertEqual(self._make(from_file, inline), {"ENV_VAR1": "2", "ENV_VAR2": "3"})
|
python
|
import requests
import json
# Fetch the top cryptocurrencies (by market cap, USD) from the CoinGecko API and
# print name, market cap and current price for the first N entries.
coin_market_cap = requests.get(
    "https://api.coingecko.com/api/v3/coins/markets?vs_currency=usd&order=market_cap_desc&per_page=100&page=1&sparkline=false",
    headers={"accept": "application/json"})

# Parse the response once; the original called .json() (a full re-parse) twice
# per loop iteration.
coins = coin_market_cap.json()

print("Enter the number of top cryptocurrencies by market capitalization: ")
n = int(input())

for rank, coin in enumerate(coins[:n], start=1):
    print(f'{rank}. "Cryptocurrency": "{coin["name"]}", "market cap": '
          f'{coin["market_cap"]}, "current price": {coin["current_price"]}')
|
python
|
"""
"""
from __future__ import print_function
from abc import ABCMeta, abstractmethod
class BaseAgent:
    """
    Abstract base class specifying the interface an agent must implement.
    Concrete agents subclass this and provide every abstract method; the
    bodies here are intentionally empty.
    """
    __metaclass__ = ABCMeta  # Python 2-style ABC declaration (file imports print_function, so py2 support is intended)

    def __init__(self):
        pass

    @abstractmethod
    def agent_init(self, agent_info= {}):
        """One-time setup, called with a configuration dict before the experiment starts.
        NOTE(review): mutable default argument — safe only if implementations never mutate it."""

    @abstractmethod
    def agent_start(self, observation):
        """
        Called with the first observation of an episode; implementations are
        expected to return the agent's first action.
        """

    @abstractmethod
    def agent_step(self, reward, observation):
        """
        Called on every step with the last reward and the current observation;
        implementations are expected to return the next action.
        """

    @abstractmethod
    def agent_end(self, reward):
        """
        Called with the final reward when an episode terminates.
        """

    @abstractmethod
    def agent_cleanup(self):
        """Release any resources when the experiment ends."""

    @abstractmethod
    def agent_message(self, message):
        """
        Handle an arbitrary message from the experiment harness and return a response.
        """
|
python
|
import torch
import gpytorch
from torch.nn.functional import softplus
from gpytorch.priors import NormalPrior, MultivariateNormalPrior
class LogRBFMean(gpytorch.means.Mean):
    """
    Log of an RBF Kernel's spectral density, used as a GP mean function.

    Up to additive constants the log spectral density of an RBF kernel is
    c - t^2 / (2 * l), which is what forward() computes with a softplus-
    transformed lengthscale.
    """
    def __init__(self, hypers = None):
        super(LogRBFMean, self).__init__()
        if hypers is not None:
            # Initialise from an existing hyperparameter vector: the constant
            # absorbs hypers[-5] plus log(softplus(hypers[-3])); the raw
            # (pre-softplus) lengthscale comes from hypers[-4].
            # NOTE(review): the expected layout of `hypers` is undocumented here — confirm with callers.
            self.register_parameter(name="constant", parameter=torch.nn.Parameter(hypers[-5] + softplus(hypers[-3]).log()))
            self.register_parameter(name="lengthscale", parameter=torch.nn.Parameter(hypers[-4]))
        else:
            # Default initialisation: constant 0, raw lengthscale -0.3
            self.register_parameter(name="constant", parameter=torch.nn.Parameter(0. * torch.ones(1)))
            self.register_parameter(name="lengthscale", parameter=torch.nn.Parameter(-0.3*torch.ones(1)))
        # Register broad N(0, 100) priors on both parameters; the lengthscale
        # prior is placed on the softplus-transformed value.
        self.register_prior(name='constant_prior', prior=NormalPrior(torch.zeros(1), 100.*torch.ones(1), transform=None),
                            param_or_closure='constant')
        self.register_prior(name='lengthscale_prior', prior=NormalPrior(torch.zeros(1), 100.*torch.ones(1), transform=torch.nn.functional.softplus),
                            param_or_closure='lengthscale')

    def set_pars(self, hypers):
        # Overwrite parameter values in place from the tail of a hyperparameter vector.
        self.constant.data = hypers[-2]
        self.lengthscale.data = hypers[-1]

    def forward(self, input):
        # log RBF spectral density up to constants: c - t^2 / (2 * softplus(l));
        # the 1e-7 guards against division by a zero lengthscale.
        out = self.constant - input.pow(2).squeeze(-1) / (2 * (softplus(self.lengthscale.view(-1)) + 1e-7) )
        return out
class LogRBFMean2D(gpytorch.means.Mean):
    """
    Log of an RBF Kernel's spectral density, 2D-input variant.

    Parameters and priors mirror LogRBFMean; only forward() differs, operating
    on the two input columns jointly.
    """
    def __init__(self, hypers = None):
        super(LogRBFMean2D, self).__init__()
        if hypers is not None:
            # Initialise from an existing hyperparameter vector (same layout
            # convention as LogRBFMean — confirm with callers).
            self.register_parameter(name="constant", parameter=torch.nn.Parameter(hypers[-5] + softplus(hypers[-3]).log()))
            self.register_parameter(name="lengthscale", parameter=torch.nn.Parameter(hypers[-4]))
        else:
            self.register_parameter(name="constant", parameter=torch.nn.Parameter(0. * torch.ones(1)))
            self.register_parameter(name="lengthscale", parameter=torch.nn.Parameter(-0.3*torch.ones(1)))
        # Broad N(0, 100) priors; lengthscale prior is on the softplus-transformed value.
        self.register_prior(name='constant_prior', prior=NormalPrior(torch.zeros(1), 100.*torch.ones(1), transform=None),
                            param_or_closure='constant')
        self.register_prior(name='lengthscale_prior', prior=NormalPrior(torch.zeros(1), 100.*torch.ones(1), transform=torch.nn.functional.softplus),
                            param_or_closure='lengthscale')

    def set_pars(self, hypers):
        # Overwrite parameter values in place from the tail of a hyperparameter vector.
        self.constant.data = hypers[-2]
        self.lengthscale.data = hypers[-1]

    def forward(self, input):
        # NOTE(review): unlike the 1D version (c - t^2 / 2l), this uses
        # x0 * x1 * (x0 - x1)^2 as the squared-distance term. A commented-out
        # alternative below uses the standard sum-of-squares form — confirm
        # which formula is intended.
        out = self.constant - input[:,0]*input[:,1]*(input[:,0]-input[:,1]).pow(2).squeeze(-1) / (2 * (softplus(self.lengthscale.view(-1)) + 1e-7) )
        return out
#class LogRBFMean2D(gpytorch.means.Mean):
# """
# Log of 2D RBF Kernel's spectral density, with diagonal length-scale matrix.
# """
# def __init__(self, hypers = None):
# super(LogRBFMean2D, self).__init__()
# if hypers is not None:
# self.register_parameter(name="constant", parameter=torch.nn.Parameter(hypers[-5] + softplus(hypers[-3]).log()))
# self.register_parameter(name="lengthscale", parameter=torch.nn.Parameter(hypers[-4]))
# else:
# self.register_parameter(name="constant", parameter=torch.nn.Parameter(0. * torch.ones(2)))
# self.register_parameter(name="lengthscale", parameter=torch.nn.Parameter(-0.3*torch.ones(2)))
#
# # register prior
# self.register_prior(name='constant_prior',
# prior = MultivariateNormalPrior(torch.zeros(2), covariance_matrix=100.*torch.eye(2), transform=None),
# param_or_closure='constant')
# self.register_prior(name='lengthscale_prior',
# prior = MultivariateNormalPrior(torch.zeros(2), covariance_matrix=100.*torch.eye(2), transform=torch.nn.functional.softplus),
# param_or_closure='lengthscale')
#
# def set_pars(self, hypers):
# self.constant.data = hypers[-2]
# self.lengthscale.data = hypers[-1]
#
# def forward(self, input):
# # logrbf up to constants is: c - t^2 / 2l
# out = self.constant - input.pow(2).sum(dim=1).squeeze(-1) / (2 * (softplus(self.lengthscale.view(-1)) + 1e-7) )
# return out
|
python
|
# python
import lx, lxifc, lxu, modo
import tagger
from os.path import basename, splitext
CMD_NAME = tagger.CMD_SET_PTAG
def material_tags_list():
    """Return every material polytag in the scene, including tags only referenced by masks."""
    tags = set(tagger.scene.all_tags_by_type(lx.symbol.i_POLYTAG_MATERIAL))
    for tag_type, tag in tagger.items.get_all_masked_tags():
        if tag_type == "material":
            tags.add(tag)
    return list(tags)
class CommandClass(tagger.CommanderClass):
    """Assign a polygon tag (material/part/pick) to polygons and create or reuse a
    matching shader-tree mask, optionally applying a preset."""
    # _commander_default_values = []

    def commander_arguments(self):
        # Argument layout — order matters, commander_execute reads values by index:
        # 0 tag, 1 preset, 2 scope, 3 tag type, 4 what to do with existing masks.
        return [
            {
                'name': tagger.TAG,
                'label': tagger.LABEL_TAG,
                'datatype': 'string',
                'default': "",
                'flags': [],
                'values_list_type': 'sPresetText',
                'values_list': material_tags_list
            }, {
                'name': tagger.PRESET,
                'label': tagger.LABEL_PRESET,
                'datatype': 'string',
                'default': tagger.RANDOM,
                'values_list_type': 'popup',
                'values_list': tagger.presets.presets_popup,
                'flags': ['optional', 'query']
            }, {
                'name': tagger.SCOPE,
                'label': tagger.LABEL_SCOPE,
                'datatype': 'string',
                'default': tagger.SCOPE_SELECTED,
                'values_list_type': 'popup',
                'values_list': tagger.POPUPS_SCOPE,
                'flags': ['optional']
            }, {
                'name': tagger.TAGTYPE,
                'label': tagger.LABEL_TAGTYPE,
                'datatype': 'string',
                'default': tagger.MATERIAL,
                'values_list_type': 'popup',
                'values_list': tagger.POPUPS_TAGTYPES,
                'flags': ['optional']
            }, {
                'name': tagger.WITH_EXISTING,
                'label': tagger.LABEL_WITH_EXISTING,
                'datatype': 'string',
                'default': tagger.USE,
                'values_list_type': 'popup',
                'values_list': tagger.POPUPS_WITH_EXISTING,
                'flags': ['optional']
            }
        ]

    def commander_execute(self, msg, flags):
        # Read arguments by index (see commander_arguments above).
        pTag = self.commander_arg_value(0)
        # NOTE(review): fallback here is SCOPE_FLOOD while the declared default
        # for argument 2 is SCOPE_SELECTED — confirm which is intended.
        preset = self.commander_arg_value(1, tagger.RANDOM)
        connected = self.commander_arg_value(2, tagger.SCOPE_FLOOD)
        tagType = self.commander_arg_value(3, tagger.MATERIAL)
        # NOTE(review): no fallback given, unlike the other optional args — confirm.
        withExisting = self.commander_arg_value(4)

        # RANDOM sentinel means "no preset selected"
        if preset == tagger.RANDOM:
            preset = None

        i_POLYTAG = tagger.convert_to_iPOLYTAG(tagType)

        # No tag supplied: fall back to the default material name, or derive the
        # tag name from the preset's filename when a .lxp preset was chosen.
        if not pTag:
            if not preset:
                pTag = tagger.DEFAULT_MATERIAL_NAME
            elif not preset.endswith(".lxp"):
                pTag = tagger.DEFAULT_MATERIAL_NAME
            elif preset.endswith(".lxp"):
                pTag = splitext(basename(preset))[0]

        # find any existing masks for this pTag
        existing_masks = tagger.shadertree.get_masks( pTags = { pTag: i_POLYTAG })

        # tag the polys
        args = tagger.build_arg_string({
            tagger.TAGTYPE: tagType,
            tagger.TAG: pTag,
            tagger.SCOPE: connected
        })
        lx.eval("!" + tagger.CMD_PTAG_SET + args)

        # build a new mask if we need one
        if not existing_masks:
            new_mask = tagger.shadertree.build_material(i_POLYTAG = i_POLYTAG, pTag = pTag, preset = preset)
            tagger.shadertree.move_to_base_shader(new_mask)

        # USE: reuse whatever masks already exist, do nothing
        elif existing_masks and withExisting == tagger.USE:
            pass

        # KEEP: leave existing masks alone but add a fresh one on top
        elif existing_masks and withExisting == tagger.KEEP:
            new_mask = tagger.shadertree.build_material(i_POLYTAG = i_POLYTAG, pTag = pTag, preset = preset)
            tagger.shadertree.move_to_base_shader(new_mask)

        # REMOVE: replace existing masks with a fresh one
        elif existing_masks and withExisting == tagger.REMOVE:
            new_mask = tagger.shadertree.build_material(i_POLYTAG = i_POLYTAG, pTag = pTag, preset = preset)
            tagger.shadertree.move_to_base_shader(new_mask)
            tagger.safe_removeItems(existing_masks, True)

        # CONSOLIDATE: merge existing masks and parent the new one under the result
        elif existing_masks and withExisting == tagger.CONSOLIDATE:
            new_mask = tagger.shadertree.build_material(i_POLYTAG = i_POLYTAG, pTag = pTag, preset = preset)
            tagger.shadertree.move_to_base_shader(new_mask)
            consolidation_masks = tagger.shadertree.consolidate(pTags = { pTag: i_POLYTAG })
            new_mask.setParent(consolidation_masks[pTag])
            tagger.shadertree.move_to_top(new_mask)

        # Tell the UI that datatypes changed so lists/popups refresh
        notifier = tagger.Notifier()
        notifier.Notify(lx.symbol.fCMDNOTIFY_DATATYPE)
lx.bless(CommandClass, CMD_NAME)
|
python
|
import turtle
'''this makes a circle by building many squares'''
def draw_square(tom, side=100):
    """Draw a square with the given turtle.

    Args:
        tom: a turtle-like object exposing ``forward`` and ``right``.
        side: edge length in turtle units; defaults to 100, the value the
            original hard-coded, so existing callers are unaffected.
    """
    for _ in range(4):
        tom.forward(side)
        tom.right(90)
def draw_flower():
    """Draw a 72-square rosette ('flower') and a stem on a red background."""
    window = turtle.Screen()
    window.bgcolor("red")

    artist = turtle.Turtle()
    artist.speed(0)

    # Petals: 72 squares, each rotated 5 degrees from the previous one.
    artist.color("blue")
    for _ in range(72):
        draw_square(artist)
        artist.right(5)

    # Stem: head downward and draw a thin rectangle below the rosette.
    artist.color("green")
    artist.right(90)
    artist.forward(250)
    artist.right(90)
    artist.forward(3)
    artist.right(90)
    artist.forward(250)

    window.exitonclick()
draw_flower()
|
python
|
class Solution:
    def maxProfit(self, prices):
        """Maximum total profit with unlimited buy/sell transactions.

        Equivalent to taking every positive day-over-day price delta: any
        sequence of trades decomposes into a sum of single-day moves, so
        collecting all the upward moves is optimal.
        """
        return sum(
            today - yesterday
            for yesterday, today in zip(prices, prices[1:])
            if today > yesterday
        )
if __name__ == '__main__':
    # Smoke test: expected output is 7 (buy at 1, sell at 5; buy at 3, sell at 6).
    prices = [7, 1, 5, 3, 6, 4]
    print(Solution().maxProfit(prices))
|
python
|
from bs4 import BeautifulSoup
from requests.exceptions import RequestException
from lxml import etree
import requests
import re
def get_links(who_sells=0):
    """Fetch a 58.com tablet listing page and print the item detail links found on it.

    who_sells selects the listing category segment of the URL (0 by default).
    """
    listing_url = 'http://bj.58.com/pbdn/{}/'.format(str(who_sells))
    print(listing_url)
    page = requests.get(listing_url, headers=headers)
    # Extract detail-page hrefs from the listing markup.
    pattern = r'<a\sonClick="clickLog\(\'from=zzpc_infoclick\'\);"\shref="(.*?)"\starget="_blank">'
    links = re.findall(pattern, page.text)
    print(1, links)
def get_info(url):
    """Fetch a 58.com item detail page and print a dict of its title, price, date and area.

    The original implementation ignored ``url`` and read the module-level
    ``web_data``/``soup`` globals, so it could only ever describe the one page
    fetched at import time; it also wrapped the parsing in an
    ``except RequestException`` that could never fire because the request
    happened elsewhere. Fetching here makes the function work for any URL and
    makes the exception handler meaningful.
    """
    try:
        page = requests.get(url, headers=headers)
        if page.status_code == 200:
            doc = BeautifulSoup(page.text, 'lxml')
            title = doc.title.text
            price = doc.select('#content span.price')
            date = doc.select('li.time')
            area = doc.select('span.c_25d')
            data = {
                'title': title,
                'price': price[0].text,
                'date': date[0].text,
                'area': list(area[0].stripped_strings),
                'cate': None,   # category: not scraped yet
                'views': None   # view count: not scraped yet
            }
            print(11, data)
    except RequestException:
        # Best-effort scraper: network failures are deliberately ignored.
        pass
# Shared request headers: identify as a desktop Chrome browser so 58.com serves the normal page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
}
# Sample detail page used by the module-level demo run below.
url = 'http://bj.58.com/pingbandiannao/30223879694911x.shtml'
web_data = requests.get(url, headers=headers)
# print(web_data.text)
soup = BeautifulSoup(web_data.text, 'lxml')
get_info(url)
get_links(0)
'''
#infolist > div.left > a.title.t
'''
|
python
|
# -*- coding: utf-8 -*-
import os
import json
from logging import getLogger
from six import string_types, text_type
from collections import OrderedDict
from ckan import logic
from ckan import model
import ckan.plugins as p
from ckan.lib.plugins import DefaultDatasetForm
try:
from ckan.lib.plugins import DefaultTranslation
except ImportError:
class DefaultTranslation():
pass
import ckanext.harvest
from ckanext.harvest.model import setup as model_setup
from ckanext.harvest.model import HarvestSource, HarvestJob, HarvestObject
from ckanext.harvest.log import DBLogHandler
from ckanext.harvest.utils import (
DATASET_TYPE_NAME
)
if p.toolkit.check_ckan_version(min_version='2.9.0'):
from ckanext.harvest.plugin.flask_plugin import MixinPlugin
else:
from ckanext.harvest.plugin.pylons_plugin import MixinPlugin
log = getLogger(__name__)
assert not log.disabled
class Harvest(MixinPlugin, p.SingletonPlugin, DefaultDatasetForm, DefaultTranslation):
    """CKAN plugin that registers the harvest-source dataset type and wires
    harvest metadata into package create/update/show/search hooks."""

    p.implements(p.IConfigurable)
    p.implements(p.IConfigurer, inherit=True)
    p.implements(p.IActions)
    p.implements(p.IAuthFunctions)
    p.implements(p.IDatasetForm)
    p.implements(p.IPackageController, inherit=True)
    p.implements(p.ITemplateHelpers)
    p.implements(p.IFacets, inherit=True)

    if p.toolkit.check_ckan_version(min_version='2.5.0'):
        p.implements(p.ITranslation, inherit=True)

    # True only while configure() runs; used to suppress hooks during startup.
    startup = False

    # ITranslation

    def i18n_directory(self):
        u'''Change the directory of the .mo translation files'''
        return os.path.join(
            os.path.dirname(ckanext.harvest.__file__),
            'i18n'
        )

    # IPackageController

    def after_create(self, context, data_dict):
        if 'type' in data_dict and data_dict['type'] == DATASET_TYPE_NAME and not self.startup:
            # Create an actual HarvestSource object
            _create_harvest_source_object(context, data_dict)

    def after_update(self, context, data_dict):
        if 'type' in data_dict and data_dict['type'] == DATASET_TYPE_NAME:
            # Edit the actual HarvestSource object
            _update_harvest_source_object(context, data_dict)

    def after_delete(self, context, data_dict):
        package_dict = p.toolkit.get_action('package_show')(context, {'id': data_dict['id']})

        if 'type' in package_dict and package_dict['type'] == DATASET_TYPE_NAME:
            # Delete the actual HarvestSource object
            _delete_harvest_source_object(context, package_dict)

    def before_view(self, data_dict):
        # check_ckan_version should be more clever than this
        if p.toolkit.check_ckan_version(max_version='2.1.99') and (
                'type' not in data_dict or data_dict['type'] != DATASET_TYPE_NAME):
            # This is a normal dataset, check if it was harvested and if so, add
            # info about the HarvestObject and HarvestSource
            harvest_object = model.Session.query(HarvestObject) \
                .filter(HarvestObject.package_id == data_dict['id']) \
                .filter(HarvestObject.current==True).first()  # noqa

            if harvest_object:
                for key, value in [
                    ('harvest_object_id', harvest_object.id),
                    ('harvest_source_id', harvest_object.source.id),
                    ('harvest_source_title', harvest_object.source.title),
                ]:
                    _add_extra(data_dict, key, value)
        return data_dict

    def before_search(self, search_params):
        '''Prevents the harvesters being shown in dataset search results.'''
        fq = search_params.get('fq', '')
        if 'dataset_type:harvest' not in fq:
            # Exclude harvest-source datasets from normal search results
            fq = u"{0} -dataset_type:harvest".format(search_params.get('fq', ''))
            search_params.update({'fq': fq})

        return search_params

    def after_show(self, context, data_dict):

        if 'type' in data_dict and data_dict['type'] == DATASET_TYPE_NAME:
            # This is a harvest source dataset, add extra info from the
            # HarvestSource object
            source = HarvestSource.get(data_dict['id'])
            if not source:
                log.error('Harvest source not found for dataset {0}'.format(data_dict['id']))
                return data_dict

            st_action_name = 'harvest_source_show_status'
            try:
                status_action = p.toolkit.get_action(st_action_name)
            except KeyError:
                # The action may not be registered yet; clear the cache and retry
                logic.clear_actions_cache()
                status_action = p.toolkit.get_action(st_action_name)

            data_dict['status'] = status_action(context, {'id': source.id})

        elif 'type' not in data_dict or data_dict['type'] != DATASET_TYPE_NAME:
            # This is a normal dataset, check if it was harvested and if so, add
            # info about the HarvestObject and HarvestSource
            harvest_object = model.Session.query(HarvestObject) \
                .filter(HarvestObject.package_id == data_dict['id']) \
                .filter(HarvestObject.current == True).first()  # noqa

            # If the harvest extras are there, remove them. This can happen eg
            # when calling package_update or resource_update, which call
            # package_show
            if data_dict.get('extras'):
                data_dict['extras'][:] = [e for e in data_dict.get('extras', [])
                                          if not e['key']
                                          in ('harvest_object_id', 'harvest_source_id', 'harvest_source_title',)]

            # We only want to add these extras at index time so they are part
            # of the cached data_dict used to display, search results etc. We
            # don't want them added when editing the dataset, otherwise we get
            # duplicated key errors.
            # The only way to detect indexing right now is checking that
            # validate is set to False.
            if harvest_object and not context.get('validate', True):
                for key, value in [
                    ('harvest_object_id', harvest_object.id),
                    ('harvest_source_id', harvest_object.source.id),
                    ('harvest_source_title', harvest_object.source.title),
                ]:
                    _add_extra(data_dict, key, value)

        return data_dict

    # IDatasetForm

    def is_fallback(self):
        return False

    def package_types(self):
        return [DATASET_TYPE_NAME]

    def package_form(self):
        return 'source/new_source_form.html'

    def search_template(self):
        return 'source/search.html'

    def read_template(self):
        return 'source/read.html'

    def new_template(self):
        return 'source/new.html'

    def edit_template(self):
        return 'source/edit.html'

    def setup_template_variables(self, context, data_dict):
        p.toolkit.c.dataset_type = DATASET_TYPE_NAME

    def create_package_schema(self):
        '''
        Returns the schema for mapping package data from a form to a format
        suitable for the database.
        '''
        from ckanext.harvest.logic.schema import harvest_source_create_package_schema
        schema = harvest_source_create_package_schema()
        if self.startup:
            # During startup an explicit id may be supplied
            schema['id'] = [text_type]
        return schema

    def update_package_schema(self):
        '''
        Returns the schema for mapping package data from a form to a format
        suitable for the database.
        '''
        from ckanext.harvest.logic.schema import harvest_source_update_package_schema
        schema = harvest_source_update_package_schema()
        return schema

    def show_package_schema(self):
        '''
        Returns the schema for mapping package data from the database into a
        format suitable for the form
        '''
        from ckanext.harvest.logic.schema import harvest_source_show_package_schema
        return harvest_source_show_package_schema()

    def configure(self, config):
        self.startup = True

        # Setup harvest model
        model_setup()

        # Configure database logger
        _configure_db_logger(config)

        self.startup = False

    def update_config(self, config):
        if not p.toolkit.check_ckan_version(min_version='2.0'):
            assert 0, 'CKAN before 2.0 not supported by ckanext-harvest - '\
                      'genshi templates not supported any more'
        if p.toolkit.asbool(config.get('ckan.legacy_templates', False)):
            # NOTE(review): this warning's wording ("set ... option to True any
            # more") reads garbled — confirm the intended message.
            log.warn('Old genshi templates not supported any more by '
                     'ckanext-harvest so you should set ckan.legacy_templates '
                     'option to True any more.')
        p.toolkit.add_template_directory(config, '../templates')
        p.toolkit.add_public_directory(config, '../public')
        p.toolkit.add_resource('../fanstatic_library', 'ckanext-harvest')
        p.toolkit.add_resource('../public/ckanext/harvest/javascript', 'harvest-extra-field')

        if p.toolkit.check_ckan_version(min_version='2.9.0'):
            # Map old pylons route names to their Flask blueprint equivalents
            mappings = config.get('ckan.legacy_route_mappings', {})
            if isinstance(mappings, string_types):
                mappings = json.loads(mappings)

            mappings.update({
                'harvest_read': 'harvest.read',
                'harvest_edit': 'harvest.edit',
            })
            bp_routes = [
                "delete", "refresh", "admin", "about",
                "clear", "job_list", "job_show_last", "job_show",
                "job_abort", "object_show"
            ]
            mappings.update({
                'harvest_' + route: 'harvester.' + route
                for route in bp_routes
            })
            # https://github.com/ckan/ckan/pull/4521
            config['ckan.legacy_route_mappings'] = json.dumps(mappings)

    # IActions

    def get_actions(self):
        module_root = 'ckanext.harvest.logic.action'
        action_functions = _get_logic_functions(module_root)

        return action_functions

    # IAuthFunctions

    def get_auth_functions(self):
        module_root = 'ckanext.harvest.logic.auth'
        auth_functions = _get_logic_functions(module_root)

        return auth_functions

    # ITemplateHelpers

    def get_helpers(self):
        from ckanext.harvest import helpers as harvest_helpers
        return {
            'package_list_for_source': harvest_helpers.package_list_for_source,
            'package_count_for_source': harvest_helpers.package_count_for_source,
            'harvesters_info': harvest_helpers.harvesters_info,
            'harvester_types': harvest_helpers.harvester_types,
            'harvest_frequencies': harvest_helpers.harvest_frequencies,
            'harvest_times': harvest_helpers.harvest_times,
            'harvest_default_time': harvest_helpers.harvest_default_time,
            'link_for_harvest_object': harvest_helpers.link_for_harvest_object,
            'harvest_source_extra_fields': harvest_helpers.harvest_source_extra_fields,
            'bootstrap_version': harvest_helpers.bootstrap_version,
            'get_harvest_source': harvest_helpers.get_harvest_source,
            'get_latest_job': harvest_helpers.get_latest_job,
        }

    # IFacets

    def dataset_facets(self, facets_dict, package_type):
        # Only harvest-source search pages get the custom facet set
        if package_type != 'harvest':
            return facets_dict

        return OrderedDict([('frequency', 'Frequency'),
                            ('source_type', 'Type'),
                            ('organization', 'Organization'),
                            ])

    def organization_facets(self, facets_dict, organization_type, package_type):
        if package_type != 'harvest':
            return facets_dict

        return OrderedDict([('frequency', 'Frequency'),
                            ('source_type', 'Type'),
                            ('organization', 'Organization'),
                            ])
def _add_extra(data_dict, key, value):
if 'extras' not in data_dict:
data_dict['extras'] = []
data_dict['extras'].append({
'key': key, 'value': value, 'state': u'active'
})
def _get_logic_functions(module_root, logic_functions=None):
    '''
    Collect the public functions defined in the get/create/update/patch/delete
    submodules of ``module_root``.

    :param module_root: dotted path of the package to scan
        (e.g. ``ckanext.harvest.logic.action``)
    :param logic_functions: optional dict to populate; a fresh dict is created
        when omitted. The previous signature used a mutable default argument
        (``logic_functions={}``), which is evaluated once and shared across
        calls — so action functions collected for IActions would leak into the
        IAuthFunctions registry (and vice versa).
    :returns: dict mapping function name to function object
    '''
    if logic_functions is None:
        logic_functions = {}
    for module_name in ['get', 'create', 'update', 'patch', 'delete']:
        module_path = '%s.%s' % (module_root, module_name,)
        module = __import__(module_path)
        # __import__ returns the top-level package; walk down to the submodule
        for part in module_path.split('.')[1:]:
            module = getattr(module, part)

        # Keep only public callables actually defined in this module
        # (skip names merely imported into it).
        for key, value in module.__dict__.items():
            if not key.startswith('_') and (hasattr(value, '__call__')
                                            and (value.__module__ == module_path)):
                logic_functions[key] = value

    return logic_functions
def _create_harvest_source_object(context, data_dict):
    '''
    Creates an actual HarvestSource object with the data dict
    of the harvest_source dataset. All validation and authorization
    checks should be used by now, so this function is not to be used
    directly to create harvest sources. The created harvest source will
    have the same id as the dataset.
    :param data_dict: A standard package data_dict
    :returns: The created HarvestSource object
    :rtype: HarvestSource object
    '''
    log.info('Creating harvest source: %r', data_dict)
    source = HarvestSource()
    source.id = data_dict['id']
    source.url = data_dict['url'].strip()
    # Stored under 'source_type' in the dict to avoid clashing with the
    # dataset's own 'type' field.
    source.type = data_dict['source_type']
    optional_fields = ('active', 'title', 'description', 'user_id',
                       'publisher_id', 'config', 'frequency', 'time')
    for field in optional_fields:
        value = data_dict.get(field)
        if value is not None:
            setattr(source, field, value)
    # A deleted dataset state means an inactive source.
    source.active = data_dict.get('state', None) != 'deleted'
    # Don't commit yet, let package_create do it
    source.add()
    log.info('Harvest source created: %s', source.id)
    return source
def _update_harvest_source_object(context, data_dict):
    '''
    Updates an actual HarvestSource object with the data dict
    of the harvest_source dataset. All validation and authorization
    checks should be used by now, so this function is not to be used
    directly to update harvest sources.
    :param data_dict: A standard package data_dict
    :returns: The created HarvestSource object
    :rtype: HarvestSource object
    '''
    source_id = data_dict.get('id')
    log.info('Harvest source %s update: %r', source_id, data_dict)
    source = HarvestSource.get(source_id)
    if not source:
        log.error('Harvest source %s does not exist', source_id)
        raise logic.NotFound('Harvest source %s does not exist' % source_id)
    # Plain attributes copied straight from the data dict when present
    # and not None.
    fields = ['url', 'title', 'description', 'user_id',
              'publisher_id', 'frequency', 'time']
    for f in fields:
        if f in data_dict and data_dict[f] is not None:
            if f == 'url':
                # Normalise the URL before storing it.
                data_dict[f] = data_dict[f].strip()
            source.__setattr__(f, data_dict[f])
    # 'source_type' is used in the dict to avoid clashes with the dataset type.
    if 'source_type' in data_dict:
        source.type = data_dict['source_type']
    if 'config' in data_dict:
        source.config = data_dict['config']
    # Don't change state unless explicitly set in the dict
    if 'state' in data_dict:
        source.active = data_dict.get('state') == 'active'
    # Don't commit yet, let package_create do it
    source.add()
    # Abort any pending jobs when the source has been deactivated.
    if not source.active:
        jobs = HarvestJob.filter(source=source, status=u'New')
        log.info('Harvest source %s not active, so aborting %i outstanding jobs', source_id, jobs.count())
        if jobs:
            for job in jobs:
                job.status = u'Aborted'
                job.add()
    return source
def _delete_harvest_source_object(context, data_dict):
    '''
    Deletes an actual HarvestSource object with the id provided on the
    data dict of the harvest_source dataset. Similarly to the datasets,
    the source object is not actually deleted, just flagged as inactive.
    All validation and authorization checks should be used by now, so
    this function is not to be used directly to delete harvest sources.
    :param data_dict: A standard package data_dict
    :returns: The deleted HarvestSource object
    :rtype: HarvestSource object
    '''
    source_id = data_dict.get('id')
    log.info('Deleting harvest source: %s', source_id)
    source = HarvestSource.get(source_id)
    if not source:
        log.warn('Harvest source %s does not exist', source_id)
        raise p.toolkit.ObjectNotFound('Harvest source %s does not exist' % source_id)
    # Don't actually delete the record, just flag it as inactive
    source.active = False
    source.save()
    # Abort any pending jobs so they are not picked up after deletion.
    jobs = HarvestJob.filter(source=source, status=u'New')
    if jobs:
        log.info('Aborting %i jobs due to deleted harvest source', jobs.count())
        for job in jobs:
            job.status = u'Aborted'
            job.save()
    log.debug('Harvest source %s deleted', source_id)
    return source
def _configure_db_logger(config):
    """Attach DBLogHandler instances to the harvest loggers selected by the
    ``ckan.harvest.log_scope`` config option, at the configured level."""
    # Log scope
    #
    # -1 - do not log to the database
    # 0 - log everything
    # 1 - model, logic.action, logic.validators, harvesters
    # 2 - model, logic.action, logic.validators
    # 3 - model, logic.action
    # 4 - logic.action
    # 5 - model
    # 6 - plugin
    # 7 - harvesters
    #
    scope = p.toolkit.asint(config.get('ckan.harvest.log_scope', -1))
    if scope == -1:
        # Database logging disabled (the default).
        return
    parent_logger = 'ckanext.harvest'
    children = ['plugin', 'model', 'logic.action.create', 'logic.action.delete',
                'logic.action.get', 'logic.action.patch', 'logic.action.update',
                'logic.validators', 'harvesters.base', 'harvesters.ckanharvester']
    # Map each scope value to the slice of child loggers it covers (see the
    # table above); the slices mirror the ordering of ``children``.
    children_ = {0: children, 1: children[1:], 2: children[1:-2],
                 3: children[1:-3], 4: children[2:-3], 5: children[1:2],
                 6: children[:1], 7: children[-2:]}
    # Get log level from config param - default: DEBUG
    from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
    level = config.get('ckan.harvest.log_level', 'debug').upper()
    if level == 'DEBUG':
        level = DEBUG
    elif level == 'INFO':
        level = INFO
    elif level == 'WARNING':
        level = WARNING
    elif level == 'ERROR':
        level = ERROR
    elif level == 'CRITICAL':
        level = CRITICAL
    else:
        # Unrecognised config value: fall back to DEBUG.
        level = DEBUG
    loggers = children_.get(scope)
    # Get root logger and set db handler
    logger = getLogger(parent_logger)
    if scope < 1:
        # Scope 0 also captures records emitted directly on the parent logger.
        logger.addHandler(DBLogHandler(level=level))
    # Set db handler to all child loggers
    for _ in loggers:
        child_logger = logger.getChild(_)
        child_logger.addHandler(DBLogHandler(level=level))
|
python
|
# coding=utf-8
# IP addresses are scraped from the domestic high-anonymity proxy site: http://www.xicidaili.com/nn/
# Scraping only the first page of IPs is enough for typical use
import telnetlib
from bs4 import BeautifulSoup
import requests
import random
# First page of the proxy listing to scrape.
URL = 'http://www.xicidaili.com/nn/'
# Desktop Chrome user agent; presumably needed so the site serves the
# normal HTML listing — TODO confirm.
HEADERS = {
    'User-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'
}
def get_ip_list(url=URL, headers=HEADERS):
    """Scrape the proxy listing page and return a list of 'ip:port' strings."""
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')
    rows = soup.find_all('tr')
    ip_list = []
    # Skip the header row; cell 1 holds the IP and cell 2 the port.
    for row in rows[1:]:
        cells = row.find_all('td')
        ip_list.append(cells[1].text + ':' + cells[2].text)
    return ip_list
def get_random_ip():
    """Pick one scraped proxy at random, formatted for requests' proxies arg."""
    candidates = ['http://' + address for address in get_ip_list()]
    # Random choice spreads load across the scraped proxies.
    return {'http': random.choice(candidates)}
def test_ip():
    """Check that a randomly chosen proxy accepts TCP connections.

    Prints the proxy's ip and port, then the check result, and returns the
    proxy dict regardless of the outcome.
    """
    ip_dict = get_random_ip()
    # e.g. 'http://110.73.2.182:8123' -> ('110.73.2.182', '8123')
    proxy_url = ip_dict['http']
    http, ip_port = proxy_url.split('://')
    ip, port = ip_port.split(':')
    print(ip)
    print(port)
    try:
        # Five-second timeout so dead proxies fail fast.
        telnetlib.Telnet(ip, port=port, timeout=5)
    except OSError:
        # Was a bare ``except:``, which also swallowed KeyboardInterrupt and
        # SystemExit; connection failures (including socket.timeout) are
        # OSError subclasses.
        print('失败')
    else:
        print('成功')
    return ip_dict
if __name__ == '__main__':
    # Manual smoke test: pick a random proxy and check it is reachable.
    # print(get_ip_list())
    # proxy_dict = get_random_ip()
    # print(proxy_dict)
    ip_dict = test_ip()
    # ip, port = ("http://110.73.2.182", "8123")
    # proxy_url = "{0}:{1}".format(ip, port)
    # print(proxy_url)
|
python
|
import struct
__all__ = ['AbstractEnumValue', 'IntValue', 'KnobModeEnum', 'PadModeEnum', 'SusModeEnum']
class AbstractEnumValue (object):
    """Base class for one-byte enum values.

    Subclasses provide ``_VALUES``, a mapping of string names to integer
    codes. Instances can be created from either representation and
    converted both ways; serialization packs the code as a single byte.
    """

    _VALUES = {}

    def __init__(self, val):
        if isinstance(val, int):
            # Reverse-lookup the name for this integer code.
            matches = [name for name, code in self._VALUES.items() if code == val]
            if not matches:
                raise ValueError("Invalid value '%d' for enum '%s'"
                                 % (val, self.__class__.__name__))
            self._value = matches[0]
        elif isinstance(val, str):
            if val not in self._VALUES:
                raise ValueError("Invalid value '%s' for enum '%s'"
                                 % (val, self.__class__.__name__))
            self._value = val
        else:
            raise ValueError("Enum must be instantiated with int or string.")

    def as_string(self):
        """Return the enum member's name."""
        return self._value

    def as_int(self):
        """Return the enum member's integer code."""
        return self._VALUES[self._value]

    def enum_vals(self):
        """Return the (name, code) pairs of this enum type."""
        return self._VALUES.items()

    def serialize(self):
        """Pack the integer code as one unsigned byte."""
        return struct.pack('B', self.as_int())

    @classmethod
    def num_bytes(cls):
        """Serialized size in bytes."""
        return 1

    @classmethod
    def deserialize(cls, b):
        """Build an instance from the first byte of ``b``."""
        return cls(int(b[0]))
class IntValue (object):
    """A plain integer value serialized as a single unsigned byte."""

    def __init__(self, val):
        if isinstance(val, int):
            self._value = val
        else:
            raise ValueError("Invalid type '%s', expected int."
                             % (val.__class__.__name__))

    def as_int(self):
        """Return the wrapped integer."""
        return self._value

    def serialize(self):
        """Pack the value as one unsigned byte."""
        return struct.pack('B', self.as_int())

    @classmethod
    def num_bytes(cls):
        """Serialized size in bytes."""
        return 1

    @classmethod
    def deserialize(cls, b):
        """Build an instance from the first byte of ``b``."""
        return cls(int(b[0]))
class KnobModeEnum (AbstractEnumValue):
    """Knob operating mode: send CC messages or channel aftertouch."""
    _VALUES = {
        'CC': 0x00,
        'Aftertouch': 0x01
    }
class PadModeEnum (AbstractEnumValue):
    """Pad operating mode: note, toggling CC, or momentary CC."""
    _VALUES = {
        'Note': 0x00,
        'Toggle CC': 0x01,
        'Momentary CC': 0x02
    }
#class ButtonModeEnum (AbstractEnumValue):
#
# _VALUES = {
# 'Toggle CC': 0x00,
# 'Momentary CC': 0x01
# }
class SusModeEnum (AbstractEnumValue):
    """Sustain pedal mode: latching switch or momentary."""
    _VALUES = {
        'Switch': 0x00,
        'Momentary': 0x01
    }
|
python
|
# Count occurrences of 'a' in the first n characters of s repeated infinitely.
s = input().strip()
n = int(input().strip())
# Number of complete copies of s within the first n characters, and the
# length of the trailing partial copy.
full_copies, remainder = divmod(n, len(s))
# 'a's from the complete copies plus those in the leading slice of s.
total = s.count('a') * full_copies + s[:remainder].count('a')
print(total)
|
python
|
import asyncio
import math
import networkx as nx
from ccxt import async_support as ccxt
import warnings
__all__ = [
'create_multi_exchange_graph',
'create_weighted_multi_exchange_digraph',
'multi_graph_to_log_graph',
]
def create_multi_exchange_graph(exchanges: list, digraph=False):
    """
    Returns a MultiGraph representing the markets for each exchange in exchanges. Each edge represents a market.
    Note: does not add edge weights using the ticker's ask and bid prices.
    exchange.load_markets() must have been called for each exchange in exchanges. Will throw a ccxt error if it has not.
    todo: check which error.

    :param exchanges: list of ccxt exchange objects with loaded markets
    :param digraph: when True, build a MultiDiGraph with an edge in each
        direction per market instead of a single undirected edge
    """
    if digraph:
        graph = nx.MultiDiGraph()
    else:
        graph = nx.MultiGraph()
    for exchange in exchanges:
        for market_name in exchange.symbols:
            try:
                # Symbols are 'BASE/QUOTE' pairs.
                base_currency, quote_currency = market_name.split('/')
            # if ccxt returns a market in incorrect format (e.g FX_BTC_JPY on BitFlyer)
            except ValueError:
                continue
            graph.add_edge(base_currency,
                           quote_currency,
                           market_name=market_name,
                           exchange_name=exchange.name.lower())
            if digraph:
                # Add the reverse direction explicitly for the digraph case.
                graph.add_edge(quote_currency,
                               base_currency,
                               market_name=market_name,
                               exchange_name=exchange.name.lower())
    return graph
def create_weighted_multi_exchange_digraph(exchanges: list, name=True, log=False, fees=False, suppress=None):
    """
    Build a MultiDiGraph whose edge weights come from each market's current
    bid/ask prices (optionally negative-log transformed and fee-adjusted).

    Not optimized (in favor of readability). There is multiple iterations over exchanges.

    :param exchanges: exchange names (when ``name`` is True) or ccxt objects
    :param name: interpret ``exchanges`` as names and instantiate them
    :param log: store -log-transformed weights instead of raw rates
    :param fees: scale rates by each exchange's maker fee
    :param suppress: warning categories to silence (defaults to ['markets'])
    """
    if suppress is None:
        suppress = ['markets']
    if name:
        exchanges = [{'object': getattr(ccxt, exchange)()} for exchange in exchanges]
    else:
        exchanges = [{'object': exchange} for exchange in exchanges]
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        # get_event_loop raises RuntimeError when no loop exists in this
        # thread. The old fallback called get_running_loop(), which fails in
        # exactly the same situation; create and install a fresh loop instead.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    futures = [asyncio.ensure_future(exchange_dict['object'].load_markets()) for exchange_dict in exchanges]
    loop.run_until_complete(asyncio.gather(*futures))
    if fees:
        for exchange_dict in exchanges:
            if 'maker' in exchange_dict['object'].fees['trading']:
                # we always take the maker side because arbitrage depends on filling orders
                exchange_dict['fee'] = exchange_dict['object'].fees['trading']['maker']
            else:
                if 'fees' not in suppress:
                    warnings.warn("The fees for {} have not yet been implemented into the library. "
                                  "Values will be calculated using a 0.2% maker fee.".format(exchange_dict['object'].id))
                exchange_dict['fee'] = 0.002
    else:
        # todo: is there a way to do this with list/ dict comprehension?
        for exchange_dict in exchanges:
            exchange_dict['fee'] = 0
    graph = nx.MultiDiGraph()
    futures = [_add_exchange_to_multi_digraph(graph, exchange, log=log, suppress=suppress) for exchange in exchanges]
    loop.run_until_complete(asyncio.gather(*futures))
    return graph
async def _add_exchange_to_multi_digraph(graph: nx.MultiDiGraph, exchange, log=True, suppress=None):
    """Fetch a ticker for every symbol on ``exchange``, adding the resulting
    weighted edges to ``graph``, then close the exchange's session."""
    tasks = [_add_market_to_multi_digraph(exchange, symbol, graph, log=log, suppress=suppress)
             for symbol in exchange['object'].symbols]
    # gather(return_exceptions=True) mirrors the old asyncio.wait behaviour
    # (per-task exceptions are collected, not raised) and, unlike wait,
    # tolerates an exchange with no symbols (wait([]) raises ValueError).
    if tasks:
        await asyncio.gather(*tasks, return_exceptions=True)
    await exchange['object'].close()
# todo: refactor. there is a lot of code repetition here with single_exchange.py's _add_weighted_edge_to_graph
# todo: write tests which prove market_name is always a ticker on exchange and exchange's load_markets has been called.
# this will validate that all exceptions thrown by await exchange.fetch_ticker(market_name) are solely because of
# ccxt's fetch_ticker
async def _add_market_to_multi_digraph(exchange, market_name: str, graph: nx.DiGraph, log=True, suppress=None):
    """Fetch ``market_name``'s ticker on ``exchange`` and add two directed
    edges (base->quote at the bid, quote->base at 1/ask), fee-adjusted and
    optionally -log1p transformed.

    :param exchange: dict with an instantiated ccxt 'object' and a 'fee'
    :param suppress: required list of warning categories to silence
    """
    if suppress is None:
        raise ValueError("suppress cannot be None. Must be a list with possible values listed in docstring of"
                         "create_weighted_multi_exchange_digraph. If this error shows, something likely went awry "
                         "during execution.")
    try:
        ticker = await exchange['object'].fetch_ticker(market_name)
    # any error is solely because of fetch_ticker
    # NOTE(review): bare except also catches CancelledError/KeyboardInterrupt;
    # consider narrowing to ccxt's exception hierarchy.
    except:
        if 'markets' not in suppress:
            warning = 'Market {} is unavailable at this time.'.format(market_name)
            warnings.warn(warning)
        return
    try:
        ticker_ask = ticker['ask']
        ticker_bid = ticker['bid']
    # ask and bid == None if this market does not exist.
    except TypeError:
        return
    # prevent math error when Bittrex (GEO/BTC) or other API gives 0 as ticker price
    if ticker_ask == 0:
        return
    try:
        base_currency, quote_currency = market_name.split('/')
    # if ccxt returns a market in incorrect format (e.g FX_BTC_JPY on BitFlyer)
    except ValueError:
        return
    # Maker fee reduces the effective conversion rate on both directions.
    fee_scalar = 1 - exchange['fee']
    if log:
        # math.log raises exception when dealing with zero values
        # we prefer math.log1p instead
        # NOTE(review): log1p(x) == log(1+x), not log(x) — presumably a
        # deliberate zero-safety trade-off; confirm downstream consumers
        # expect this transform.
        graph.add_edge(base_currency, quote_currency,
                       market_name=market_name,
                       exchange_name=exchange['object'].id,
                       weight=-math.log1p(fee_scalar * ticker_bid))
        graph.add_edge(quote_currency, base_currency,
                       market_name=market_name,
                       exchange_name=exchange['object'].id,
                       weight=-math.log1p(fee_scalar * 1 / ticker_ask))
    else:
        graph.add_edge(base_currency, quote_currency,
                       market_name=market_name,
                       exchange_name=exchange['object'].id,
                       weight=fee_scalar * ticker_bid)
        graph.add_edge(quote_currency, base_currency,
                       market_name=market_name,
                       exchange_name=exchange['object'].id,
                       weight=fee_scalar * 1 / ticker_ask)
def multi_graph_to_log_graph(digraph: nx.MultiDiGraph):
    """
    This does not work with the default version of Networkx, but with the fork available at wardbradt/Networkx
    Given weighted MultiDigraph m1, returns a MultiDigraph m2 where for each edge e1 in each edge bunch eb1 of m1, the
    weight w1 of e1 is replaced with log(w1) and the weight w2 of each edge e2 in the opposite edge bunch of eb is
    log(1/w2)
    This function is not optimized.
    todo: allow this function to be used with Networkx DiGraph objects. Should not be that hard, simply return seen
    from self._report in the iterator for digraph's edges() in reportviews.py as it is done for multidigraph's
    edge_bunches()
    """
    result_graph = nx.MultiDiGraph()
    # edge_bunches(seen=True) is fork-specific: bunch[3] flags whether the
    # opposite-direction bunch was already yielded.
    for bunch in digraph.edge_bunches(data=True, seen=True):
        for data_dict in bunch[2]:
            weight = data_dict.pop('weight')
            # if not seen
            if not bunch[3]:
                # NOTE(review): in stock networkx the third positional arg of
                # MultiDiGraph.add_edge is the edge *key*, not the weight —
                # presumably the fork accepts a positional weight; confirm.
                result_graph.add_edge(bunch[0], bunch[1], -math.log(weight), **data_dict)
            else:
                # Opposite bunch: invert the rate before taking the log.
                result_graph.add_edge(bunch[0], bunch[1], -math.log(1/weight), **data_dict)
|
python
|
"""Methods for creating, manipulating, and storing Teradata row objects."""
import csv
from claims_to_quality.lib.qpp_logging import logging_config
from claims_to_quality.lib.teradata_methods import deidentification
import teradata
logger = logging_config.get_logger(__name__)
def csv_to_query_output(csv_path):
    """
    Use csv input to mock SQL query results.
    This is used to allow claim_reader to read from csv.

    :param csv_path: path to a csv file whose first row is the header
    :returns: (columns, rows) tuple matching a query result
    """
    with open(csv_path) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',', quotechar='"')
        # The first row names the columns.
        header = next(csv_reader)
        rows = convert_list_of_lists_to_teradata_rows(csv_reader, header)
    columns = {column_name: idx for idx, column_name in enumerate(header)}
    return (columns, rows)
def convert_list_of_lists_to_teradata_rows(data, columns):
    """
    Given a list of iterables, convert to Teradata row objects with the specified columns.
    :param data: List of iterables to convert to Teradata row objects.
    :param columns: List of column names for the returned rows.
    """
    # The Teradata Row API expects a name -> position mapping.
    column_map = {name: position for position, name in enumerate(columns)}
    rows = []
    for row_number, values in enumerate(data):
        rows.append(teradata.util.Row(columns=column_map, values=values, rowNum=row_number))
    return rows
def convert_dicts_to_teradata_rows(data):
    """
    Convert a list of dictionaries to a list of Teradata row objects.
    All dictionaries in the list should have the same keys.
    """
    if not data:
        return []
    # Column positions follow the key order of the first dictionary.
    columns = {key: index for index, key in enumerate(data[0].keys())}
    # Convert each dict to a positional value list, as the Teradata library
    # expects; iterating ``columns`` yields keys in position order 0..n-1.
    return [
        teradata.util.Row(columns=columns,
                          values=[row[name] for name in columns],
                          rowNum=idx)
        for idx, row in enumerate(data)
    ]
def to_csv(rows, csv_path, anonymize=True):
    """
    Given a list of Teradata rows, output to csv with the given columns.
    TODO: Specify specific columns to write to csv.
    :param rows: List of Teradata row objects to be written to csv.
    :param csv_path: Path of csv file to create and write to.
    :param anonymize: When True (default), scrub rows through the
        AnonymizationFilter before writing.
    """
    if not rows:
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning('No data to save.')
        return
    if anonymize:
        anonymization_filter = deidentification.AnonymizationFilter()
        rows = list(anonymization_filter.anonymize_rows(rows))
    # newline='' is required by the csv module; without it Windows output
    # gets doubled \r\r\n line endings.
    with open(csv_path, 'w', newline='') as f:
        fieldnames = [column for column in rows[0].columns]
        writer = csv.writer(f)
        writer.writerow(fieldnames)
        writer.writerows([row.values for row in rows])
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=====================
General 3D Object
=====================
TODO
"""
import Axon
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from Display3D import Display3D
from Util3D import *
from Object3D import *
class Button(Object3D):
    """A clickable 3D button: a textured cuboid displaying a text caption.

    Sends ``eventMsg`` on its "outbox" when clicked with the left mouse
    button, shrinking while held and spinning briefly after activation.
    """

    def __init__(self, **argd):
        super(Button, self).__init__(**argd)

        # Mouse button currently holding this button down (0 = none).
        self.grabbed = 0

        # Button initialisation
        caption = argd.get("caption", "Button")
        self.backgroundColour = argd.get("bgcolour", (244,244,244))
        self.foregroundColour = argd.get("fgcolour", (0,0,0))
        self.sideColour = argd.get("sidecolour", (200,200,244))
        # Pixel border around the caption inside the texture.
        self.margin = argd.get("margin", 8)
        self.key = argd.get("key", None)

        self.caption = argd.get("caption", "Button")
        self.fontsize = argd.get("fontsize", 50)
        # Pixels of rendered caption per world-space unit (used to derive
        # the button size when none was given).
        self.pixelscaling = argd.get("pixelscaling", 100)
        self.thickness = argd.get("thickness", 0.2)
        # Message sent on "outbox" on activation.
        self.eventMsg = argd.get("msg", "CLICK")

        # Activation animation state.
        self.activated = False
        self.actrot = 0

    def setup(self):
        """Build the caption texture and register for mouse events."""
        self.buildCaption()
        self.addListenEvents( [pygame.MOUSEMOTION, pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP ])

    def draw(self):
        """Render the button: plain-coloured side faces plus front and back
        faces textured with the caption."""
        hs = self.size/2.0
        # draw faces
        glBegin(GL_QUADS)
        glColor4f(self.sideColour[0]/256.0, self.sideColour[1]/256.0, self.sideColour[2]/256.0, 0.5)

        # Right face (x = +hs.x).
        glVertex3f(hs.x,hs.y,hs.z)
        glVertex3f(hs.x,-hs.y,hs.z)
        glVertex3f(hs.x,-hs.y,-hs.z)
        glVertex3f(hs.x,hs.y,-hs.z)

        # Left face (x = -hs.x).
        glVertex3f(-hs.x,hs.y,hs.z)
        glVertex3f(-hs.x,-hs.y,hs.z)
        glVertex3f(-hs.x,-hs.y,-hs.z)
        glVertex3f(-hs.x,hs.y,-hs.z)

        # Top face (y = +hs.y).
        glVertex3f(hs.x,hs.y,hs.z)
        glVertex3f(-hs.x,hs.y,hs.z)
        glVertex3f(-hs.x,hs.y,-hs.z)
        glVertex3f(hs.x,hs.y,-hs.z)

        # Bottom face (y = -hs.y).
        glVertex3f(hs.x,-hs.y,hs.z)
        glVertex3f(-hs.x,-hs.y,hs.z)
        glVertex3f(-hs.x,-hs.y,-hs.z)
        glVertex3f(hs.x,-hs.y,-hs.z)
        glEnd()

        # Caption texture on front and back faces.
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self.texID)
        glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)

        glBegin(GL_QUADS)
        # back plane
        glTexCoord2f(self.tex_w, 1.0-self.tex_h)
        glVertex3f(hs.x,hs.y,-hs.z)
        glTexCoord2f(0.0, 1.0-self.tex_h)
        glVertex3f(-hs.x,hs.y,-hs.z)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-hs.x,-hs.y,-hs.z)
        glTexCoord2f(self.tex_w, 1.0)
        glVertex3f(hs.x,-hs.y,-hs.z)
        # front plane
        glTexCoord2f(0.0, 1.0-self.tex_h)
        glVertex3f(-hs.x,-hs.y,hs.z)
        glTexCoord2f(self.tex_w, 1.0-self.tex_h)
        glVertex3f(hs.x,-hs.y,hs.z)
        glTexCoord2f(self.tex_w, 1.0)
        glVertex3f(hs.x,hs.y,hs.z)
        glTexCoord2f(0.0, 1.0)
        glVertex3f(-hs.x,hs.y,hs.z)
        glEnd()

        glDisable(GL_TEXTURE_2D)

    def handleEvents(self):
        """Consume mouse events: shrink while pressed, and on release over
        this button emit ``eventMsg`` and start the activation spin."""
        while self.dataReady("inbox"):
            event = self.recv("inbox")
            if event.type == pygame.MOUSEBUTTONDOWN:
                # Only left-clicks that hit this object grab the button.
                if event.button == 1 and self.ogl_name in event.hitobjects:
                    self.grabbed = event.button
                    self.scaling = Vector(0.9,0.9,0.9)
            if event.type == pygame.MOUSEBUTTONUP:
                if event.button == 1:
                    self.grabbed = 0
                    self.scaling = Vector(1,1,1)
                    #activate
                    if self.ogl_name in event.hitobjects:
                        self.send( self.eventMsg, "outbox" )
                        self.activated = True

    def buildCaption(self):
        """Pre-render the text to go on the button label."""
        # Text is rendered to self.image
        pygame.font.init()
        font = pygame.font.Font(None, self.fontsize)
        self.image = font.render(self.caption,True, self.foregroundColour, )

        # create power of 2 dimensioned surface
        pow2size = (int(2**(ceil(log(self.image.get_width(), 2)))), int(2**(ceil(log(self.image.get_height(), 2)))))
        textureSurface = pygame.Surface(pow2size)
        textureSurface.fill( self.backgroundColour )

        # determine texture coordinates
        self.tex_w = float(self.image.get_width()+2*self.margin)/pow2size[0]
        self.tex_h = float(self.image.get_height()+2*self.margin)/pow2size[1]

        # copy image data to pow2surface
        textureSurface.blit(self.image, (self.margin,self.margin))
        # textureSurface.set_alpha(128)
        # textureSurface = textureSurface.convert_alpha()

        # read pixel data
        textureData = pygame.image.tostring(textureSurface, "RGBX", 1)

        self.texID = glGenTextures(1)
        # create texture
        glEnable(GL_TEXTURE_2D)
        glBindTexture(GL_TEXTURE_2D, self.texID)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, textureSurface.get_width(), textureSurface.get_height(), 0,
                      GL_RGBA, GL_UNSIGNED_BYTE, textureData );
        glDisable(GL_TEXTURE_2D)

        if self.size is None:
            # Derive the cuboid size from the rendered caption dimensions.
            self.size=Vector(self.image.get_width()/float(self.pixelscaling), self.image.get_height()/float(self.pixelscaling), self.thickness)

    def steadyMovement(self):
        """Per-frame animation: spin 360 degrees around x after activation."""
        # self.rot += self.wiggle
        # if self.wiggle.x >= 0.1 or self.wiggle.x <=-0.1:
        #     self.wiggleadd *= -1
        # self.wiggle += self.wiggleadd
        if self.activated:
            # NOTE(review): '%' binds to Vector(3,0,0), not to the sum —
            # presumably meant to wrap self.rot at 360; confirm against
            # Vector's operator semantics in Util3D.
            self.rot += Vector(3,0,0)%360
            self.actrot += 3
            if self.actrot >= 360:
                # Full revolution complete; stop the animation.
                self.actrot = 0
                self.activated = False

    def frame(self):
        """Hook called once per frame by Object3D."""
        self.steadyMovement()
from SkyGrassBackground import *
if __name__=='__main__':
    # Demo: four labelled buttons in front of a sky/grass backdrop.
    BUTTON1 = Button(caption="<<", msg="Previous", pos=Vector(-3,0,-10)).activate()
    BUTTON2 = Button(caption=">>", msg="Next", pos=Vector(3,0,-10)).activate()
    BUTTON3 = Button(caption="Play", msg="Play", pos=Vector(-1,0,-10)).activate()
    BUTTON4 = Button(caption="Stop", msg="Stop", pos=Vector(1,0,-10)).activate()
    bg = SkyGrassBackground(size=Vector(5000,5000,0), pos = Vector(0, 0, -100)).activate()
    Axon.Scheduler.scheduler.run.runThreads()
|
python
|
# Copyright 2021 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import upf
from upf.model import ProblemKind
from typing import Dict, Tuple, Optional, List, Union, Type
# Built-in solver registry: name -> (module to import, class name inside it).
# Entries whose module is not installed are skipped at Factory construction.
DEFAULT_SOLVERS = {'tamer' : ('upf_tamer', 'SolverImpl'),
                   'pyperplan' : ('upf_pyperplan', 'SolverImpl'),
                   'sequential_plan_validator' : ('upf.solvers.plan_validator', 'SequentialPlanValidator'),
                   'grounder' : ('upf.solvers.grounder', 'Grounder')}
class Factory:
    """Registry and factory for planning, validation, and grounding solvers.

    Solvers are registered by name together with the module path and class
    name implementing them; entries whose module cannot be imported are
    silently skipped, so optional solver packages remain optional.
    """

    def __init__(self, solvers: Dict[str, Tuple[str, str]] = DEFAULT_SOLVERS):
        self.solvers: Dict[str, Type['upf.solvers.solver.Solver']] = {}
        for name, (module_name, class_name) in solvers.items():
            try:
                self.add_solver(name, module_name, class_name)
            except ImportError:
                # Optional solver package not installed; skip it.
                pass

    def add_solver(self, name: str, module_name: str, class_name: str):
        """Import ``module_name`` and register its ``class_name`` under ``name``."""
        module = importlib.import_module(module_name)
        SolverImpl = getattr(module, class_name)
        self.solvers[name] = SolverImpl

    def _get_solver_class(self, solver_kind: str, name: Optional[str] = None,
                          problem_kind: ProblemKind = ProblemKind()) -> Optional[Type['upf.solvers.solver.Solver']]:
        """Return the class registered under ``name``, or else the first
        registered class of ``solver_kind`` supporting ``problem_kind``.

        Returns None when no registered solver matches.
        """
        if name is not None:
            return self.solvers[name]
        for SolverClass in self.solvers.values():
            if getattr(SolverClass, 'is_'+solver_kind)() and SolverClass.supports(problem_kind):
                return SolverClass
        return None

    def _get_solver(self, solver_kind: str, name: Optional[str] = None,
                    names: Optional[List[str]] = None,
                    params: Optional[Union[Dict[str, str], List[Dict[str, str]]]] = None,
                    problem_kind: ProblemKind = ProblemKind()) -> Optional['upf.solvers.solver.Solver']:
        """Instantiate a solver of ``solver_kind``.

        With ``names`` given, builds a Parallel solver over the named
        solvers; otherwise instantiates a single solver chosen by ``name``
        or by ``problem_kind``.

        :raises RuntimeError: when no matching solver class is found.
            (Previously these paths executed a bare ``raise`` with no active
            exception, which crashed with an uninformative RuntimeError.)
        """
        if names is not None:
            assert name is None
            if params is None:
                params = [{} for i in range(len(names))]
            assert isinstance(params, List) and len(names) == len(params)
            solvers = []
            for name, param in zip(names, params):
                SolverClass = self._get_solver_class(solver_kind, name)
                if SolverClass is None:
                    raise RuntimeError('No %s found with name %s' % (solver_kind, name))
                solvers.append((SolverClass, param))
            return upf.solvers.parallel.Parallel(solvers)
        else:
            if params is None:
                params = {}
            assert isinstance(params, Dict)
            SolverClass = self._get_solver_class(solver_kind, name, problem_kind)
            if SolverClass is None:
                raise RuntimeError('No %s available for the given problem kind' % solver_kind)
            return SolverClass(**params)

    def OneshotPlanner(self, *, name: Optional[str] = None,
                       names: Optional[List[str]] = None,
                       params: Optional[Union[Dict[str, str], List[Dict[str, str]]]] = None,
                       problem_kind: ProblemKind = ProblemKind()) -> Optional['upf.solvers.solver.Solver']:
        """
        Returns a oneshot planner. There are three ways to call this method:
        - using 'name' (the name of a specific planner) and 'params' (planner dependent options).
          e.g. OneshotPlanner(name='tamer', params={'heuristic': 'hadd'})
        - using 'names' (list of specific planners name) and 'params' (list of
          planners dependent options) to get a Parallel solver.
          e.g. OneshotPlanner(names=['tamer', 'tamer'],
                              params=[{'heuristic': 'hadd'}, {'heuristic': 'hmax'}])
        - using 'problem_kind' parameter.
          e.g. OneshotPlanner(problem_kind=problem.kind())
        """
        return self._get_solver('oneshot_planner', name, names, params, problem_kind)

    def PlanValidator(self, *, name: Optional[str] = None,
                      names: Optional[List[str]] = None,
                      params: Optional[Union[Dict[str, str], List[Dict[str, str]]]] = None,
                      problem_kind: ProblemKind = ProblemKind()) -> Optional['upf.solvers.solver.Solver']:
        """
        Returns a plan validator. There are three ways to call this method:
        - using 'name' (the name of a specific plan validator) and 'params'
          (plan validator dependent options).
          e.g. PlanValidator(name='tamer', params={'opt': 'val'})
        - using 'names' (list of specific plan validators name) and 'params' (list of
          plan validators dependent options) to get a Parallel solver.
          e.g. PlanValidator(names=['tamer', 'tamer'],
                             params=[{'opt1': 'val1'}, {'opt2': 'val2'}])
        - using 'problem_kind' parameter.
          e.g. PlanValidator(problem_kind=problem.kind())
        """
        return self._get_solver('plan_validator', name, names, params, problem_kind)

    def Grounder(self, *, name: Optional[str] = None,
                 params: Optional[Union[Dict[str, str], List[Dict[str, str]]]] = None,
                 problem_kind: ProblemKind = ProblemKind()) -> Optional['upf.solvers.solver.Solver']:
        """
        Returns a Grounder. There are three ways to call this method:
        - using 'name' (the name of a specific grounder) and 'params'
          (grounder dependent options).
          e.g. Grounder(name='tamer', params={'opt': 'val'})
        - using 'problem_kind' parameter.
          e.g. Grounder(problem_kind=problem.kind())
        """
        return self._get_solver('grounder', name, None, params, problem_kind)
|
python
|
# Read a product's price and a discount percentage, then print the
# discounted price rounded to two decimal places.
preco = float(input('Qual o valor do produto ? R$ '))
porcentagem = float(input('Qual a porcentagem ? '))
# Discounted price = price minus (price * percentage / 100).
calculo = preco - (preco * porcentagem / 100)
print(f'O produto que custava R${preco}, na promoção com desconto de {porcentagem}% vai custar {calculo:.2f}')
|
python
|
import json
import logging
from unittest import mock
from django.test import TestCase
from djenga.logging.formatters import JsonFormatter, JsonTaskFormatter
__all__ = [ 'JsonFormatterTest', ]
log = logging.getLogger(__name__)
class JsonFormatterTest(TestCase):
    """Tests for the djenga JSON logging formatters."""

    def test_json_formatter(self):
        """JsonFormatter emits timestamp, message, logger name, extras,
        and structured exception details."""
        formatter = JsonFormatter()
        with self.assertLogs(log) as log_context:
            for handler in log.handlers:
                handler.setFormatter(formatter)
            log.info('Hello, Gwenna!', extra={'favorite': 'Olive'})
            data = log_context.output[-1]
            data = json.loads(data)
            self.assertIn('timestamp', data)
            self.assertEqual(data['message'], 'Hello, Gwenna!')
            self.assertEqual(data['logger'],
                             'djenga_tests.tests.json_formatters')
            self.assertEqual(data['favorite'], 'Olive')
            try:
                raise ValueError('test exception')
            except ValueError as ex:
                log.exception('%s', ex)
            data = log_context.output[-1]
            data = json.loads(data)
            self.assertEqual(data['exception_type'], 'builtins.ValueError')
            self.assertIn('test exception', data['message'])
            # assertEquals is a deprecated alias of assertEqual.
            self.assertEqual('test exception', data['exception_args'][0])

    class MockTask:
        # Minimal stand-in for a celery task: just the attributes the
        # task formatter reads.
        class MockRequest:
            id = 'olive'
        request = MockRequest()
        name = 'gwenna'

    @mock.patch('celery._state.get_current_task',
                return_value=MockTask())
    def test_task_formatter(self, mock_current_task):
        """JsonTaskFormatter adds the current celery task's id and name."""
        formatter = JsonTaskFormatter()
        with self.assertLogs(log) as log_context:
            for handler in log.handlers:
                handler.setFormatter(formatter)
            log.info('Hello, Olive!')
            data = log_context.output[-1]
            data = json.loads(data)
            self.assertEqual(data['task_id'], 'olive')
            self.assertEqual(data['task_name'], 'gwenna')
|
python
|
#!/usr/bin/env python3
'''
booksdatasource.py
Jeff Ondich, 21 September 2021
For use in the "books" assignment at the beginning of Carleton's
CS 257 Software Design class, Fall 2021.
'''
#Revised by Thea Traw
import csv
class Author:
    """A book author; equality is decided purely by name."""

    def __init__(self, surname='', given_name='', birth_year=None, death_year=None):
        self.surname = surname
        self.given_name = given_name
        self.birth_year = birth_year
        self.death_year = death_year

    def __eq__(self, other):
        ''' For simplicity, we're going to assume that no two authors have the same name. '''
        return (self.surname, self.given_name) == (other.surname, other.given_name)
class Book:
    """A book with a title, publication year, and list of Author references."""

    def __init__(self, title='', publication_year=None, authors=None):
        ''' Note that the self.authors instance variable is a list of
            references to Author objects. '''
        self.title = title
        self.publication_year = publication_year
        # Avoid the shared-mutable-default pitfall: ``authors=[]`` as a
        # default would make every default-constructed Book share one list.
        self.authors = authors if authors is not None else []

    def __eq__(self, other):
        ''' We're going to make the excessively simplifying assumption that
            no two books have the same title, so "same title" is the same
            thing as "same book". '''
        return self.title == other.title
class BooksDataSource:
def __init__(self, books_csv_file_name):
    ''' The books CSV file format looks like this:
            title,publication_year,author_description
        For example:
            All Clear,2010,Connie Willis (1945-)
            "Right Ho, Jeeves",1934,Pelham Grenville Wodehouse (1881-1975)
        This __init__ method parses the specified CSV file and creates
        suitable instance variables for the BooksDataSource object containing
        a collection of Author objects and a collection of Book objects.
    '''
    self.list_of_authors = []
    self.list_of_books = []
    # Read every line; the 'with' block guarantees the file is closed even
    # if parsing below raises. (The old code also computed an unused line
    # count, which has been dropped.)
    with open(books_csv_file_name, "r") as books_file:
        lines_in_books_file = books_file.readlines()
    for line in lines_in_books_file:
        parsed_information = BooksDataSource.parse_line(line)
        title = parsed_information[0]
        publication_year = parsed_information[1]
        author_info = parsed_information[2]
        authors_list = []
        for author in BooksDataSource.process_list_of_authors(author_info):
            authors_list.append(author)
            # Deduplicate authors across books (Author equality is by name).
            if author not in self.list_of_authors:
                self.list_of_authors.append(author)
        self.list_of_books.append(Book(title, publication_year, authors_list))
def parse_line(line):
parsed_information = []
#check if there are quotation marks around title
if line.find('"') == -1:
parsed_information = line.split(",")
else:
#just in case there's also '"' in the book title
index_first_quote = line.find('"')
index_last_quote = line.rfind('"')
#everything between quotes
title = line[(index_first_quote + 1):index_last_quote]
#everything following the comma after the last quote
publication_and_author_info = line[(index_last_quote + 2):].split(",")
parsed_information.append(title)
parsed_information.append(publication_and_author_info[0])
parsed_information.append(publication_and_author_info[1])
return parsed_information
def process_author(author_info):
#separate names and birth & death year
#rfind() from https://www.w3schools.com/python/ref_string_rfind.asp
split_index = author_info.rfind(" ")
names = author_info[:split_index]
#birth and death year follow the space (" ") after the last name
birth_and_death_year = author_info[(split_index + 2):]
#just count any "middle" names as part of first name
name_split = names.rfind(" ")
given_name = names[:name_split]
surname = names[(name_split + 1):]
#for birth year and death year
birth_and_death_year_split = birth_and_death_year.split("-")
birth_year = birth_and_death_year_split[0].split("(")[0]
#assume no death year
death_year = None
if len(birth_and_death_year_split[1]) > 1: #meaning not just a ")"
death_year = birth_and_death_year_split[1].split(")")[0]
return Author(surname, given_name, birth_year, death_year)
def process_list_of_authors(author_info):
authors = []
if author_info.find(' and ') == -1:
sole_author = BooksDataSource.process_author(author_info)
authors.append(sole_author)
else:
author_strings = author_info.split(" and ")
for author_string in author_strings:
author = BooksDataSource.process_author(author_string)
authors.append(author)
return authors
def authors(self, search_text=None):
''' Returns a list of all the Author objects in this data source whose names contain
(case-insensitively) the search text. If search_text is None, then this method
returns all of the Author objects. In either case, the returned list is sorted
by surname, breaking ties using given name (e.g. Ann Brontë comes before Charlotte Brontë).
'''
selected_authors = []
if (search_text == None):
selected_authors = self.list_of_authors
else:
for author in self.list_of_authors:
if ((search_text.lower() in author.surname.lower()) or (search_text.lower() in author.given_name.lower())):
selected_authors.append(author)
#lambda sorting method from sorted() documentation: https://docs.python.org/3/howto/sorting.html
return sorted(selected_authors, key=lambda x: (x.surname, x.given_name))
def books(self, search_text=None, sort_by='title'):
''' Returns a list of all the Book objects in this data source whose
titles contain (case-insensitively) search_text. If search_text is None,
then this method returns all of the books objects.
The list of books is sorted in an order depending on the sort_by parameter:
'year' -- sorts by publication_year, breaking ties with (case-insenstive) title
'title' -- sorts by (case-insensitive) title, breaking ties with publication_year
default -- same as 'title' (that is, if sort_by is anything other than 'year'
or 'title', just do the same thing you would do for 'title')
'''
selected_books = []
if (search_text == None):
selected_books = self.list_of_books
#finding books that contain search_term
else:
for book in self.list_of_books:
if search_text.lower() in book.title.lower():
selected_books.append(book)
if (sort_by != 'year' or sort_by == 'title'):
#lambda sorting method from sorted() documentation: https://docs.python.org/3/howto/sorting.html
return sorted(selected_books, key=lambda book: (book.title))
else:
return sorted(selected_books, key=lambda book: (book.publication_year, book.title))
def books_between_years(self, start_year=None, end_year=None):
''' Returns a list of all the Book objects in this data source whose publication
years are between start_year and end_year, inclusive. The list is sorted
by publication year, breaking ties by title (e.g. Neverwhere 1996 should
come before Thief of Time 1996).
If start_year is None, then any book published before or during end_year
should be included. If end_year is None, then any book published after or
during start_year should be included. If both are None, then all books
should be included.
'''
start_year_none = False
end_year_none = False
if (start_year == 'None' or start_year == None):
start_year_none = True
if (end_year == 'None' or end_year == None):
end_year_none = True
#check for improper type in input (anything not an integer)
try:
if (start_year_none == False):
x = int(start_year)
if (end_year_none == False):
y = int(end_year)
except ValueError:
raise ValueError('sorry, invalid input') from None
quit()
selected_books = []
if (start_year_none == True and end_year_none == True):
selected_books = self.list_of_books
elif (start_year_none == True):
for book in self.list_of_books:
if (int(book.publication_year) <= int(end_year)):
selected_books.append(book)
elif (end_year_none == True):
for book in self.list_of_books:
if (int(book.publication_year) >= int(start_year)):
selected_books.append(book)
else: #neither term is None
for book in self.list_of_books:
if (int(book.publication_year) >= int(start_year) and int(book.publication_year) <= int(end_year)):
selected_books.append(book)
#lambda sorting method from sorted() documentation: https://docs.python.org/3/howto/sorting.html
return sorted(selected_books, key=lambda book: (book.publication_year, book.title))
def books_by_author(self, author):
books_by_author = []
for book in self.list_of_books:
if author in book.authors:
books_by_author.append(book)
return books_by_author
|
python
|
/home/wai/anaconda3/lib/python3.6/copy.py
|
python
|
import pytest
from ergaster import add

# (x, y, expected) triples driving the parametrized test below.
data = (
    (1, 2, 3),
    (2, 2, 4),
    (3, 2, 5),
)


@pytest.mark.parametrize("x, y, res", data)
def test_add(x, y, res):
    # add() should return the arithmetic sum of its two arguments.
    assert add(x, y) == res
|
python
|
def grafoSimples(matriz):
    """Inspect an adjacency matrix and return a report (in Portuguese)
    saying whether the graph is simple, i.e. has no self-loops (diagonal
    entries of 2) and no multiple edges (off-diagonal entries > 1)."""
    partes = []
    tem_laco = False
    tem_aresta_multipla = False
    for i, linha_atual in enumerate(matriz):
        for j, valor in enumerate(linha_atual):
            if i == j and valor == 2:
                # Diagonal entry of 2 marks a self-loop on vertex i+1.
                partes.append("Há laço no vertice %s\n" % (i + 1))
                tem_laco = True
            if i != j and valor > 1:
                # More than one edge between distinct vertices.
                partes.append("Há aresta multiplas nos vertices %s %s\n" % ((i + 1), (j + 1)))
                tem_aresta_multipla = True
    if not tem_aresta_multipla and not tem_laco:
        partes.append("É um grafos simples, pois não possui arestas multiplas e laços\n")
    else:
        partes.append("Não é um grafos simples, pois possui arestas multiplas e laços\n")
    return "".join(partes)
|
python
|
# coding=UTF-8
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from product.models import TaxClass
from l10n.models import AdminArea, Country
#from satchmo_store.shop.models import Order
#from satchmo_store.shop.signals import order_success
#from tax import Processor
from datetime import date as _date
try:
from decimal import Decimal
except:
from django.utils._decimal import Decimal
@python_2_unicode_compatible
class Taxable(models.Model):
    """
    Map that says what items are taxable in a jurisdiction.

    To use properly, assign products to a meaningful TaxClass, such as 'Shipping',
    'Food', 'Default'. Then create rules for the jurisdictions where you are
    required to collect tax. If for example, you are taxing objects in two states
    and 'Food' is taxable in one and not the other, but shipping is the other
    way around, you would need to create the following entries:

        food = TaxClass(...)
        default = TaxClass(...)
        shipping = TaxClass(...)
        one_state = AdminArea(...)
        two_state = AdminArea(...)
        usa = Country(...)
        Taxable(taxClass=default, isTaxable=True, taxZone=one_state, taxCountry=usa)
        Taxable(taxClass=food, isTaxable=False, useFood=True, taxZone=one_state, taxCountry=usa)
        Taxable(taxClass=shipping, isTaxable=True, taxZone=one_state, taxCountry=usa)
        Taxable(taxClass=default, isTaxable=True, taxZone=two_state, taxCountry=usa)
        Taxable(taxClass=food, isTaxable=True, useFood=True, taxZone=two_state, taxCountry=usa)
        Taxable(taxClass=shipping, isTaxable=False, taxZone=two_state, taxCountry=usa)

    Laws vary drastically form state to state, so please make sure to make needed
    TaxClasses for all objects that vary in taxing jurisdictions to which you
    must submit.

    If you do not at least create a 'Default' entry for a state, then you will
    not be collecting any taxes for that state. Only create entires for states
    where you are obligated to collect and report taxes.

    SST defines food rates and interstate vs. intrastate rates. You may override
    these, otherwise taxes will be charged at the non-food, intrastate rate by default.

    WARNING: If a product is taxable in ANY jurisdiction, it must be set taxable
    in the product. You disable it per-jurisdiction by disabling it here. You
    cannot enable it here if it is disabled on the product itself.
    """
    # Which product tax class this rule applies to (e.g. 'Food', 'Shipping').
    taxClass = models.ForeignKey(TaxClass, verbose_name=_('Tax Class'), on_delete=models.CASCADE)
    # Sub-national region (state/province); optional — country-wide rules leave it null.
    taxZone = models.ForeignKey(AdminArea, blank=True, null=True,
                                verbose_name=_('Tax Zone'), on_delete=models.SET_NULL)
    taxCountry = models.ForeignKey(Country, blank=True, null=True,
                                   verbose_name=_('Tax Country'), on_delete=models.SET_NULL)
    isTaxable = models.BooleanField(verbose_name=_('Taxable?'), default=True, )
    # SST rate selection flags; defaults match the non-food intrastate rate.
    useIntrastate = models.BooleanField(verbose_name=_('Use Intrastate rate instead of Interstate?'),
                                        default=True)
    useFood = models.BooleanField(verbose_name=_('Use food/drug rate instead of general?'),
                                  default=False)

    def _country(self):
        # Prefer the zone's country; fall back to the explicit country.
        # NOTE(review): raises AttributeError if both taxZone and taxCountry
        # are null — confirm at least one is always set.
        if self.taxZone:
            return self.taxZone.country.name
        else:
            return self.taxCountry.name
    country = property(_country)

    #def _display_percentage(self):
    #    return "%#2.2f%%" % (100*self.percentage)
    #_display_percentage.short_description = _('Percentage')
    #display_percentage = property(_display_percentage)

    def __str__(self):
        return "%s - %s = %s" % (self.taxClass,
                                 self.taxZone and self.taxZone or self.taxCountry,
                                 self.isTaxable)

    class Meta:
        verbose_name = _("Taxable Class")
        verbose_name_plural = _("Taxable Classes")
# SST jurisdiction-type codes (per the Streamlined Sales Tax rates-and-
# boundaries file format) used by TaxRate.jurisdictionType and the
# TaxBoundry FIPS place/special type fields.
JURISDICTION_CHOICES = (
    (0, 'County'),
    (1, 'City'),
    (2, 'Town'),
    (3, 'Village'),
    (4, 'Borough'),
    (5, 'Township'),
    (9, 'Other Municipality'),
    (10, 'School District'),
    (11, 'Junior Colleges'),
    (19, 'Other Schools'),
    (20, 'Water Control'),
    (21, 'Utility District'),
    (22, 'Sanitation'),
    (23, 'Water or Sewer District'),
    (24, 'Reclamation District'),
    (25, 'Fire or Police'),
    (26, 'Roads or Bridges'),
    (27, 'Hospitals'),
    (29, 'Other Municipal Services'),
    (40, 'Township and County'),
    (41, 'City and School'),
    (42, 'County collected by Other Taxing Authority'),
    (43, 'State and County'),
    (44, 'Central Collection Taxing Authority'),
    (45, 'State Taxing Authority'),
    (49, 'Other Combination Collection'),
    (50, 'Bond Authority'),
    (51, 'Annual County Bond Authority'),
    (52, 'Semi-annual County Bond Authority'),
    (53, 'Annual City Bond Authority'),
    (54, 'Semi-annual City Bond Authority'),
    (59, 'Other Bond Authority'),
    (61, 'Assessment District'),
    (62, 'Homeowner’s Association'),
    (63, 'Special District'),
    (69, 'Other Special Districts'),
    (70, 'Central Appraisal Taxing Authority'),
    (71, 'Unsecured County Taxes'),
    (72, 'Mobile Home Authority'),
    (79, 'Other Special Applications'),
)
@python_2_unicode_compatible
class TaxRate(models.Model):
    """
    Records for tax rates in the default SST format as defined at:
    http://www.streamlinedsalestax.org/Technology/RatesandBoundariesClean082605.pdf
    """
    # FIPS code of the state this rate row belongs to.
    state = models.IntegerField(verbose_name=_('FIPS State Code'), db_index=True)
    jurisdictionType = models.IntegerField(choices=JURISDICTION_CHOICES, verbose_name=_('Type'))
    jurisdictionFipsCode = models.CharField(max_length=5,
                                            verbose_name=_('FIPS Code'), db_index=True)
    # Rates are stored as fractions (e.g. 0.0625000 for 6.25%), one per
    # (general/food) x (intrastate/interstate) combination — see rate().
    generalRateIntrastate = models.DecimalField(max_digits=8, decimal_places=7,
                                                verbose_name=_('General Tax Rate - Intrastate'))
    generalRateInterstate = models.DecimalField(max_digits=8, decimal_places=7,
                                                verbose_name=_('General Tax Rate - Interstate'))
    foodRateIntrastate = models.DecimalField(max_digits=8, decimal_places=7,
                                             verbose_name=_('Food/Drug Tax Rate - Intrastate'))
    foodRateInterstate = models.DecimalField(max_digits=8, decimal_places=7,
                                             verbose_name=_('Food/Drug Tax Rate - Interstate'))
    # Validity window of this rate record.
    startDate = models.DateField(verbose_name=_('Effective Start Date'))
    endDate = models.DateField(verbose_name=_('Effective End Date'))

    class Meta:
        verbose_name = _("Tax Rate")
        verbose_name_plural = _("Tax Rates")

    def __str__(self):
        return 'State %d: Jurisdiction: %s(%s)' % (
            self.state,
            self.jurisdictionFipsCode,
            self.get_jurisdictionType_display(),
        )

    def rate(self, intrastate=False, food=False):
        # Select one of the four stored rates along the two boolean axes.
        if intrastate:
            if food:
                return self.foodRateIntrastate
            else:
                return self.generalRateIntrastate
        else:
            if food:
                return self.foodRateInterstate
            else:
                return self.generalRateInterstate
# SST boundary-record types: whole ZIP, ZIP+4, or street-address record.
TAX_BOUNDRY_CHOICES = (
    ('Z', 'Zip-5 Record'),
    ('4', 'Zip+4 Record'),
    ('A', 'Address Record'),
)

# Which side(s) of the street an address range covers.
ODD_EVEN_CHOICES = (
    ('O', 'Odd'),
    ('E', 'Even'),
    ('B', 'Both'),
)
@python_2_unicode_compatible
class TaxBoundry(models.Model):
    """
    Records for tax boundries in the default SST format as defined at:
    http://www.streamlinedsalestax.org/Technology/RatesandBoundariesClean082605.pdf

    A boundary maps a location (a ZIP, a ZIP+4, or a street-address range)
    to the set of taxing-jurisdiction FIPS codes that apply there; rates()
    resolves those codes to TaxRate rows.
    """
    # What kind of boundary record this is ('Z', '4', or 'A').
    recordType = models.CharField(max_length=1, choices=TAX_BOUNDRY_CHOICES,
                                  verbose_name=_('Boundry Type'))
    # Validity window of this boundary record.
    startDate = models.DateField(verbose_name=_('Effective Start Date'))
    endDate = models.DateField(verbose_name=_('Effective End Date'))
    # Street-address ('A') records: house-number range and street description.
    lowAddress = models.IntegerField(blank=True, null=True,
                                     verbose_name=_('Low Address Range'))
    highAddress = models.IntegerField(blank=True, null=True,
                                      verbose_name=_('High Address Range'))
    oddEven = models.CharField(max_length=1, blank=True, null=True, choices=ODD_EVEN_CHOICES,
                               verbose_name=_('Odd / Even Range Indicator'))
    streetPreDirection = models.CharField(max_length=2, blank=True, null=True,
                                          verbose_name=_('State Pre-Directional Abbr.'))
    streetName = models.CharField(max_length=20, blank=True, null=True,
                                  verbose_name=_('Street Name'))
    streetSuffix = models.CharField(max_length=4, blank=True, null=True,
                                    verbose_name=_('Street Suffix Abbr.'))
    streetPostDirection = models.CharField(max_length=2, blank=True, null=True,
                                           verbose_name=_('Street Post Directional'))
    addressSecondaryAbbr = models.CharField(max_length=4, blank=True, null=True,
                                            verbose_name=_('Address Secondary - Abbr.'))
    addressSecondaryLow = models.IntegerField(blank=True, null=True,
                                              verbose_name=_('Address Secondary - Low'))
    addressSecondaryHigh = models.IntegerField(blank=True, null=True,
                                               verbose_name=_('Address Secondary - High'))
    addressSecondaryOddEven = models.CharField(max_length=1, blank=True, null=True,
                                               choices=ODD_EVEN_CHOICES, verbose_name=_('Address Secondary - Odd/Even'))
    cityName = models.CharField(max_length=28, blank=True, null=True,
                                verbose_name=_('City Name'))
    zipCode = models.IntegerField(blank=True, null=True,
                                  verbose_name=_('Zip Code'))
    plus4 = models.IntegerField(blank=True, null=True,
                                verbose_name=_('Zip Code - Plus 4'))
    # ZIP ('Z') and ZIP+4 ('4') records: inclusive zip ranges, indexed
    # because lookup() queries on them.
    zipCodeLow = models.IntegerField(blank=True, null=True,
                                     verbose_name=_('Zip Code - Low'), db_index=True)
    zipExtensionLow = models.IntegerField(blank=True, null=True,
                                          verbose_name=_('Zip Code Extension - Low'), db_index=True)
    zipCodeHigh = models.IntegerField(blank=True, null=True,
                                      verbose_name=_('Zip Code - High'), db_index=True)
    zipExtensionHigh = models.IntegerField(blank=True, null=True,
                                           verbose_name=_('Zip Code Extension - High'), db_index=True)
    serCode = models.CharField(max_length=5, verbose_name=_('Composite SER Code'), blank=True, null=True)
    # FIPS codes identifying the taxing jurisdictions for this boundary;
    # rates() iterates these to find the applicable TaxRate rows.
    fipsStateCode = models.CharField(max_length=2, blank=True, null=True,
                                     verbose_name=_('FIPS State Code'))
    fipsStateIndicator = models.CharField(max_length=2, blank=True, null=True,
                                          verbose_name=_('FIPS State Indicator'))
    fipsCountyCode = models.CharField(max_length=3, blank=True, null=True,
                                      verbose_name=_('FIPS County Code'))
    fipsPlaceCode = models.CharField(max_length=5, blank=True, null=True,
                                     verbose_name=_('FIPS Place Code'))
    fipsPlaceType = models.CharField(max_length=2, blank=True, null=True,
                                     verbose_name=_('FIPS Place Type'), choices=JURISDICTION_CHOICES)
    # Up to 20 special-district jurisdictions, each a (code, type) pair.
    special_1_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 1 code'), blank=True, null=True)
    special_1_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 1 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_2_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 2 code'), blank=True, null=True)
    special_2_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 2 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_3_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 3 code'), blank=True, null=True)
    special_3_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 3 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_4_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 4 code'), blank=True, null=True)
    special_4_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 4 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_5_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 5 code'), blank=True, null=True)
    special_5_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 5 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_6_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 6 code'), blank=True, null=True)
    special_6_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 6 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_7_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 7 code'), blank=True, null=True)
    special_7_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 7 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_8_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 8 code'), blank=True, null=True)
    special_8_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 8 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_9_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 9 code'), blank=True, null=True)
    special_9_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 9 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_10_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 10 code'), blank=True, null=True)
    special_10_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 10 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_11_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 11 code'), blank=True, null=True)
    special_11_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 11 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_12_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 12 code'), blank=True, null=True)
    special_12_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 12 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_13_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 13 code'), blank=True, null=True)
    special_13_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 13 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_14_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 14 code'), blank=True, null=True)
    special_14_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 14 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_15_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 15 code'), blank=True, null=True)
    special_15_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 15 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_16_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 16 code'), blank=True, null=True)
    special_16_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 16 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_17_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 17 code'), blank=True, null=True)
    special_17_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 17 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_18_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 18 code'), blank=True, null=True)
    special_18_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 18 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_19_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 19 code'), blank=True, null=True)
    special_19_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 19 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    special_20_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 20 code'), blank=True, null=True)
    special_20_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 20 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)

    # Fill in this property to use not-today for looking up the tax rates.
    # (Plain class attributes, not model fields — per-instance overrides.)
    date = None
    # Set these and we'll use non-default rate.
    useIntrastate = None
    useFood = None

    def get_zip_range(self):
        """Human-readable zip range, including +4 extensions when present."""
        if self.zipExtensionLow:
            return '%05d-%04d -> %05d-%04d' % (
                self.zipCodeLow, self.zipExtensionLow, self.zipCodeHigh, self.zipExtensionHigh
            )
        else:
            return '%05d -> %05d' % (self.zipCodeLow, self.zipCodeHigh)
    zip_range = property(get_zip_range)

    def rates(self, date=None):
        """Return the TaxRate rows effective on *date* (default: today) for
        every jurisdiction FIPS code attached to this boundary.

        NOTE(review): uses TaxRate.objects.get(), so a missing or duplicate
        rate row for any code raises — confirm the imported data guarantees
        exactly one effective rate per (state, code, date).
        """
        l = list()
        state = self.fipsStateCode
        if not date:
            date = _date.today()
        # Lookup all the applicable codes.
        for fips in (
            self.fipsStateIndicator, self.fipsCountyCode, self.fipsPlaceCode,
            self.special_1_code, self.special_2_code, self.special_3_code,
            self.special_4_code, self.special_5_code, self.special_6_code,
            self.special_7_code, self.special_8_code, self.special_9_code,
            self.special_10_code, self.special_11_code, self.special_12_code,
            self.special_13_code, self.special_14_code, self.special_15_code,
            self.special_16_code, self.special_17_code, self.special_18_code,
            self.special_19_code, self.special_20_code
        ):
            if not fips:
                continue
            rate = TaxRate.objects.get(
                state=state,
                jurisdictionFipsCode=fips,
                startDate__lte=date,
                endDate__gte=date,
            )
            l.append( rate )
        return l

    def get_percentage(self, date=None):
        """
        Emulate being a tax rate by returning a total percentage to tax the customer.
        """
        pct = Decimal('0.00')
        # Sum every applicable jurisdiction's rate, honoring the optional
        # per-instance useIntrastate/useFood overrides.
        for x in self.rates(date):
            pct += x.rate(intrastate=self.useIntrastate, food=self.useFood)
        return pct
    percentage=property(get_percentage)

    def __str__(self):
        if self.recordType == 'Z':
            return 'TaxBoundry(Z): %i -- %i' % (
                self.zipCodeLow, self.zipCodeHigh
            )
        elif self.recordType == '4':
            return 'TaxBoundry(4): %i-%i -- %i-%i' % (
                self.zipCodeLow, self.zipExtensionLow,
                self.zipCodeHigh, self.zipExtensionHigh,
            )
        else:
            return 'TaxBoundry(A)'

    @classmethod
    def lookup(cls, zip, ext=None, date=None):
        """Handy function to take a zip code and return the appropriate rates
        for it.

        Tries a ZIP+4 match first (when *ext* is given), falls back to a
        plain ZIP match, and returns None when nothing covers the code.
        """
        if not date:
            date = _date.today()
        # Try for a ZIP+4 lookup first if we can.
        if ext:
            try:
                return cls.objects.get(
                    recordType='4',
                    zipCodeLow__lte=zip,
                    zipCodeHigh__gte=zip,
                    zipExtensionLow__lte=ext,
                    zipExtensionHigh__gte=ext,
                    startDate__lte=date,
                    endDate__gte=date,
                )
            except cls.DoesNotExist:
                # Not all zip+4 have entires. That's OK.
                pass
        # Try for just the ZIP then.
        try:
            return cls.objects.get(
                recordType='Z',
                zipCodeLow__lte=zip,
                zipCodeHigh__gte=zip,
                startDate__lte=date,
                endDate__gte=date,
            )
        except cls.DoesNotExist:
            return None

    class Meta:
        verbose_name = _("Tax Boundry")
        verbose_name_plural = _("Tax Boundries")
#class TaxCollected(models.Model):
# order = models.ForeignKey(Order, verbose_name=_("Order"))
# taxRate = models.ForeignKey(TaxRate, verbose_name=_('Tax Rate'))
# useIntrastate = models.BooleanField(verbose_name=_('Use Intrastate rate instead of Interstate?'),
# default=True)
# useFood = models.BooleanField(verbose_name=_('Use food/drug rate instead of general?'),
# default=False)
#
#def save_taxes_collected(order, **kwargs):
# processor = Processor(order=order)
# tb = processor.get_boundry()
#
#order_success.connect(save_taxes_colletecd)
from . import config
|
python
|
import os

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import colorcet as cc
import datashader as ds
import datashader.utils as utils
import datashader.transfer_functions as tf

# Render a UMAP embedding of cell-graph data with Datashader and save both
# the raw rasterized image and a matplotlib figure built from it.
sns.set(context="paper", style="white")

# Input/output locations and plot settings.
data_dir = os.path.abspath("./data")
data_fname = os.path.join(data_dir, "cellgraph_embedding.csv")
save_dir = os.path.abspath("./plots")
save = True
fmt = "png"
dpi = 300

# Spectral-like 10-color palette used as the categorical color key.
pal = [
    "#9e0142",
    "#d8434e",
    "#f67a49",
    "#fdbf6f",
    "#feeda1",
    "#f1f9a9",
    "#bfe5a0",
    "#74c7a5",
    "#378ebb",
    "#5e4fa2",
]
#color_key = {str(d): c for d, c in enumerate(pal)}
color_key = pal
#color_key = plt.get_cmap("Set1").colors

print(f"Reading data from {data_fname}")
df = pd.read_csv(data_fname)
# Placeholder category labels: random component counts in [1, 4].
# NOTE(review): this overwrites any real n_components column — confirm intended.
df["n_components"] = np.random.randint(1, 5, size=df.shape[0])
df["n_components"] = df["n_components"].astype("category")

print("Plotting with Datashader")
# Rasterize the points, counting per category, then shade with the palette.
cvs = ds.Canvas(plot_width=400, plot_height=400)
agg = cvs.points(df, "umap_x", "umap_y", ds.count_cat("n_components"))
img = tf.shade(agg, color_key=color_key, how="linear")

if save:
    # Save UMAP as image
    imname = "cellgraph_embedding_image"
    impath = os.path.join(save_dir, imname)
    utils.export_image(img, filename=impath, background="white")
    # Make plot from image
    fname = "cellgraph_embedding_plot"
    fpath = os.path.join(save_dir, fname + "." + fmt)
    image = plt.imread(impath + ".png")
    fig, ax = plt.subplots(figsize=(6, 6))
    plt.imshow(image)
    plt.setp(ax, xticks=[], yticks=[])
    # NOTE(review): '\m' is not a valid escape sequence; a raw string
    # (r"...") would be safer for this LaTeX-style title.
    plt.title("UMAP of tissue topologies ($5\mathrm{x}5$)", fontsize=12)
    print(f"Saving figure to {fpath}")
    plt.savefig(fpath, format=fmt, dpi=dpi)
|
python
|
# -*- coding: utf-8 -*-
# Local/development Django settings layered on top of base_settings.
from .base_settings import *

# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

# WARNING: '*' accepts any Host header — acceptable for local development,
# unsafe in production.
ALLOWED_HOSTS = ['*']

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        # os and BASE_DIR come from base_settings via the star import.
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# NOTE(review): machine-specific Windows path to the Apache FOP launcher —
# confirm/override per deployment.
FOP_EXECUTABLE = "C:/Users/ria/Downloads/fop-2.3/fop/fop.cmd"

# Dashboard class used by the Grappelli admin skin.
GRAPPELLI_INDEX_DASHBOARD = 'projectsettings.dashboard.CustomIndexDashboard'

# Require authentication for the koalixcrm REST API.
KOALIXCRM_REST_API_AUTH = True
|
python
|
import json

from dagster import ModeDefinition, execute_solid, solid
from dagster_slack import slack_resource
from mock import patch


@patch("slack.web.base_client.BaseClient._perform_urllib_http_request")
def test_slack_resource(mock_urllib_http_request):
    """Check that the slack resource wires a usable client into the solid
    context; the HTTP layer is patched so no real Slack call is made."""
    @solid(required_resource_keys={"slack"})
    def slack_solid(context):
        assert context.resources.slack
        # Fake a successful Slack API response for the upcoming call.
        body = {"ok": True}
        mock_urllib_http_request.return_value = {
            "status": 200,
            "body": json.dumps(body),
            "headers": "",
        }
        context.resources.slack.chat_postMessage(channel="#random", text=":wave: hey there!")
        # The client must have gone through the (mocked) HTTP layer.
        assert mock_urllib_http_request.called

    result = execute_solid(
        slack_solid,
        run_config={
            "resources": {"slack": {"config": {"token": "xoxp-1234123412341234-12341234-1234"}}}
        },
        mode_def=ModeDefinition(resource_defs={"slack": slack_resource}),
    )
    assert result.success
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.