import logging
from clx.analytics import detector_utils as du
log = logging.getLogger(__name__)
class DetectorDataset(object):
"""
    Wrapper class used to hold the partitioned dataframes and the total number of records across all partitions.
"""
def __init__(self, df, batch_size):
"""This function instantiates partitioned datframes and number of the records in all partitions.
:param df: domains dataframe.
:type df: cudf.DataFrame
        :param batch_size: Maximum number of records in each partitioned dataframe.
:type batch_size: int
"""
self.__partitioned_dfs, self.__dataset_len = self.__get_partitioned_dfs(
df, batch_size
)
@property
def partitioned_dfs(self):
return self.__partitioned_dfs
@property
def dataset_len(self):
return self.__dataset_len
# https://github.com/rapidsai/cudf/issues/2861
# https://github.com/rapidsai/cudf/issues/1473
# Workaround for partitioning dataframe into small batches
def __get_partitioned_dfs(self, df, batch_size):
"""Partition one dataframe to multiple small dataframes based on a given batch size.
        :param df: Contains domains and their types.
:type df: cudf.DataFrame
        :param batch_size: Number of records in each partitioned dataframe.
:type batch_size: int
"""
dataset_len = df["domain"].count()
df = du.str2ascii(df, dataset_len)
prev_chunk_offset = 0
partitioned_dfs = []
while prev_chunk_offset < dataset_len:
curr_chunk_offset = prev_chunk_offset + batch_size
chunk = df.iloc[prev_chunk_offset:curr_chunk_offset:1]
partitioned_dfs.append(chunk)
prev_chunk_offset = curr_chunk_offset
return partitioned_dfs, dataset_len
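# Minimal usage sketch (not part of the original module). It assumes a cudf
# DataFrame with a "domain" column, which is the only column this class reads
# directly; any extra columns (e.g. a label column) are carried along into the
# partitions unchanged.
if __name__ == "__main__":
    import cudf

    sample_df = cudf.DataFrame({"domain": ["google.com", "bnxisdhf.biz", "example.org"]})
    dataset = DetectorDataset(sample_df, batch_size=2)
    print("records:", dataset.dataset_len)               # 3
    print("partitions:", len(dataset.partitioned_dfs))   # 2 (sizes 2 and 1)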
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from dataclasses import dataclass
import os
from datasets import registry as datasets_registry
from foundations import desc
from foundations import hparams
from foundations.step import Step
from lottery.desc import LotteryDesc
from platforms.platform import get_platform
@dataclass
class TrainingDesc(desc.Desc):
"""The hyperparameters necessary to describe a training run."""
model_hparams: hparams.ModelHparams
dataset_hparams: hparams.DatasetHparams
training_hparams: hparams.TrainingHparams
@staticmethod
def name_prefix(): return 'train'
@staticmethod
def add_args(parser: argparse.ArgumentParser, defaults: LotteryDesc = None):
hparams.DatasetHparams.add_args(parser, defaults=defaults.dataset_hparams if defaults else None)
hparams.ModelHparams.add_args(parser, defaults=defaults.model_hparams if defaults else None)
hparams.TrainingHparams.add_args(parser, defaults=defaults.training_hparams if defaults else None)
@staticmethod
def create_from_args(args: argparse.Namespace) -> 'TrainingDesc':
dataset_hparams = hparams.DatasetHparams.create_from_args(args)
model_hparams = hparams.ModelHparams.create_from_args(args)
training_hparams = hparams.TrainingHparams.create_from_args(args)
return TrainingDesc(model_hparams, dataset_hparams, training_hparams)
def str_to_step(self, s: str) -> Step:
return Step.from_str(s, datasets_registry.iterations_per_epoch(self.dataset_hparams))
@property
def end_step(self):
iterations_per_epoch = datasets_registry.iterations_per_epoch(self.dataset_hparams)
return Step.from_str(self.training_hparams.training_steps, iterations_per_epoch)
@property
def train_outputs(self):
        return datasets_registry.num_classes(self.dataset_hparams)
def run_path(self, replicate, experiment='main'):
return os.path.join(get_platform().root, self.hashname, f'replicate_{replicate}', experiment)
@property
def display(self):
return '\n'.join([self.dataset_hparams.display, self.model_hparams.display, self.training_hparams.display])
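# Usage sketch (not part of the original file): the intended flow is to let the
# Desc register its hyperparameter flags on an ArgumentParser, parse the command
# line, and rebuild the Desc from the resulting namespace. Which flags are
# required is determined by the hparams classes in foundations.hparams.
#
#   parser = argparse.ArgumentParser()
#   TrainingDesc.add_args(parser)
#   args = parser.parse_args()
#   desc = TrainingDesc.create_from_args(args)
#   print(desc.display)
#   print(desc.run_path(replicate=1))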
|
python
|
import logging
import numpy as np
import paddle
from ..common import get_logger
from .var_group import *
from .pruning_plan import *
from .filter_pruner import FilterPruner
__all__ = ['L1NormFilterPruner']
_logger = get_logger(__name__, logging.INFO)
class L1NormFilterPruner(FilterPruner):
def __init__(self, model, input_shape, sen_file=None):
super(L1NormFilterPruner, self).__init__(
model, input_shape, sen_file=sen_file)
def cal_mask(self, var_name, pruned_ratio, group):
value = group[var_name]['value']
pruned_dims = group[var_name]['pruned_dims']
reduce_dims = [
i for i in range(len(value.shape)) if i not in pruned_dims
]
l1norm = np.mean(np.abs(value), axis=tuple(reduce_dims))
sorted_idx = l1norm.argsort()
pruned_num = int(round(len(sorted_idx) * pruned_ratio))
pruned_idx = sorted_idx[:pruned_num]
mask_shape = [value.shape[i] for i in pruned_dims]
mask = np.ones(mask_shape, dtype="int32")
mask[pruned_idx] = 0
return mask
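# Numpy-only illustration of the masking rule in cal_mask above (a sketch, not
# part of the original file; the module cannot be run directly because of its
# relative imports). For a conv weight of shape [8, 4, 3, 3] pruned along axis
# 0, the filters with the smallest mean absolute value receive a 0 in the mask:
#
#   value = np.random.rand(8, 4, 3, 3)
#   pruned_dims = [0]
#   reduce_dims = [i for i in range(value.ndim) if i not in pruned_dims]
#   l1norm = np.mean(np.abs(value), axis=tuple(reduce_dims))   # shape (8,)
#   pruned_num = int(round(len(l1norm) * 0.25))                # prune 25% of filters
#   mask = np.ones([value.shape[i] for i in pruned_dims], dtype="int32")
#   mask[l1norm.argsort()[:pruned_num]] = 0
#   # mask now holds zeros at the two filters with the smallest L1 score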
|
python
|
# Component to translate alias values into strings
import os, re, logging, json
from pathlib import Path, PosixPath, WindowsPath
_ValAliases = {} # Keeps track of classes which have been registered
def addAliasClass(aliasClass, tag=None):
'''Add a class to the supported alias list so that the ValAlias.makeAlias
method can generate the aliases as needed from text.
Args:
- aliasClass (type): The class to add to the list of supported aliases.
Must be a subclass of the ValAlias class
- [tag=None (str|None)]: The text string (should be lowercase) which
indicates this class. If not specified, the TAG value of the
aliasClass is used instead
Raises:
- ValueError:
- The provided aliasClass is not a subclass of a ValAlias
            - An alias class is already associated with the given tag
- No tag was specified
'''
# Ensure the alias class is indeed an alias class
if not isinstance(aliasClass, type):
raise ValueError('Provided alias class is not a class')
if not issubclass(aliasClass, ValAlias):
raise ValueError('Provided alias class "{0:s}" is not a subclass of a ValAlias'.format(str(aliasClass)))
# If we do not have a tag, get the tag of the alias class
if tag is None:
try:
tag = aliasClass.TAG
except AttributeError:
raise ValueError('No tag provided when adding alias class "{0:s}"'.format(str(aliasClass)))
# Convert tag to string
tag = str(tag)
# Ensure we are not adding a duplicate entry
if tag in _ValAliases:
raise ValueError('Cannot track alias class "{0:s}" with tag "{1:s}" as another alias class ("{2:s}") with that tag already exists'.format(
str(aliasClass), tag, str(_ValAliases[tag])))
# Add the entry
logging.info('Adding alias class {0:s} under tag "{1:s}"'.format(aliasClass.__name__, tag))
_ValAliases[tag] = aliasClass
class ValAlias(object):
'''Class to handle parsing an alias spec in a value string
Alias specs follow the given format:
{alias_name:options}
Where
- alias_name (str): The name of the alias to use
- options (str): An options string specifying the options for the alias
    Please note that escaped braces like \\{ and \\} are not parsed as alias
    delimiters and are treated as literal braces
Attributes:
- type (str): The main type of the alias
- subtypes (list<str>): Qualifiers to the main type
- parameters (dict<str,*>): Dictionary of parameters the alias uses to
evaluate itself
- file (Path): The file which defined this alias
'''
@staticmethod
def makeAliasDictionary(file):
'''Create an alias dictionary from an alias dictionary .json file
Args:
- file (str|Path): The path to the file to create the dictionary
from
Returns:
- dict<ValAlias>: The aliases contained in the file, indexed by
their names
Raises:
- FileNotFoundError: The provided file could not be found
- ValueError:
- The provided file format does not match an alias dictionary
file
- One or more alias definitions is not formed correctly
'''
# Ensure we have the file
file = Path(file)
if not file.is_file():
raise FileNotFoundError('Could not find file "{0:s}" for making an alias dictionary'.format(str(file)))
# Open and parse the file
ret = {}
with open(file, 'r') as json_file:
aliases = json.load(json_file)
for alias_name in aliases:
ret[alias_name] = ValAlias.makeAlias(aliases[alias_name], file)
# Return output dictionary
return ret
@staticmethod
def makeAlias(definition, file):
'''Create an alias from a given definition
Args:
- definition (dict<str,str>): The definition of the alias. Can
contain:
- 'type': (str) (required) The main type of the alias
- 'subtypes': (list<str>) (optional) Qualifiers to the main
type for the alias
- other: (*) (optional) Parameters the alias uses to evaluate
itself
- file (str|Path): The absolute file path to the file the alias is
defined in.
Return:
- ValAlias: An alias object
Raises:
- ValueError:
- No alias of the given type has been found
- The alias cannot be created from the given dictionary
'''
# Ensure we can find the relevant class
if not 'type' in definition:
raise ValueError('Definition for alias does not specify type')
if not definition['type'] in _ValAliases:
raise ValueError('Could not find alias of type "{0:s}" in list of supported aliases'.format(definition['type']))
# Make and return the alias
return _ValAliases[definition['type']](definition, file)
@staticmethod
def evaluateAliases(value, aliases, parents=None):
'''Parse a value string, replacing aliases with their desired values
Args:
- value (str): The string to replace aliases for
- aliases (list<dict<str,ValAlias>>): The alias dictionaries to use.
If an alias is used across multiple dictionaries, the last
dictionary to define that alias will be used.
            - [parents=None (list<str>)]: A history of aliases whose evaluation
depends on the given value being evaluated
Return:
- (str): The value string with aliases replaced
Raises:
- ValueError: An issue occurred trying to evaluate an alias
- NotImplementedError: One of the provided aliases cannot be
evaluated
'''
# Need to make this list so we can traverse it backwards
alias_iterations = []
if parents is None:
parents = []
# Debug log
parentstr = ''
for parent in parents:
parentstr += parent + ' '
parentstr = parentstr[:-1]
        logging.debug('Evaluating aliases in "{0:s}" [{1:s}]'.format(value, parentstr))
# Get each alias option and iterate though
alias_pattern = re.compile(r"(?<!\\)\{([^:\}]+)(:([^\{\}]+))?(?<!\\)\}", re.MULTILINE)
alias_strings = alias_pattern.finditer(value)
for alias_m in alias_strings:
# Get the name / options from the match
alias_name = value[alias_m.start(1):alias_m.end(1)]
alias_options = value[alias_m.start(3):alias_m.end(3)]
# Check for circular dependency
if alias_name in parents:
raise ValueError('A circular dependency exists while evaluating alias "{0:s}"'.format(alias_name))
new_parents = parents.copy()
new_parents.append(alias_name)
# Find the matching alias
alias = None
for alias_dict in aliases:
if alias_name in alias_dict:
alias = alias_dict[alias_name]
# Throw exception if we could not find the right alias
if alias is None:
raise ValueError('Could not find alias named "{0:s}" in provided alias dictionaries'.format(alias_name))
# Otherwise evaluate the alias
converted_value = alias.evaluate(alias_options, aliases, new_parents)
# Add it to the list
alias_iterations.append((converted_value, alias_m))
# Going backwards, replace the values in the string
for (converted_value,alias_m) in reversed(alias_iterations):
value = value[:alias_m.start(0)] + converted_value + value[alias_m.end(0):]
logging.debug(value)
# Return
logging.debug(value)
return value.replace('\\{', '{').replace('\\}', '}')
def __init__(self, definition, file):
'''Fill out the basic attributes of the alias
Args:
- definition (dict<str,str>): The definition of the alias. Can
contain:
- 'type': (str) (required) The main type of the alias
- 'subtypes': (list<str>) (optional) Qualifiers to the main
type for the alias
- other: (*) (optional) Parameters the alias uses to evaluate
itself
- file (str|Path): The absolute file path to the file the alias is
defined in.
Raises:
- ValueError: The provided dictionary does not have a 'type' entry
'''
# Save the file
self.file = Path(file)
# Ensure we have a type
if not 'type' in definition:
raise ValueError('Cannot make alias without a specified type')
self.type = definition['type']
# Save details
self.subtypes = []
self.parameters = {}
for parameter in definition:
pvalue = definition[parameter]
if parameter == 'type':
# We've already saved the type
continue
elif parameter == 'subtypes':
# Get the subtypes; either append (for a single value) or copy list
if not isinstance(pvalue, list):
self.subtypes.append(pvalue)
else:
self.subtypes = pvalue
else:
# Copy over remaining parameters
self.parameters[parameter] = pvalue
def evaluate(self, options, other_aliases, parents):
'''Extract the string value an alias evaluates to
TODO: Check to make sure that infinite alias recursion is not possible.
Arguments:
- options (str): The string text for a given option
- other_aliases (list<dict<str,ValAlias>>): The other aliases
available for use when defining values. This is useful for
recursive aliases.
- parents (list<str>): A history of aliases whose evaluation depends
on the given value being evaluated
Return:
- str: The value the alias evaluates to
Raises:
- ValueError:
- The alias does not have enough options to evaluate
- The alias's options are not in the correct format for the
alias
- An issue occurred evaluating the alias
- A circular dependency exists in this evaluation
- NotImplementedError: this type of alias cannot be evaluated
'''
raise NotImplementedError('Cannot evaluate alias of base value alias class')
class StringAlias(ValAlias):
'''Class to handle a basic string replacement alias
Attributes:
- value (str): The string value this alias evaluates to
'''
TAG = 'string'
def __init__(self, definition, file):
'''Fill out the basic attributes of the alias
Subtypes:
Parameters:
- value (str): The value
Args:
- definition (dict<str,str>): The definition of the alias. Can
contain:
- 'type': (str) (required) The main type of the alias
- 'subtypes': (list<str>) (optional) Qualifiers to the main
type for the alias
- other: (*) (optional) Parameters the alias uses to evaluate
itself
- file (str|Path): The absolute file path to the file the alias is
defined in.
Raises:
- ValueError: The alias cannot be created from the given dictionary
'''
# Call the base constructor
super(StringAlias, self).__init__(definition, file)
# Ensure we have a value
if not 'value' in self.parameters:
raise ValueError('Cannot make a string alias without a value parameter')
# Set the value
self.value = self.parameters['value']
def evaluate(self, options, other_aliases, parents):
'''Extract the string value an alias evaluates to
TODO: Check to make sure that infinite alias recursion is not possible.
Arguments:
- options (str): The string text for a given option
- other_aliases (list<dict<str,ValAlias>>): The other aliases
available for use when defining values. This is useful for
recursive aliases.
- parents (list<str>): A history of aliases whose evaluation depends
on the given value being evaluated
Return:
- str: The value the alias evaluates to
Raises:
- ValueError:
- The alias does not have enough options to evaluate
- The alias's options are not in the correct format for the
alias
- An issue occurred evaluating the alias
- A circular dependency exists in this evaluation
- NotImplementedError: this type of alias cannot be evaluated
'''
return ValAlias.evaluateAliases(self.value, other_aliases, parents)
addAliasClass(StringAlias)
class PathAlias(ValAlias):
'''Alias to represent a path
Attributes:
- path (Path): The path to use for evaluation
'''
TAG = 'path'
def __init__(self, definition, file):
'''Fill out the basic attributes of the alias
Subtypes:
abs|rel, [dir|file]
Where the subtypes mean
- abs: The path should be evaluated as an absolute file
- rel: The path should be treated as a relative path
- dir: The path is a directory
- file: The path is a file
Parameters:
- value (str): The string value of the path
Args:
- definition (dict<str,str>): The definition of the alias. Can
contain:
- 'type': (str) (required) The main type of the alias
- 'subtypes': (list<str>) (optional) Qualifiers to the main
type for the alias
- other: (*) (optional) Parameters the alias uses to evaluate
itself
- file (str|Path): The absolute file path to the file the alias is
defined in.
Raises:
- ValueError: The alias cannot be created from the given dictionary
'''
# Call the base constructor
super(PathAlias, self).__init__(definition, file)
# Ensure we have a value
if not 'value' in self.parameters:
raise ValueError('Cannot make a path alias without a value parameter')
# Get the path
self.path = Path(self.parameters['value'])
# Make absolute if required, evaluated relative to definition file
if 'abs' in self.subtypes:
if not self.path.is_absolute():
self.path = self.file.parent.joinpath(self.path)
def evaluate(self, options, other_aliases, parents):
'''Extract the string value an alias evaluates to
TODO: Check to make sure that infinite alias recursion is not possible.
Arguments:
- options (str): The string text for a given option
- other_aliases (list<dict<str,ValAlias>>): The other aliases
available for use when defining values. This is useful for
recursive aliases.
- parents (list<str>): A history of aliases whose evaluation depends
on the given value being evaluated
Return:
- str: The value the alias evaluates to
Raises:
- ValueError:
- The alias does not have enough options to evaluate
- The alias's options are not in the correct format for the
alias
- An issue occurred evaluating the alias
- A circular dependency exists in this evaluation
- NotImplementedError: this type of alias cannot be evaluated
'''
# Get the output string
output = ValAlias.evaluateAliases(str(self.path), other_aliases, parents)
# Add slash for directories if a slash is not already there
if 'dir' in self.subtypes:
if not output[-1] == '/' and not output[-1] == '\\':
output += '/' if isinstance(self.path, PosixPath) else '\\'
return output
addAliasClass(PathAlias)
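if __name__ == '__main__':
    # Small demonstration (a sketch, not part of the original module): build an
    # alias dictionary in code and resolve a value string that nests one alias
    # inside another. The alias names and values below are illustrative only;
    # the trailing-slash behaviour of the 'dir' subtype is shown for POSIX paths.
    demo_aliases = {
        'project': ValAlias.makeAlias({'type': 'string', 'value': 'demo'}, __file__),
        'root': ValAlias.makeAlias(
            {'type': 'path', 'subtypes': ['rel', 'dir'], 'value': 'data/{project}'},
            __file__),
    }
    print(ValAlias.evaluateAliases('{root}output.txt', [demo_aliases]))
    # On a POSIX platform this prints: data/demo/output.txt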
|
python
|
import json
import multiprocessing
import os
import shutil
from typing import Dict, List, Tuple
import cv2
import numpy as np
from flask import request
from tensorpack.utils import logger
from tqdm import tqdm
from werkzeug import FileStorage
from werkzeug.utils import secure_filename
from zipfile import ZipFile
from mot.object_detection.query_server import \
localizer_tensorflow_serving_inference
from mot.tracker.object_tracking import ObjectTracking
from mot.tracker.video_utils import read_folder, split_video
SERVING_URL = "http://localhost:8501" # the url where the tf-serving container exposes the model
UPLOAD_FOLDER = 'tmp' # folder used to store images or videos when sending files
FPS = 4
RESOLUTION = (1024, 768)
CLASS_NAMES = ["bottles", "others", "fragments"]
SUM_THRESHOLD = 0.6 # the sum of scores for all classes must be greater than this value
# for the prediction to be kept
CLASS_TO_THRESHOLD = {"bottles": 0.4, "others": 0.3, "fragments": 0.3}
CPU_COUNT = min(int(multiprocessing.cpu_count() / 2), 32)
def handle_post_request(upload_folder: str = UPLOAD_FOLDER) -> Dict[str, np.array]:
"""This method is the first one to be called when a POST request is coming. It analyzes the incoming
format (file or JSON) and then call the appropiate methods to do the prediction.
If you want to make a prediction by sending the data as a JSON, it has to be in this format:
```json
{"image":[[[0,0,0],[0,0,0]],[[0,0,0],[0,0,0]]]}
```
or
```json
{"video": TODO}
```
Arguments:
    - *upload_folder*: Where the files are temporarily stored
Returns:
- *Dict[str, np.array]*: The predictions of the TF serving module
Raises:
- *NotImplementedError*: If the format of data isn't handled yet
"""
if "file" in request.files:
return handle_file(request.files['file'], upload_folder, **request.form)
data = json.loads(request.data.decode("utf-8"))
if "image" in data:
image = np.array(data["image"])
return {"detected_trash": predict_and_format_image(image)}
if "video" in data:
raise NotImplementedError("video")
raise ValueError(
"Error during the reading of JSON. Keys {} aren't valid ones.".format(data.keys()) +
"For an image, send a JSON such as {'image': [0, 0, 0]}." +
"Sending videos over JSON isn't implemented yet."
)
def handle_file(
file: FileStorage,
upload_folder: str = UPLOAD_FOLDER,
fps: int = FPS,
resolution: Tuple[int, int] = RESOLUTION,
**kwargs
) -> Dict[str, np.array]:
"""Make the prediction if the data is coming from an uploaded file.
Arguments:
- *file*: The file, can be either an image or a video, or a zipped folder
    - *upload_folder*: Where the files are temporarily stored
Returns:
- for an image: a json of format
```json
{
"image": filename,
"detected_trash":
[
{
"box": [1, 1, 2, 20],
"label": "fragments",
"score": 0.92
}, {
"box": [10, 10, 25, 20],
"label": "bottles",
"score": 0.75
}
]
}
```
- for a video or a zipped file: a json of format
```json
{
"video_length": 132,
"fps": 2,
"video_id": "GOPRO1234.mp4",
"detected_trash":
[
{
"label": "bottles",
"id": 0,
"frame_to_box": {
23: [0, 0, 1, 10],
24: [1, 1, 4, 13]
}
}, {
"label": "fragments",
"id": 1,
"frame_to_box": {
12: [10, 8, 9, 15]
}
}
]
}
```
Raises:
- *NotImplementedError*: If the format of data isn't handled yet
"""
if kwargs:
logger.warning("Unused kwargs: {}".format(kwargs))
filename = secure_filename(file.filename)
full_filepath = os.path.join(upload_folder, filename)
if not os.path.isdir(upload_folder):
os.mkdir(upload_folder)
if os.path.isfile(full_filepath):
os.remove(full_filepath)
file.save(full_filepath)
file_type = file.mimetype.split("/")[0]
# mimetype is for example 'image/png' and we only want the image
if file_type == "image":
image = cv2.imread(full_filepath) # cv2 opens in BGR
os.remove(full_filepath) # remove it as we don't need it anymore
try:
detected_trash = predict_and_format_image(image)
except ValueError as e:
return {"error": str(e)}
return {"image": filename, "detected_trash": detected_trash}
elif file_type in ["video", "application"]:
folder = None
if file.mimetype == "application/zip":
            # zip case: extract the archived frames and locate the folder they live in
            dirname = None
            with ZipFile(full_filepath, 'r') as zip_file:
                for zipped_filename in zip_file.namelist():
                    dirname = os.path.dirname(zipped_filename)
                    zip_file.extract(zipped_filename, upload_folder)
            folder = os.path.join(upload_folder, dirname)
else:
# video case: splitting video and saving frames
folder = os.path.join(upload_folder, "{}_split".format(filename))
if os.path.isdir(folder):
shutil.rmtree(folder)
os.mkdir(folder)
logger.info("Splitting video {} to {}.".format(full_filepath, folder))
split_video(full_filepath, folder, fps=fps, resolution=resolution)
        print("folder:", folder, "upload_folder:", upload_folder, "file.filename:", file.filename)
image_paths = read_folder(folder)
if len(image_paths) == 0:
raise ValueError("No output image")
# making inference on frames
logger.info("{} images to analyze on {} CPUs.".format(len(image_paths), CPU_COUNT))
try:
with multiprocessing.Pool(CPU_COUNT) as p:
inference_outputs = list(
tqdm(
p.imap(process_image, image_paths),
total=len(image_paths),
)
)
except ValueError as e:
return {"error": str(e)}
logger.info("Finish analyzing video {}.".format(full_filepath))
# tracking objects
logger.info("Starting tracking.")
object_tracker = ObjectTracking(filename, image_paths, inference_outputs, fps=fps)
tracks = object_tracker.compute_tracks()
logger.info("Tracking finished.")
return object_tracker.json_result(tracks)
else:
raise NotImplementedError(file_type)
def process_image(image_path: str) -> Dict[str, object]:
"""Function used to open and predict on an image. It is suposed to be used in multiprocessing.
Arguments:
- *image_path*
Returns:
- *Dict[str, object]*: Predictions for this image path
```python
predictions = {
'output/boxes:0': [[0, 0, 1, 1], [0, 0, 10, 10], [10, 10, 15, 100]],
'output/labels:0': [3, 1, 2], # the labels start at 1 since 0 is for background
'output/scores:0': [0.98, 0.87, 0.76] # sorted in descending order
}
```
"""
image = cv2.imread(image_path) # cv2 opens in BGR
return localizer_tensorflow_serving_inference(image, SERVING_URL, return_all_scores=True)
def predict_and_format_image(
image: np.ndarray,
class_names: List[str] = CLASS_NAMES,
class_to_threshold: Dict[str, float] = CLASS_TO_THRESHOLD
) -> List[Dict[str, object]]:
"""Make prediction on an image and return them in a human readable format.
Arguments:
    - *image*: A numpy array in BGR
- *class_names*: The list of class names without background
    - *class_to_threshold*: A dict mapping class names to thresholds. If a class name isn't in
this dict, no threshold will be applied, which means that all predictions for this class
will be kept.
Returns:
- *List[Dict[str, object]]*: List of dicts such as:
```python3
{
"box": [1, 1, 2, 20],
"label": "fragments",
"score": 0.92
}
```
"""
class_names = ["BG"] + class_names
outputs = localizer_tensorflow_serving_inference(image, SERVING_URL, return_all_scores=False)
detected_trash = []
for box, label, score in zip(
outputs["output/boxes:0"], outputs["output/labels:0"], outputs["output/scores:0"]
):
if keep_prediction(class_names, label, class_to_threshold, score):
trash_json = {
"box": [round(coord, 2) for coord in box],
"label": class_names[label],
"score": score,
}
detected_trash.append(trash_json)
return detected_trash
def keep_prediction(class_names, label, class_to_threshold, score):
if isinstance(score, list): # we have scores for all classes
if np.array(score).sum() < SUM_THRESHOLD:
return False
return True
return class_names[label] not in class_to_threshold or score >= class_to_threshold[
class_names[label]]
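# Client-side usage sketch (not part of this module): assuming these handlers
# are wired to a Flask route, e.g. POST /predict (the route path and host below
# are assumptions for illustration only), a prediction can be requested either
# with a multipart file upload or with a raw JSON image as described in
# handle_post_request.
#
#   import requests
#
#   # file upload (image, video, or zip of frames)
#   with open("frame.jpg", "rb") as f:
#       resp = requests.post("http://localhost:5000/predict", files={"file": f})
#
#   # raw JSON image (nested list of BGR pixel values)
#   resp = requests.post(
#       "http://localhost:5000/predict",
#       json={"image": [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]]},
#   )
#   print(resp.json())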
|
python
|
import numpy
def print_table(table, path):
    # write the table to a text file and echo it to stdout
    with open(path, 'w') as f:
        for row in range(len(table)):
            for col in range(len(table[row])):
                f.write(str(table[row][col]))
                f.write(' ')
                print(table[row][col], end=' ')
            print("\n")
            f.write('\n')
S0_Box = (
(0x3e,0x72,0x5b,0x47,0xca,0xe0,0x00,0x33,0x04,0xd1,0x54,0x98,0x09,0xb9,0x6d,0xcb),
(0x7b,0x1b,0xf9,0x32,0xaf,0x9d,0x6a,0xa5,0xb8,0x2d,0xfc,0x1d,0x08,0x53,0x03,0x90),
(0x4d,0x4e,0x84,0x99,0xe4,0xce,0xd9,0x91,0xdd,0xb6,0x85,0x48,0x8b,0x29,0x6e,0xac),
(0xcd,0xc1,0xf8,0x1e,0x73,0x43,0x69,0xc6,0xb5,0xbd,0xfd,0x39,0x63,0x20,0xd4,0x38),
(0x76,0x7d,0xb2,0xa7,0xcf,0xed,0x57,0xc5,0xf3,0x2c,0xbb,0x14,0x21,0x06,0x55,0x9b),
(0xe3,0xef,0x5e,0x31,0x4f,0x7f,0x5a,0xa4,0x0d,0x82,0x51,0x49,0x5f,0xba,0x58,0x1c),
(0x4a,0x16,0xd5,0x17,0xa8,0x92,0x24,0x1f,0x8c,0xff,0xd8,0xae,0x2e,0x01,0xd3,0xad),
(0x3b,0x4b,0xda,0x46,0xeb,0xc9,0xde,0x9a,0x8f,0x87,0xd7,0x3a,0x80,0x6f,0x2f,0xc8),
(0xb1,0xb4,0x37,0xf7,0x0a,0x22,0x13,0x28,0x7c,0xcc,0x3c,0x89,0xc7,0xc3,0x96,0x56),
(0x07,0xbf,0x7e,0xf0,0x0b,0x2b,0x97,0x52,0x35,0x41,0x79,0x61,0xa6,0x4c,0x10,0xfe),
(0xbc,0x26,0x95,0x88,0x8a,0xb0,0xa3,0xfb,0xc0,0x18,0x94,0xf2,0xe1,0xe5,0xe9,0x5d),
(0xd0,0xdc,0x11,0x66,0x64,0x5c,0xec,0x59,0x42,0x75,0x12,0xf5,0x74,0x9c,0xaa,0x23),
(0x0e,0x86,0xab,0xbe,0x2a,0x02,0xe7,0x67,0xe6,0x44,0xa2,0x6c,0xc2,0x93,0x9f,0xf1),
(0xf6,0xfa,0x36,0xd2,0x50,0x68,0x9e,0x62,0x71,0x15,0x3d,0xd6,0x40,0xc4,0xe2,0x0f),
(0x8e,0x83,0x77,0x6b,0x25,0x05,0x3f,0x0c,0x30,0xea,0x70,0xb7,0xa1,0xe8,0xa9,0x65),
(0x8d,0x27,0x1a,0xdb,0x81,0xb3,0xa0,0xf4,0x45,0x7a,0x19,0xdf,0xee,0x78,0x34,0x60)
)
S1_Box = (
(0x55,0xc2,0x63,0x71,0x3b,0xc8,0x47,0x86,0x9f,0x3c,0xda,0x5b,0x29,0xaa,0xfd,0x77),
(0x8c,0xc5,0x94,0x0c,0xa6,0x1a,0x13,0x00,0xe3,0xa8,0x16,0x72,0x40,0xf9,0xf8,0x42),
(0x44,0x26,0x68,0x96,0x81,0xd9,0x45,0x3e,0x10,0x76,0xc6,0xa7,0x8b,0x39,0x43,0xe1),
(0x3a,0xb5,0x56,0x2a,0xc0,0x6d,0xb3,0x05,0x22,0x66,0xbf,0xdc,0x0b,0xfa,0x62,0x48),
(0xdd,0x20,0x11,0x06,0x36,0xc9,0xc1,0xcf,0xf6,0x27,0x52,0xbb,0x69,0xf5,0xd4,0x87),
(0x7f,0x84,0x4c,0xd2,0x9c,0x57,0xa4,0xbc,0x4f,0x9a,0xdf,0xfe,0xd6,0x8d,0x7a,0xeb),
(0x2b,0x53,0xd8,0x5c,0xa1,0x14,0x17,0xfb,0x23,0xd5,0x7d,0x30,0x67,0x73,0x08,0x09),
(0xee,0xb7,0x70,0x3f,0x61,0xb2,0x19,0x8e,0x4e,0xe5,0x4b,0x93,0x8f,0x5d,0xdb,0xa9),
(0xad,0xf1,0xae,0x2e,0xcb,0x0d,0xfc,0xf4,0x2d,0x46,0x6e,0x1d,0x97,0xe8,0xd1,0xe9),
(0x4d,0x37,0xa5,0x75,0x5e,0x83,0x9e,0xab,0x82,0x9d,0xb9,0x1c,0xe0,0xcd,0x49,0x89),
(0x01,0xb6,0xbd,0x58,0x24,0xa2,0x5f,0x38,0x78,0x99,0x15,0x90,0x50,0xb8,0x95,0xe4),
(0xd0,0x91,0xc7,0xce,0xed,0x0f,0xb4,0x6f,0xa0,0xcc,0xf0,0x02,0x4a,0x79,0xc3,0xde),
(0xa3,0xef,0xea,0x51,0xe6,0x6b,0x18,0xec,0x1b,0x2c,0x80,0xf7,0x74,0xe7,0xff,0x21),
(0x5a,0x6a,0x54,0x1e,0x41,0x31,0x92,0x35,0xc4,0x33,0x07,0x0a,0xba,0x7e,0x0e,0x34),
(0x88,0xb1,0x98,0x7c,0xf3,0x3d,0x60,0x6c,0x7b,0xca,0xd3,0x1f,0x32,0x65,0x04,0x28),
(0x64,0xbe,0x85,0x9b,0x2f,0x59,0x8a,0xd7,0xb0,0x25,0xac,0xaf,0x12,0x03,0xe2,0xf2)
)
def LAT_dot(a, b):
    # dot product over GF(2): parity of the bitwise AND of the two bytes
    return bin(a & b).count('1') & 1
LAT0 = numpy.zeros( (256,256) )
LAT0 = LAT0.astype(int)
LAT1 = numpy.zeros( (256,256) )
LAT1 = LAT1.astype(int)
def compute_LAT(s_box, LAT):
DOT = numpy.zeros( (256,256) )
DOT = DOT.astype(int)
sbox_val = []
for p2 in range(256):
col = p2 >> 4
row = p2 & 15
sbox_val.append( s_box[row][col] )
for p1 in range(256):
for p2 in range(256):
DOT[p1][p2] = LAT_dot(p1,p2)
for a in range(256):
for b in range(256):
for i in range(256):
LAT[a][b] += DOT[a][i]^(DOT[b,sbox_val[i]])
LAT[a][b] = 256 - LAT[a][b]
LAT[a][b] = LAT[a][b] - 128
#compute S0 LAT
print('*************************ZUC S0 LAT******************')
compute_LAT(S0_Box, LAT0)
print_table(LAT0, './ZUC_S0_LAT.txt')
print('\n')
#compute S1 LAT
print('*************************ZUC S1 LAT******************')
compute_LAT(S1_Box, LAT1)
print_table(LAT1, './ZUC_S1_LAT.txt')
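#interpretation check (added sketch, not part of the original script): each LAT
#entry is (#inputs x where a.x == b.S(x)) - 128, so the entry for a = b = 0 is
#always +128 and the largest remaining absolute value measures how well the
#S-box resists linear cryptanalysis.
check0 = numpy.abs(LAT0)
check0[0][0] = 0
check1 = numpy.abs(LAT1)
check1[0][0] = 0
print('max |LAT| of S0 over non-trivial masks:', int(check0.max()))
print('max |LAT| of S1 over non-trivial masks:', int(check1.max()))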
|
python
|
# -*- coding: utf-8 -*-
# created: 2021-06-30
# creator: [email protected]
import asyncio
import logging
import threading
from abc import abstractmethod
from datetime import datetime
import paho.mqtt.client as mqtt
from gcommon.server.server_config import ServerConfig
from gcommon.utils import gtime
logger = logging.getLogger("mqtt")
class MqttConfig(ServerConfig):
pass
class MqttObserverBase(object):
mqtt_listener = None
def set_mqtt_listener(self, listener):
self.mqtt_listener = listener
@abstractmethod
def on_mqtt_connected(self, _client, _user_data, _flags, rc):
print(_client)
    @abstractmethod
    def on_mqtt_message(self, _client, _user_data, message):
print(message.payload)
class MqttListener(threading.Thread):
def __init__(self, config: MqttConfig, observer: MqttObserverBase):
threading.Thread.__init__(self)
        # daemon thread so the program can exit after Ctrl-C is pressed
self.daemon = True
self.observer = observer
self.config = config
client_id = "rcs" + gtime.date_str_by_minute()
self.client = mqtt.Client(client_id=client_id)
# asyncio loop
self.loop = asyncio.get_running_loop()
def run(self) -> None:
"""注意:所有回调函数都在独立线程中执行"""
self.client.on_connect = self.on_connect
self.client.on_message = self.on_message
self.client.on_subscribe = self.on_subscribe
        # establish the connection; credentials and TLS must be configured
        # before connect() is called
        if self.config.enable_ssl:
            self.client.tls_set()
        self.client.username_pw_set(self.config.username, self.config.password)
        self.client.connect(self.config.server_address, self.config.server_port, 60)
self.client.loop_forever()
def on_subscribe_v5(self, client, userdata, mid, reasonCodes, properties):
pass
def on_subscribe(self, client, userdata, mid, granted_qos):
pass
def on_connect(self, client, userdata, flags, rc):
logger.info('Connected with result code: %s, msg: %s',
str(rc), mqtt.error_string(rc))
if rc != mqtt.MQTT_ERR_SUCCESS:
return
# client.subscribe('robot/')
assert client == self.client
# self.client.subscribe("robot/+/topic/task_status")
self.loop.call_soon_threadsafe(self.observer.on_mqtt_connected, client, userdata, flags, rc)
def subscribe(self, topic, qos=0, options=None, properties=None):
result, mid = self.client.subscribe(topic, qos, options, properties)
if result != mqtt.MQTT_ERR_SUCCESS:
logger.error('cannot subscribe topic: %s, code: %s, msg: %s',
topic, result, mqtt.error_string(result))
return False
return True
def unsubscribe(self, topic, properties=None):
self.client.unsubscribe(topic, properties)
@abstractmethod
def on_message(self, client, userdata, message):
logger.info(message.topic + " " + str(message.payload))
self.loop.call_soon_threadsafe(self.observer.on_mqtt_message, client, userdata, message)
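# Usage sketch (not part of the original module): a concrete observer forwards
# MQTT events into the asyncio world, and the listener thread is started from
# inside a running event loop so that get_running_loop() in __init__ succeeds.
# The config attributes shown (server_address, server_port, username, password,
# enable_ssl) are the ones this module reads; how MqttConfig is populated
# depends on gcommon.server.server_config and is assumed here.
#
#   class MyObserver(MqttObserverBase):
#       def on_mqtt_connected(self, client, user_data, flags, rc):
#           client.subscribe("robot/+/topic/task_status")
#
#       def on_mqtt_message(self, client, user_data, message):
#           logger.info("got %s on %s", message.payload, message.topic)
#
#   async def main(config: MqttConfig):
#       listener = MqttListener(config, MyObserver())
#       listener.start()
#       await asyncio.Event().wait()   # keep the loop alive for the callbacks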
|
python
|
from scipy.optimize import minimize
from numpy.random import random
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm, ticker
from matplotlib.colors import LogNorm
import numpy as np
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import pyplot
import timeit
def beale(x):
f1 = 1.5 - x[0] * ( 1.0 - x[1] )
f2 = 2.25 - x[0] * ( 1.0 - x[1] ** 2 )
f3 = 2.625 - x[0] * ( 1.0 - x[1] ** 3 )
f = f1 ** 2 + f2 ** 2 + f3 ** 2
return f
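# The runs below rely on finite-difference gradients inside `minimize`. As an
# optional refinement (a sketch, not part of the original script), the analytic
# gradient of the Beale function could be supplied via the `jac=` argument,
# e.g. minimize(beale, x0, jac=beale_grad, method='L-BFGS-B').
def beale_grad(x):
    f1 = 1.5 - x[0] * (1.0 - x[1])
    f2 = 2.25 - x[0] * (1.0 - x[1] ** 2)
    f3 = 2.625 - x[0] * (1.0 - x[1] ** 3)
    df_dx0 = -2.0 * (f1 * (1.0 - x[1]) + f2 * (1.0 - x[1] ** 2) + f3 * (1.0 - x[1] ** 3))
    df_dx1 = 2.0 * x[0] * (f1 + 2.0 * f2 * x[1] + 3.0 * f3 * x[1] ** 2)
    return np.array([df_dx0, df_dx1])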
# Plot the function
fig = plt.figure()
ax = Axes3D(fig, azim = -128, elev = 43)
s = .1
X = np.arange(-5, 5.+s, s)
Y = np.arange(-5, 5.+s, s)
X, Y = np.meshgrid(X, Y)
Z = beale([X, Y])
#ax.plot_surface(X, Y, Z, rstride = 1, cstride = 1, norm = LogNorm(), cmap = cm.jet, linewidth=0, edgecolor='none')
ax.plot_surface(X, Y, Z, rstride = 1, cstride = 1, norm=LogNorm(), cmap = cm.jet, linewidth=0, edgecolor='none')
plt.xlabel("x")
plt.ylabel("y")
plt.title("Beale's")
plt.savefig("beale.png")
#########################################
x0s = []
for i in range(0, 30):
x0 = (random(2)-1)*20
x0s.append(x0)
iters = []
feval = []
sol = []
objective = []
times= []
for i in range(0, 30):
start_time = timeit.default_timer()
output = minimize(beale, x0s[i], method='L-BFGS-B', options= {'disp': True})
times.append(timeit.default_timer() - start_time)
iters.append(output.nit)
feval.append(output.nfev)
sol.append(output.x)
objective.append(output.fun)
#####################################
delta = 0.05
s = 0.05
X = np.arange(-3, 5, delta)
Y = np.arange(-3, 3, delta)
X, Y = np.meshgrid(X, Y)
Z = beale([X, Y])
levels = np.arange(10, 300, 10)
#plt.contour(X, Y, Z, levels=levels, norm=LogNorm())
plt.contour(X, Y, Z, levels=[0.1, 0.2, 0.3, 0.5, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 30, 50, 60, 63, 66, 70, 75, 80, 100])
plt.title('Isolines')
plt.xlabel('X1')
plt.ylabel('X2')
xs = []
ys = []
def bstop(xk):
xs.append(np.copy(xk))
ys.append(beale(xk))
xs = [np.array([-1, -1])]
ys = [beale(xs[0])]
minimize(beale, [-1, -1], method='BFGS', callback=bstop, options= {'disp': True})
linex = [-1]
liney = [-1]
for i in xs:
linex.append(i[0])
liney.append(i[1])
bfgs_y = list(ys)
bfgs, = plt.plot(linex, liney, '-o', label='BFGS')
xs = [np.array([-1, -1])]
ys = [beale(xs[0])]
minimize(beale, [-1, -1], method='L-BFGS-B', callback=bstop, options= {'disp': True})
linex = [-1]
liney = [-1]
for i in xs:
linex.append(i[0])
liney.append(i[1])
lbfgsb_y = list(ys)
lbfgsb, = plt.plot(linex, liney, '-s', label='L-BFGS-B')
xs = [
np.array([-1, -1]),
np.array([0, -2.076923e-01]),
np.array([1.101268e+00, -9.677930e-01]),
np.array([8.970397e-01, -5.260371e-01]),
np.array([1.085339e+00, -5.058077e-01]),
np.array([1.832440e+00, -2.907016e-01]),
np.array([2.198566e+00, -5.155961e-02]),
np.array([2.692337e+00, 3.684094e-01]),
np.array([2.789503e+00, 4.511403e-01]),
np.array([2.795133e+00, 4.487888e-01]),
np.array([2.818547e+00, 4.483392e-01]),
np.array([2.840796e+00, 4.519267e-01]),
np.array([2.885289e+00, 4.612113e-01]),
np.array([2.923265e+00, 4.707860e-01]),
np.array([2.980495e+00, 4.865466e-01]),
np.array([3.024381e+00, 4.997452e-01]),
np.array([3.043476e+00, 5.064746e-01]),
np.array([3.047318e+00, 5.090894e-01]),
np.array([3.042225e+00, 5.097113e-01]),
np.array([3.030713e+00, 5.080590e-01]),
np.array([3.016008e+00, 5.050824e-01]),
np.array([3.006359e+00, 5.026518e-01]),
np.array([2.999553e+00, 5.005949e-01]),
np.array([2.997714e+00, 4.997436e-01]),
np.array([2.998416e+00, 4.996591e-01]),
np.array([2.999443e+00, 4.998514e-01]),
np.array([2.999928e+00, 4.999741e-01]),
np.array([3.000001e+00, 4.999987e-01])
]
ys = [
3.870312e+01,
1.420312e+01,
5.474402e+00,
5.132615e+00,
4.056161e+00,
1.634935e+00,
8.440893e-01,
5.062609e-02,
1.015695e-02,
8.785395e-03,
6.671388e-03,
5.511229e-03,
3.959797e-03,
2.900633e-03,
1.691332e-03,
1.011259e-03,
6.995383e-04,
4.831696e-04,
2.805545e-04,
1.529509e-04,
7.094062e-05,
3.357103e-05,
1.152309e-05,
3.063215e-06,
4.650093e-07,
5.222919e-08,
2.294078e-09,
4.511352e-11,
1.837179e-13
]
linex = []
liney = []
for i in xs:
linex.append(i[0])
liney.append(i[1])
powell_y = list(ys)
powell, = plt.plot(linex, liney, '-^', label='DFP')
plt.legend(handles=[bfgs, lbfgsb, powell])
plt.title('Isolines')
plt.xlabel('x1')
plt.ylabel('x2')
plt.figure()
b, = plt.plot(bfgs_y, '-o', label='BFGS')
l, = plt.plot(lbfgsb_y, '-s', label='L-BFGS-B')
p, = plt.plot(powell_y, '-^', label='DFP')
pyplot.yscale('log')
plt.grid(True)
plt.title('Objective')
plt.legend(handles=[b, l, p])
plt.xlabel('Number of Iterations')
plt.ylabel('Objective')
|
python
|
# Generated by Django 3.0.11 on 2020-11-11 20:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0003_auto_20201111_1505'),
]
operations = [
migrations.AlterField(
model_name='language',
name='listing',
field=models.ManyToManyField(blank=True, related_name='languages', to='posts.Post'),
),
migrations.AlterField(
model_name='tool',
name='listing',
field=models.ManyToManyField(blank=True, related_name='tools', to='posts.Post'),
),
]
|
python
|
## Written by Daniel Buscombe,
## MARDA Science
## [email protected]
##> Release v1.3 (July 2020)
###===================================================
# import libraries
import sys, getopt, json, os
# set to False if you wish to use cpu (not recommended)
##True or False
USE_GPU = True
# PREDICT = False
#
# ##OS
# if PREDICT == True:
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
if USE_GPU == True:
##use the first available GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
else:
## to use the CPU (not recommended):
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
from numpy import any as npany
from sedinet_infer import *
#==============================================================
if __name__ == '__main__':
argv = sys.argv[1:]
try:
        opts, args = getopt.getopt(argv,"hc:")
except getopt.GetoptError:
print('python sedinet_train.py -c configfile.json')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(
'Example usage: python sedinet_train.py -c config/config_9percentiles.json'
)
sys.exit()
elif opt in ("-c"):
configfile = arg
# load the user configs
with open(os.getcwd()+os.sep+configfile) as f:
config = json.load(f)
###===================================================
    ## user-defined variables read from the config file
train_csvfile = config["train_csvfile"]
#csvfile containing image names and class values
test_csvfile = config["test_csvfile"]
#csvfile containing image names and class values
res_folder = config["res_folder"]
#folder containing csv file and that will contain model outputs
name = config["name"]
#name prefix for output files
    dropout = config["dropout"]
    #dropout factor
    scale = config["scale"] #do scaling on variable
    try:
        numclass = config['numclass']
    except KeyError:
        numclass = 0
    try:
        greyscale = config['greyscale']
        #convert imagery to greyscale or not
    except KeyError:
        greyscale = True
#output variables
vars = [k for k in config.keys() if not npany([k.startswith('base'),
k.startswith('MIN_LR'), k.startswith('DO_AUG'), k.startswith('SHALLOW'), k.startswith('MAX_LR'),
k.startswith('res_folder'), k.startswith('train_csvfile'), k.startswith('csvfile'),
k.startswith('test_csvfile'), k.startswith('name'),
k.startswith('greyscale'), k.startswith('aux_in'),
k.startswith('dropout'), k.startswith('N'),
k.startswith('scale'), k.startswith('numclass')])]
vars = sorted(vars)
auxin = [k for k in config.keys() if k.startswith('aux_in')]
if len(auxin) > 0:
auxin = config[auxin[0]]
##at least for now, just one 'auxilliary'
## (numerical/categorical) input in addition to imagery
if len(vars) ==1:
mode = 'miso'
elif len(vars) >1:
mode = 'mimo'
else:
if len(vars) ==1:
mode = 'siso'
elif len(vars) >1:
mode = 'simo'
print("Mode: %s" % (mode))
###==================================================
train_csvfile = res_folder+os.sep+train_csvfile
test_csvfile = res_folder+os.sep+test_csvfile
if (mode=='siso' or mode=='simo'):
run_training_siso_simo(vars, train_csvfile, test_csvfile,
name, res_folder, mode, greyscale,
dropout, numclass, scale)
# if (mode=='miso' or mode=='mimo'):
# run_training_miso_mimo(vars, train_csvfile, test_csvfile,
# name, res_folder, mode, greyscale,
# auxin, dropout, numclass, scale)
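    ###===================================================
    ## Example config sketch (not part of the original script). It lists only the
    ## keys this script actually reads; the file names and the output-variable
    ## names ("P20", "P50", "P80") below are placeholders, not values taken from
    ## the original repository. Only the names of the output-variable keys matter
    ## here; their values are consumed downstream by the sedinet_infer routines.
    #
    # {
    #   "train_csvfile": "train.csv",
    #   "test_csvfile": "test.csv",
    #   "res_folder": "results",
    #   "name": "my_model",
    #   "dropout": 0.5,
    #   "scale": true,
    #   "greyscale": true,
    #   "numclass": 0,
    #   "P20": "...", "P50": "...", "P80": "..."
    # }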
|
python
|
"""Expected errors."""
import inspect
import sys
UNREPRODUCIBLE_SUGGESTION_TEXT = (
'Here are things you can try:\n'
'- Run outside XVFB (e.g. you will be able to see the launched program '
'on screen.) with `--disable-xvfb`, which is especially useful for '
'Chrome.\n'
'- Run with the downloaded build by adding `--build download`.\n'
'- Run `build/install-build-deps.sh` to ensure all dependencies are '
'installed.\n'
'- Run with more number of trials by adding `-i 10`, '
'which is especially good for gesture-related testcases.\n'
'- Use gdb to debug by adding `--enable-debug`.')
def get_class(exit_code):
"""Get class name given an exit code."""
code_to_klass = {}
for _, obj in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(obj) and obj != ExpectedException:
if obj.EXIT_CODE not in code_to_klass:
code_to_klass[obj.EXIT_CODE] = obj
else:
raise Exception(
'%s and %s have the same exit code.' % (
code_to_klass[obj.EXIT_CODE].__name__, obj.__name__))
return code_to_klass.get(exit_code, UnknownExitCodeError)
class ExpectedException(Exception):
"""A general Exception to extend from."""
def __init__(self, message, exit_code, extras=None):
super(ExpectedException, self).__init__(message)
self.extras = extras
self.exit_code = exit_code
class UnknownExitCodeError(ExpectedException):
"""Represents an unknown exit code error."""
EXIT_CODE = 256
class MinimizationNotFinishedError(ExpectedException):
"""Raise when the minimize_task failed or hasn't finished yet. When the
minimization is not finished, we won't find 'Running command: ' in the
stacktrace."""
MESSAGE = (
'The testcase hasn\'t been minimized yet or cannot be minimized.\n'
'If the testcase is new, please wait for a few more hours.\n'
'If we can\'t minimize the testcase, it means the testcase is '
'unreproducible and, thus, not supported by this tool.\n'
      'If this testcase was found by AFL or libFuzzer, you can use the "-f" '
      'flag to force this tool to try to reproduce the testcase.'
)
EXIT_CODE = 42
def __init__(self):
super(MinimizationNotFinishedError, self).__init__(
self.MESSAGE, self.EXIT_CODE)
class SanitizerNotProvidedError(ExpectedException):
"""An error to notify when a sanitizer isn't passed to a Definition"""
MESSAGE = 'A sanitizer must be provided with each Definition.'
EXIT_CODE = 43
def __init__(self):
super(SanitizerNotProvidedError, self).__init__(
self.MESSAGE, self.EXIT_CODE)
class ClusterFuzzError(ExpectedException):
"""An exception to deal with clusterfuzz.com's errors.
Makes the response dict available for inspection later on when
the exception is dealt with."""
MESSAGE = (
"Error calling clusterfuzz.com's API.\n"
'User: {identity}\n'
"Response: {response}")
EXIT_CODE = 44
def __init__(self, status_code, response, identity):
super(ClusterFuzzError, self).__init__(
self.MESSAGE.format(response=str(response), identity=identity),
self.EXIT_CODE)
self.status_code = status_code
self.response = response
self.identity = identity
class PermissionsTooPermissiveError(ExpectedException):
"""An exception to deal with file permissions errors.
  Stores the filename and the current permissions."""
MESSAGE = ('File permissions too permissive to open {filename}\n'
'Current permissions: {permission}\nExpected user access only'
             '\nYou can run "chmod 600 {filename}" to fix this issue')
EXIT_CODE = 45
def __init__(self, filename, current_permissions):
super(PermissionsTooPermissiveError, self).__init__(
self.MESSAGE.format(filename=filename, permission=current_permissions),
self.EXIT_CODE)
self.filename = filename
self.current_permissions = current_permissions
class GomaNotInstalledError(ExpectedException):
"""An exception to tell people GOMA isn not installed."""
MESSAGE = ('Either goma is not installed, or $GOMA_DIR is not set.'
' Please set up goma before continuing. '
'See go/ma to learn more.\n\n'
"If you wouldn't like to use goma, "
'please re-run with --disable-goma.')
EXIT_CODE = 46
def __init__(self):
super(GomaNotInstalledError, self).__init__(self.MESSAGE, self.EXIT_CODE)
class JobTypeNotSupportedError(ExpectedException):
"""An exception raised when user tries to run an unsupported build type."""
# pylint: disable=line-too-long
MESSAGE = (
      'Unfortunately, the job {job_type} is not yet supported. '
'If you believe that the crash will occur on Linux as well, please go '
'to https://clusterfuzz.com/upload-testcase?upload=true&testcaseId={testcase_id} '
'and choose a corresponding Linux job type. Ask us for help at '
'[email protected].')
# pylint: enable=line-too-long
EXIT_CODE = 47
def __init__(self, job_type, testcase_id):
super(JobTypeNotSupportedError, self).__init__(
self.MESSAGE.format(job_type=job_type, testcase_id=testcase_id),
self.EXIT_CODE)
class NotInstalledError(ExpectedException):
"""An exception raised to tell the user to install the required binary."""
MESSAGE = (
'{binary} is not found. Please install it or ensure the path is '
'correct.\n'
'Most of the time you can install it with `apt-get install {binary}`.')
EXIT_CODE = 48
def __init__(self, binary):
super(NotInstalledError, self).__init__(
self.MESSAGE.format(binary=binary), self.EXIT_CODE)
class GsutilNotInstalledError(ExpectedException):
"""An exception raised to tell the user to install the required binary."""
MESSAGE = (
'gsutil is not installed. Please install it. See:'
'https://cloud.google.com/storage/docs/gsutil_install')
EXIT_CODE = 49
def __init__(self):
super(GsutilNotInstalledError, self).__init__(self.MESSAGE, self.EXIT_CODE)
class BadJobTypeDefinitionError(ExpectedException):
"""An exception raised when a job type description is malformed."""
MESSAGE = (
'The definition for the {job_type} job type is incorrectly formatted or'
' missing crucial information.')
EXIT_CODE = 50
def __init__(self, job_type):
super(BadJobTypeDefinitionError, self).__init__(
self.MESSAGE.format(job_type=job_type), self.EXIT_CODE)
class UnreproducibleError(ExpectedException):
"""An exception raised when the crash cannot be reproduced."""
MESSAGE = (
'The crash cannot be reproduced after trying {count} times.\n'
+ UNREPRODUCIBLE_SUGGESTION_TEXT)
EXIT_CODE = 51
def __init__(self, count, crash_signatures):
crash_signatures = [
{'type': s.crash_type, 'state': s.crash_state_lines,
'output': s.output[:100000]}
for s in list(crash_signatures)[:10]
]
super(UnreproducibleError, self).__init__(
message=self.MESSAGE.format(count=count),
exit_code=self.EXIT_CODE,
extras={'signatures': crash_signatures})
class DirtyRepoError(ExpectedException):
"""An exception raised when the repo is dirty. Therefore, we cannot checkout
to a wanted sha."""
MESSAGE = (
"We can't run the checkout command because {source_dir} has "
      'uncommitted changes.\n'
      'Please commit or stash these changes and re-run this tool.')
EXIT_CODE = 52
def __init__(self, source_dir):
super(DirtyRepoError, self).__init__(
self.MESSAGE.format(source_dir=source_dir), self.EXIT_CODE)
class CommandFailedError(ExpectedException):
"""An exception raised when the command doesn't return 0."""
MESSAGE = '`{cmd}` failed with the return code {returncode}.'
EXIT_CODE = 53
def __init__(self, command, returncode, stderr):
super(CommandFailedError, self).__init__(
self.MESSAGE.format(cmd=command, returncode=returncode),
self.EXIT_CODE,
extras={'stderr': stderr[:100000]})
class KillProcessFailedError(ExpectedException):
"""An exception raised when the process cannot be killed."""
MESSAGE = '`{command}` (pid={pid}) cannot be killed.'
EXIT_CODE = 54
def __init__(self, command, pid):
super(KillProcessFailedError, self).__init__(
self.MESSAGE.format(command=command, pid=pid),
self.EXIT_CODE)
class UserRespondingNoError(ExpectedException):
"""An exception raised when the user decides not to proceed."""
MESSAGE = 'User responding "no" to "{question}"'
EXIT_CODE = 55
def __init__(self, question):
super(UserRespondingNoError, self).__init__(
self.MESSAGE.format(question=question),
self.EXIT_CODE)
class InvalidTestcaseIdError(ExpectedException):
"""An exception when the testcase id is invalid."""
MESSAGE = (
'The testcase ID ({testcase_id}) is invalid.\n'
"Please double-check if there's a typo.\n"
'Also, can you access '
'https://clusterfuzz.com/testcase-detail/{testcase_id} ?')
EXIT_CODE = 56
def __init__(self, testcase_id):
super(InvalidTestcaseIdError, self).__init__(
self.MESSAGE.format(testcase_id=str(testcase_id)), self.EXIT_CODE)
class UnauthorizedError(ExpectedException):
"""An exception when the user cannot access the testcase."""
MESSAGE = (
"You ({identity}) aren't allowed to access the testcase ID "
'({testcase_id}). Can you access '
'https://clusterfuzz.com/testcase-detail/{testcase_id} ?')
EXIT_CODE = 57
def __init__(self, testcase_id, identity):
super(UnauthorizedError, self).__init__(
self.MESSAGE.format(identity=identity, testcase_id=str(testcase_id)),
self.EXIT_CODE)
class DifferentStacktraceError(ExpectedException):
"""An exception raised when the resulting crash is different."""
MESSAGE = (
'The original crash cannot be reproduced after trying {count} times.\n'
'But it seems we get a different stacktrace. Could you check if the '
'stacktrace is good enough?\n\n' + UNREPRODUCIBLE_SUGGESTION_TEXT)
EXIT_CODE = 58
def __init__(self, count, crash_signatures):
crash_signatures = [
{'type': s.crash_type, 'state': s.crash_state_lines,
'output': s.output[:50000]}
for s in list(crash_signatures)[:10]
]
super(DifferentStacktraceError, self).__init__(
message=self.MESSAGE.format(count=count),
exit_code=self.EXIT_CODE,
extras={'signatures': crash_signatures})
class GdbNotSupportedOnAndroidError(ExpectedException):
"""An exception raised when debug is enabled on Android."""
MESSAGE = "--enable-debug (or gdb) isn't supported in Android."
EXIT_CODE = 59
def __init__(self):
super(GdbNotSupportedOnAndroidError, self).__init__(
message=self.MESSAGE, exit_code=self.EXIT_CODE)
class BootFailed(ExpectedException):
"""An exception is raised after device failed to complete boot."""
MESSAGE = (
'Device failed to finish boot. Please inspect logcat output to '
'identify the issue.')
EXIT_CODE = 60
def __init__(self):
super(BootFailed, self).__init__(
message=self.MESSAGE, exit_code=self.EXIT_CODE)
class NoAndroidDeviceIdError(ExpectedException):
"""An exception is raised after installing ASAN on Android"""
MESSAGE = 'Please set the target Android device ID as the env {env_name}.'
EXIT_CODE = 61
def __init__(self, env_name):
super(NoAndroidDeviceIdError, self).__init__(
message=self.MESSAGE.format(env_name=env_name),
exit_code=self.EXIT_CODE)
class GclientManagedEnabledException(ExpectedException):
"""An exception is raised when .gclient contains managed=True."""
# pylint: disable=line-too-long
MESSAGE = (
      'Please disable `managed` in {dot_gclient_path}. `managed=True` has '
      'been deprecated, and it checks out the repo to a wrong SHA. See: '
'https://www.chromium.org/developers/how-tos/get-the-code/gclient-managed-mode'
)
# pylint: enable=line-too-long
EXIT_CODE = 62
def __init__(self, dot_gclient_path):
super(GclientManagedEnabledException, self).__init__(
message=self.MESSAGE.format(dot_gclient_path=dot_gclient_path),
exit_code=self.EXIT_CODE)
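if __name__ == '__main__':
  # Small self-check sketch (not part of the original module): map a couple of
  # exit codes back to their exception classes and show how one of the errors
  # formats its message.
  print(get_class(46).__name__)   # GomaNotInstalledError
  print(get_class(999).__name__)  # UnknownExitCodeError (fallback)
  try:
    raise NotInstalledError('gdb')
  except ExpectedException as e:
    print(e.exit_code, str(e))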
|
python
|
import json
import os
import fire
import numpy as np
import tensorflow as tf
from tensorflow.contrib.training import HParams
import model, sample, encoder
def interact_model(
model_name='124M',
seed=None,
nsamples=1,
batch_size=1,
length=None,
temperature=1,
top_k=0,
top_p=1,
models_dir='models',
):
"""
Interactively run the model
:model_name=124M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to reproduce
results
:nsamples=1 : Number of samples to return total
:batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
    :temperature=1 : Float value controlling randomness in Boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:models_dir : path to parent folder containing model subfolders
(i.e. contains the <model_name> folder)
"""
models_dir = os.path.expanduser(os.path.expandvars(models_dir))
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name, models_dir)
hparams = model.default_hparams()
with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx // 2
elif length > hparams.n_ctx:
raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
with tf.Session(graph=tf.Graph()) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
print("hparams :",hparams,'\n',
"length :", length,'\n',
"context :", context,'\n',
"batch_size :",batch_size,'\n',
"temperature :", temperature,'\n',
"top_k :",top_k,'\n',
"top_p :",top_p,'\n')
'''
hparams : [('n_ctx', 1024), ('n_embd', 768), ('n_head', 12), ('n_layer', 12), ('n_vocab', 50257)]
length : 512
context : Tensor("Placeholder:0", shape=(1, ?), dtype=int32)
batch_size : 1
temperature : 1
top_k : 0
top_p : 1
'''
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
saver.restore(sess, ckpt)
while True:
raw_text = input("Model prompt >>> ")
while not raw_text:
print('Prompt should not be empty!')
raw_text = input("Model prompt >>> ")
context_tokens = enc.encode(raw_text)
generated = 0
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
print(text)
print("=" * 80)
if __name__ == '__main__':
fire.Fire(interact_model)
################################################################################
def default_hparams():
return HParams(
n_vocab=0,
n_ctx=1024,
n_embd=768,
n_head=12,
n_layer=12,
)
def shape_list(x):
"""Deal with dynamic shape in tensorflow cleanly."""
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def softmax(x, axis=-1):
x = x - tf.reduce_max(x, axis=axis, keepdims=True)
ex = tf.exp(x)
return ex / tf.reduce_sum(ex, axis=axis, keepdims=True)
def gelu(x):
return 0.5*x*(1+tf.tanh(np.sqrt(2/np.pi)*(x+0.044715*tf.pow(x, 3))))
def norm(x, scope, *, axis=-1, epsilon=1e-5):
"""Normalize to mean = 0, std = 1, then do a diagonal affine transform."""
with tf.variable_scope(scope):
n_state = x.shape[-1].value
g = tf.get_variable('g', [n_state], initializer=tf.constant_initializer(1))
b = tf.get_variable('b', [n_state], initializer=tf.constant_initializer(0))
u = tf.reduce_mean(x, axis=axis, keepdims=True)
s = tf.reduce_mean(tf.square(x-u), axis=axis, keepdims=True)
x = (x - u) * tf.rsqrt(s + epsilon)
x = x*g + b
return x
def split_states(x, n):
"""Reshape the last dimension of x into [n, x.shape[-1]/n]."""
*start, m = shape_list(x)
return tf.reshape(x, start + [n, m//n])
def merge_states(x):
"""Smash the last two dimensions of x into a single dimension."""
*start, a, b = shape_list(x)
return tf.reshape(x, start + [a*b])
def conv1d(x, scope, nf, *, w_init_stdev=0.02):
with tf.variable_scope(scope):
*start, nx = shape_list(x)
w = tf.get_variable('w', [1, nx, nf], initializer=tf.random_normal_initializer(stddev=w_init_stdev))
b = tf.get_variable('b', [nf], initializer=tf.constant_initializer(0))
c = tf.reshape(tf.matmul(tf.reshape(x, [-1, nx]), tf.reshape(w, [-1, nf]))+b, start+[nf])
return c
def attention_mask(nd, ns, *, dtype):
"""1's in the lower triangle, counting from the lower right corner.
Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
"""
i = tf.range(nd)[:,None]
j = tf.range(ns)
m = i >= j - ns + nd
return tf.cast(m, dtype)
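# Illustrative example (not in the original source): attention_mask(2, 4) evaluates to
# [[1, 1, 1, 0],
#  [1, 1, 1, 1]]
# i.e. each query position may attend to itself and everything before it, counting from the lower-right corner.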
def attn(x, scope, n_state, *, past, hparams):
assert x.shape.ndims == 3 # Should be [batch, sequence, features]
assert n_state % hparams.n_head == 0
if past is not None:
assert past.shape.ndims == 5 # Should be [batch, 2, heads, sequence, features], where 2 is [k, v]
def split_heads(x):
# From [batch, sequence, features] to [batch, heads, sequence, features]
return tf.transpose(split_states(x, hparams.n_head), [0, 2, 1, 3])
def merge_heads(x):
# Reverse of split_heads
return merge_states(tf.transpose(x, [0, 2, 1, 3]))
def mask_attn_weights(w):
# w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w)
b = attention_mask(nd, ns, dtype=w.dtype)
b = tf.reshape(b, [1, 1, nd, ns])
w = w*b - tf.cast(1e10, w.dtype)*(1-b)
return w
def multihead_attn(q, k, v):
# q, k, v have shape [batch, heads, sequence, features]
w = tf.matmul(q, k, transpose_b=True)
w = w * tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype))
w = mask_attn_weights(w)
w = softmax(w)
a = tf.matmul(w, v)
return a
with tf.variable_scope(scope):
c = conv1d(x, 'c_attn', n_state*3)
q, k, v = map(split_heads, tf.split(c, 3, axis=2))
present = tf.stack([k, v], axis=1)
if past is not None:
pk, pv = tf.unstack(past, axis=1)
k = tf.concat([pk, k], axis=-2)
v = tf.concat([pv, v], axis=-2)
a = multihead_attn(q, k, v)
a = merge_heads(a)
a = conv1d(a, 'c_proj', n_state)
return a, present
def mlp(x, scope, n_state, *, hparams):
with tf.variable_scope(scope):
nx = x.shape[-1].value
h = gelu(conv1d(x, 'c_fc', n_state))
h2 = conv1d(h, 'c_proj', nx)
return h2
def block(x, scope, *, past, hparams):
with tf.variable_scope(scope):
nx = x.shape[-1].value
a, present = attn(norm(x, 'ln_1'), 'attn', nx, past=past, hparams=hparams)
x = x + a
m = mlp(norm(x, 'ln_2'), 'mlp', nx*4, hparams=hparams)
x = x + m
return x, present
def past_shape(*, hparams, batch_size=None, sequence=None):
return [batch_size, hparams.n_layer, 2, hparams.n_head, sequence, hparams.n_embd // hparams.n_head]
def expand_tile(value, size):
"""Add a new axis of given size."""
value = tf.convert_to_tensor(value, name='value')
ndims = value.shape.ndims
return tf.tile(tf.expand_dims(value, axis=0), [size] + [1]*ndims)
def positions_for(tokens, past_length):
batch_size = tf.shape(tokens)[0]
nsteps = tf.shape(tokens)[1]
return expand_tile(past_length + tf.range(nsteps), batch_size)
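# Illustrative example (not in the original source): for tokens of shape [2, 3] and past_length 5,
# positions_for returns [[5, 6, 7], [5, 6, 7]].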
def model(hparams, X, past=None, scope='model', reuse=False):
with tf.variable_scope(scope, reuse=reuse):
results = {}
batch, sequence = shape_list(X)
wpe = tf.get_variable('wpe', [hparams.n_ctx, hparams.n_embd],
initializer=tf.random_normal_initializer(stddev=0.01))
wte = tf.get_variable('wte', [hparams.n_vocab, hparams.n_embd],
initializer=tf.random_normal_initializer(stddev=0.02))
past_length = 0 if past is None else tf.shape(past)[-2]
h = tf.gather(wte, X) + tf.gather(wpe, positions_for(X, past_length))
# Transformer
presents = []
pasts = tf.unstack(past, axis=1) if past is not None else [None] * hparams.n_layer
assert len(pasts) == hparams.n_layer
for layer, past in enumerate(pasts):
h, present = block(h, 'h%d' % layer, past=past, hparams=hparams)
presents.append(present)
results['present'] = tf.stack(presents, axis=1)
h = norm(h, 'ln_f')
# Language model loss. Do tokens <n predict token n?
h_flat = tf.reshape(h, [batch*sequence, hparams.n_embd])
logits = tf.matmul(h_flat, wte, transpose_b=True)
print(logits)
logits = tf.reshape(logits, [batch, sequence, hparams.n_vocab])
results['logits'] = logits
return results
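# Note (added for clarity): model() returns results['logits'] with shape [batch, sequence, n_vocab]
# and results['present'] matching past_shape(hparams=hparams, batch_size=batch) -- the stacked
# per-layer key/value tensors that are fed back in as `past` on the next decoding step.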
###########################################################################
def top_k_logits(logits, k):
if k == 0:
        # no truncation
return logits
def _top_k():
values, _ = tf.nn.top_k(logits, k=k)
min_values = values[:, -1, tf.newaxis]
print(min_values)
        # tf.newaxis adds a new trailing axis so min_values broadcasts against logits
return tf.where(
            # tf.where(condition, x, y): returns x where condition is True and y where it is False.
            # If x and y are omitted, it instead returns the coordinates of the True elements (a 2-D tensor).
logits < min_values,
            tf.ones_like(logits, dtype=logits.dtype) * -1e10,  # -1e10 = negative ten billion
            # tf.ones_like: returns a tensor of the same dtype and shape, with every element set to 1.
logits,
)
    # tf.cond: if tf.equal(k, 0) is true, the logits branch runs and _top_k() is never executed.
return tf.cond(
tf.equal(k, 0), # k == 0
lambda: logits,
lambda: _top_k(),
)
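# Illustrative example (not in the original source): with k=2 and a row of logits [1.0, 3.0, 2.0, 0.5],
# the 2nd-largest value is 2.0, so the entries 1.0 and 0.5 are pushed down to -1e10 and only the
# top-2 candidates remain eligible for sampling.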
def top_p_logits(logits, p):
"""핵심 sampling"""
batch, _ = logits.shape.as_list()
sorted_logits = tf.sort(logits, direction='DESCENDING', axis=-1)
    # Sort the logits in descending order; tf.sort defaults to axis=-1.
cumulative_probs = tf.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
    # tf.cumsum computes the cumulative sum, e.g. [a, b, c] -> [a, a+b, a+b+c].
indices = tf.stack([
        # "indices" is simply the plural of "index".
        # tf.stack: stacking N tensors of shape (a, b, c) gives (N, a, b, c) with axis=0, or (a, N, b, c) with axis=1.
tf.range(0, batch),
# number of indices to include
tf.maximum(tf.reduce_sum(tf.cast(cumulative_probs <= p, tf.int32), axis=-1) - 1, 0),
        # tf.cast converts the boolean mask (cumulative_probs <= p) to integers.
        # tf.reduce_sum then counts, per row, how many tokens fall within the cumulative probability p.
        # Subtracting 1 turns that count into the index of the last token to keep,
        # and tf.maximum clamps the index to be at least 0 so one token is always kept.
], axis=-1)
min_values = tf.gather_nd(sorted_logits, indices)
return tf.where(
logits < min_values,
tf.ones_like(logits) * -1e10,
logits,
)
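# Illustrative example (not in the original source): with p=0.9 the smallest set of highest-probability
# tokens whose cumulative probability covers 0.9 is kept per row, and every other logit is set to -1e10.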
def sample_sequence(*, hparams, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, top_p=1):
if start_token is None:
assert context is not None, 'Specify exactly one of start_token and context!'
        # start_token is None here, so a context must be given:
        # exactly one of start_token and context must be specified.
else:
assert context is None, 'Specify exactly one of start_token and context!'
context = tf.fill([batch_size, 1], start_token)
        # Fill a [batch_size, 1] tensor with start_token.
        print("context:", context)
def step(hparams, tokens, past=None):
lm_output = model.model(hparams=hparams, X=tokens, past=past, reuse=tf.AUTO_REUSE)
        # reuse=tf.AUTO_REUSE: creates the variables if they do not exist yet, otherwise reuses them.
logits = lm_output['logits'][:, :, :hparams.n_vocab]
presents = lm_output['present']
presents.set_shape(model.past_shape(hparams=hparams, batch_size=batch_size))
return {
'logits': logits,
'presents': presents,
}
    with tf.name_scope('sample_sequence'):  # name scope for the sampling ops
def body(past, prev, output):
next_outputs = step(hparams, prev, past=past)
logits = next_outputs['logits'][:, -1, :] / tf.to_float(temperature)
logits = top_k_logits(logits, k=top_k)
logits = top_p_logits(logits, p=top_p)
samples = tf.multinomial(logits, num_samples=1, output_dtype=tf.int32)
            # tf.multinomial draws samples from a categorical (multinomial) distribution over the logits.
return [
next_outputs['presents'] if past is None else tf.concat([past, next_outputs['presents']], axis=-2),
samples,
tf.concat([output, samples], axis=1)
]
past, prev, output = body(None, context, context)
def cond(*args):
            # *args: accepts any number of positional arguments (the loop variables), all ignored here.
return True
_, _, tokens = tf.while_loop(
cond=cond, body=body,
maximum_iterations=length - 1,
loop_vars=[
past,
prev,
output
],
shape_invariants=[
tf.TensorShape(model.past_shape(hparams=hparams, batch_size=batch_size)),
tf.TensorShape([batch_size, None]),
tf.TensorShape([batch_size, None]),
],
back_prop=False,
)
return tokens
# Example invocation (illustrative; the values are those captured by the debug print above,
# and in the real script `context` is the tf.placeholder created in interact_model):
# sample_sequence(
#     hparams=default_hparams(),   # n_ctx=1024, n_embd=768, n_head=12, n_layer=12, n_vocab=50257
#     length=512,
#     context=context,             # Tensor("Placeholder:0", shape=(1, ?), dtype=int32)
#     batch_size=1,
#     temperature=1,
#     top_k=0,
#     top_p=1)
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.datacatalog_v1.types import common
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.datacatalog.v1",
manifest={"SearchResultType", "SearchCatalogResult",},
)
class SearchResultType(proto.Enum):
r"""The different types of resources that can be returned in
search.
"""
SEARCH_RESULT_TYPE_UNSPECIFIED = 0
ENTRY = 1
TAG_TEMPLATE = 2
ENTRY_GROUP = 3
class SearchCatalogResult(proto.Message):
r"""A result that appears in the response of a search request.
Each result captures details of one entry that matches the
search.
Attributes:
search_result_type (google.cloud.datacatalog_v1.types.SearchResultType):
Type of the search result. This field can be
used to determine which Get method to call to
fetch the full resource.
search_result_subtype (str):
Sub-type of the search result. This is a dot-delimited
description of the resource's full type, and is the same as
the value callers would provide in the "type" search facet.
Examples: ``entry.table``, ``entry.dataStream``,
``tagTemplate``.
relative_resource_name (str):
The relative resource name of the resource in URL format.
Examples:
- ``projects/{project_id}/locations/{location_id}/entryGroups/{entry_group_id}/entries/{entry_id}``
- ``projects/{project_id}/tagTemplates/{tag_template_id}``
linked_resource (str):
The full name of the cloud resource the entry belongs to.
See:
https://cloud.google.com/apis/design/resource_names#full_resource_name.
Example:
- ``//bigquery.googleapis.com/projects/projectId/datasets/datasetId/tables/tableId``
modify_time (google.protobuf.timestamp_pb2.Timestamp):
Last-modified timestamp of the entry from the
managing system.
integrated_system (google.cloud.datacatalog_v1.types.IntegratedSystem):
Output only. This field indicates the entry's
source system that Data Catalog integrates with,
such as BigQuery or Cloud Pub/Sub.
user_specified_system (str):
This field indicates the entry's source
system that Data Catalog does not integrate
with.
fully_qualified_name (str):
Fully Qualified Name of the resource. There are two main
forms of FQNs: {system}:{project}.{dot-separated path to
resource} for non-regionalized resources
{system}:{project}.{location id}.{dot-separated path to
resource} for regionalized resources Examples:
- dataproc_metastore:projectId.locationId.instanceId.databaseId.tableId
- bigquery:table.project_id.dataset_id.table_id
"""
search_result_type = proto.Field(proto.ENUM, number=1, enum="SearchResultType",)
search_result_subtype = proto.Field(proto.STRING, number=2,)
relative_resource_name = proto.Field(proto.STRING, number=3,)
linked_resource = proto.Field(proto.STRING, number=4,)
modify_time = proto.Field(proto.MESSAGE, number=7, message=timestamp_pb2.Timestamp,)
integrated_system = proto.Field(
proto.ENUM, number=8, oneof="system", enum=common.IntegratedSystem,
)
user_specified_system = proto.Field(proto.STRING, number=9, oneof="system",)
fully_qualified_name = proto.Field(proto.STRING, number=10,)
__all__ = tuple(sorted(__protobuf__.manifest))
|
python
|
from datetime import date, datetime
from typing import Any, Dict
from dateutil import relativedelta
from django.db.models import Sum, Count
from em.models import Account, Transaction
class AccountHelper(object):
date_fmt = '%m-%Y'
day_fmt = '%Y-%m-%d'
@staticmethod
def get_spendings(overall_expns, **filters):
spendings = list()
account_labels = list()
account_values = list()
# print(Transaction.objects.filter(date=date.today()).annotate(Sum('amount'))) #.aggregate(expense=Sum('amount')).get('expense'))
# print(Transaction.objects.values('account__name', 'account').filter(date=date.today()).annotate(Sum('amount'))) #.aggregate(expense=Sum('amount')).get('expense'))
spendings = Transaction.objects.values('account__name', 'account').filter(**filters).annotate(spendings=Sum('amount'))
if spendings:
for spending in spendings:
account_labels.append(spending.get('account__name'))
account_values.append(spending.get('spendings'))
return spendings, account_labels, account_values
# if overall_expns:
# for account in Account.objects.all():
# amount = Transaction.objects\
# .filter(account=account, **filters)\
# .aggregate(amount=Sum('amount')).get('amount')
# if amount:
# spendings.append({
# 'account': account,
# 'spendings': amount,
# 'percentage': int((amount / overall_expns) * 100)
# })
# account_labels.append(account.name)
# account_values.append(amount)
# return spendings, account_labels, account_values
@staticmethod
def get_account_details(context: Dict[Any, Any], **kwargs):
ref_month = kwargs.get('ref_month')
account: Account = context.get('account')
# today = date.today()
# print(today.day, account.statement_date, today.day > account.statement_date)
filters = dict()
dt = datetime.strptime(ref_month, AccountHelper.date_fmt) if ref_month else date.today()
if kwargs.get('from_dt') and kwargs.get('to_dt'):
from_dt, to_dt = (
datetime.strptime(kwargs.get('from_dt'), AccountHelper.day_fmt),
datetime.strptime(kwargs.get('to_dt'), AccountHelper.day_fmt)
)
filters.update(date__range = [from_dt, to_dt])
context['selected_range'] = f"{kwargs.get('from_dt')} - {kwargs.get('to_dt')}"
else:
context['selected_range'] = datetime.strftime(dt, '%m-%Y')
filters = dict(
date__month=dt.month,
date__year=dt.year
)
context['prev_month'] = dt - relativedelta.relativedelta(months=1)
context['next_month'] = dt + relativedelta.relativedelta(months=1)
context['cur_month'] = dt
context['spendings'] = Transaction.objects\
.filter(account=account, **filters)\
.aggregate(spendings=Sum('amount'))\
.get('spendings')
context['transactions'] = Transaction.objects.filter(account=account, **filters).order_by("-date")
return context
@staticmethod
def get_act_statments(account):
statement_dates = {
"Kotak": 15,
"Citi Credit": 20,
"HDFC Nayana": 20,
}
dt = statement_dates.get(account)
statments = dict()
if dt:
# st_dt = date.today() - relativedelta.relativedelta(days=i)
for i in range(5):
ref_dt = date.today() - relativedelta.relativedelta(months=i)
to_dt = date(ref_dt.year, ref_dt.month, dt)
from_dt = to_dt - relativedelta.relativedelta(months=1)
from_dt = from_dt + relativedelta.relativedelta(days=1)
# ?fromDate=2021-03-16&toDate=2021-04-14
qp = f'?fromDate={from_dt.strftime("%Y-%m-%d")}&toDate={to_dt.strftime("%Y-%m-%d")}'
statments[f'{from_dt.strftime("%b")}-{to_dt.strftime("%b")}'] = qp
return statments
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .type_convert import anything_to_string
from .type_convert import anything_to_bytes
from .spamc_header import SpamcHeader
from .spamc_body import SpamcBody
class SpamcProtocol(SpamcHeader, SpamcBody):
def __init__(self):
super().__init__()
def create_request(self):
request_body = b''
if self.is_have_header(b'Compress') == True:
request_body = self.zlib_compress_data(self.body)
else:
request_body = self.body
self.set_content_length(len(request_body))
request = (
b'%(headers)b\r\n'
b'%(body)b'
)
        return request % {
            b'headers': self.create_header_request(),
            b'body': request_body
        }
def create_simple_request(self, input_method, input_message):
self.set_method(input_method)
self.body = input_message
return self.create_request()
@staticmethod
def split_spamd_message(input_message):
try:
bytes_header, sep, bytes_body = input_message.partition(b'\r\n\r\n')
return bytes_header, bytes_body
except Exception as err:
raise RuntimeError('Protocol Error')
def load_from_response(self, input_message):
bytes_header, bytes_body = self.split_spamd_message(input_message)
self.parse_header_bytes(bytes_header)
if self.get_content_length() != len(bytes_body):
return False
if self.is_have_header(b'Compress') == True:
response_body = self.zlib_decompress_data(bytes_body)
else:
response_body = bytes_body
self.body = response_body
return True
def is_full_response(self, input_message, is_check_length=True):
if input_message.startswith(b'SPAMD') == False:
raise RuntimeError('Protocol Error')
try:
bytes_header, bytes_body = self.split_spamd_message(input_message)
self.parse_header_bytes(bytes_header)
if is_check_length == True:
if self.is_have_header(b'Content-length') == False:
return False
if self.get_content_length() != len(bytes_body):
return False
return True
except Exception as err:
return False
if __name__ == '__main__':
pass
|
python
|
#!/usr/bin/python
#
# coveragePlot.py
#
# This program generates genomic coverage plots
# Chiu Laboratory
# University of California, San Francisco
# January, 2014
#
# Copyright (C) 2014 Charles Y Chiu - All Rights Reserved
# SURPI has been released under a modified BSD license.
# Please see license file for details.
import matplotlib
matplotlib.use('Agg')
from pylab import *
from pylab import figure, show, legend
from matplotlib import pyplot as plt
from distutils.version import LooseVersion
import numpy as np
import sys, os
import re
def smart_truncate1(text, max_length=100, suffix='...'):
"""Returns a string of at most `max_length` characters, cutting
only at word-boundaries. If the string was truncated, `suffix`
will be appended.
"""
if len(text) > max_length:
pattern = r'^(.{0,%d}\S)\s.*' % (max_length-len(suffix)-1)
return re.sub(pattern, r'\1' + suffix, text)
else:
return text
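# Illustrative example (not in the original source):
# smart_truncate1("Homo sapiens coronavirus", max_length=15) returns "Homo sapiens..."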
if len(sys.argv) < 3:
print "usage: coveragePlot.py <data file .map/.report> <title of plot> <log y-axes Y/N/B=both>"
sys.exit(-1)
dataFile = sys.argv[1]
mpl_version=matplotlib.__version__
# print "Installed version is: %s." % mpl_version
#load function is deprecated as of matplotlib v1.3.1, replaced with numpy.loadtxt
if (LooseVersion(mpl_version) >= LooseVersion('1.3.1') ):
data = np.loadtxt(dataFile)
else:
data = mlab.load(dataFile)
outputFile = os.path.splitext(dataFile)[0]+".ps"
reportFile = os.path.splitext(dataFile)[0]+".report"
with open(reportFile) as f:
reportContent = f.readlines()
reportText = ""
logPlot = sys.argv[3]
for line in reportContent:
stripped_line = line.rstrip('\r\n\t ')
reportText = reportText + smart_truncate1(stripped_line, max_length=100, suffix='...') + "\n"
print "Loaded " + dataFile
hold(True)
if logPlot=='N':
fig=plt.figure(figsize=[8.5,4.5])
ax = fig.add_subplot(111)
fig.text(0.1,0.0,reportText, fontsize=9)
color ='k-'
plot(data[:,0],data[:,1],color)
xlabel("base position",fontsize=8)
ylabel("fold coverage",fontsize=8)
title_text = sys.argv[2]
suptitle(title_text,fontsize=9)
xMin, xMax, yMin, yMax = min(data[:,0]),max(data[:,0]),min(data[:,1]),max(data[:,1])
# add a 10% buffer to yMax
yMax *= 1.1
axis([xMin,xMax,yMin,yMax])
gcf().subplots_adjust(bottom=0.60)
plt.show()
if logPlot=='B':
fig=plt.figure(figsize=[8.5,4.5])
ax1 = fig.add_subplot(211)
color ='k-'
plot(data[:,0],data[:,1],color)
xlabel("base position",fontsize=8)
ylabel("fold coverage",fontsize=8)
xMin, xMax, yMin, yMax = min(data[:,0]),max(data[:,0]),min(data[:,1]),max(data[:,1])
yMax *= 1.1
axis([xMin,xMax,yMin,yMax])
plt.show()
ax2 = fig.add_subplot(212)
ax2.set_yscale('symlog')
fig.text(0.1,0.0,reportText, fontsize=9)
color ='k-'
plot(data[:,0],data[:,1],color)
xlabel("base position",fontsize=8)
ylabel("fold coverage",fontsize=8)
title_text = sys.argv[2]
suptitle(title_text,fontsize=9)
xMin, xMax, yMin, yMax = min(data[:,0]),max(data[:,0]),min(data[:,1]),max(data[:,1])
yMax *= 1.1
axis([xMin,xMax,yMin,yMax])
gcf().subplots_adjust(bottom=0.40)
plt.show()
if logPlot=='Y':
fig=plt.figure(figsize=[8.5,4.5])
ax = fig.add_subplot(111)
ax.set_yscale('symlog')
fig.text(0.1,0.0,reportText, fontsize=9)
color ='k-'
plot(data[:,0],data[:,1],color)
xlabel("base position",fontsize=8)
ylabel("fold coverage",fontsize=8)
title_text = sys.argv[2]
suptitle(title_text,fontsize=9)
xMin, xMax, yMin, yMax = min(data[:,0]),max(data[:,0]),min(data[:,1]),max(data[:,1])
yMax *= 1.1
axis([xMin,xMax,yMin,yMax])
gcf().subplots_adjust(bottom=0.60)
plt.show()
savefig(outputFile)
|
python
|
# *******************************************************************************
# Copyright (C) 2020-2021 INAF
#
# This software is distributed under the terms of the BSD-3-Clause license
#
# Authors:
# Ambra Di Piano <[email protected]>
# *******************************************************************************
import os
from os.path import isfile, expandvars
from sagsci.tools.utils import *
from sagsci.tools.photometry import *
# observation and target
obs_crab = 'data/crab_test_sim.fits'
target = {'ra': 83.6331, 'dec': 22.0145}
pointing = {'ra': 83.6331, 'dec': 22.5145}
# configuration
erange = [(0.03, 50)]
trange = [0, 100] # livetime in seconds (s)
radius = 0.2 # photometry region in degrees (deg)
spectral_index = -2.48 # slope of the power-law spectrum
irf = expandvars('$CTOOLS/share/caldb/data/cta/prod3b-v2/bcf/South_z20_0.5h/irf_file.fits')
# we need to add "radius" to the target dictionary
target['rad'] = radius
# init photometry
phm = Photometrics({'events_filename': obs_crab})
# remove any previously generated off-regions file
offregionsfile = obs_crab.replace('.fits', '_off.reg')
if isfile(offregionsfile):
    os.remove(offregionsfile)
# compute regions
off_regions = phm.find_off_regions(algo='cross', src=target, pnt=pointing, rad=target['rad'], save=offregionsfile)
for e in erange:
    print(f'Target = {target}')
    print(f'Energy range = {e} TeV')
    print(f'Time range = {trange} s')
on, off, alpha, excess, sigma, err_note = phm.counting(src=target, rad=target['rad'], off_regions=off_regions, e_min=e[0], e_max=e[1], t_min=trange[0], t_max=trange[1], draconian=False)
print(f'on counts = {on} cts')
print(f'excess counts = {excess} cts')
    print(f'significance = {sigma} sigma')
exposure = get_aeff_in_region(target=target, pointing=pointing, trange=trange, erange=e, irf=irf, index=spectral_index)
print(f'aeff = {exposure} cm2')
livetime = trange[1]-trange[0]
print(f'livetime = {livetime} s')
# compute flux
flux = on / exposure / livetime
print(f'flux = {flux} ph/cm2/s')
print(f'\n{"-"*50}\n')
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
class Calculator:
def __init__(self, number1, number2):
self.number1 = int(number1)
self.number2 = int(number2)
def add(self):
print(self.number1 + self.number2)
return self.number1 + self.number2
def subtract(self):
print(self.number1 - self.number2)
return self.number1 - self.number2
def multiply(self):
print(self.number1 * self.number2)
return self.number1 * self.number2
def divide(self):
        assert self.number2 != 0, "I am not smart enough to divide by zero"
print(self.number1 / self.number2)
return self.number1 / self.number2
def execute(self,operation):
        if operation == 'sumar':
            return self.add()
        elif operation == 'restar':
            return self.subtract()
        elif operation == 'multiplicar':
            return self.multiply()
        elif operation == 'dividir':
            return self.divide()
        else:
            print("Invalid operation")
if __name__ == "__main__":
operation = sys.argv[1]
number1 = sys.argv[2]
number2 = sys.argv[3]
calculator = Calculator(number1,number2)
calculator.execute(operation)
|
python
|
from flask import Blueprint
bp = Blueprint('auth', __name__)
from diploma.auth import auth, emails, forms, routes
|
python
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
""" This is the example of input record for the test_tranform_data. """
input_test_data = [
{
"targetingCriteria": {
"include": {
"and": [
{
"or": {
"urn:li:adTargetingFacet:titles": [
"urn:li:title:100",
"urn:li:title:10326",
"urn:li:title:10457",
"urn:li:title:10738",
"urn:li:title:10966",
"urn:li:title:11349",
"urn:li:title:1159",
]
}
},
{"or": {"urn:li:adTargetingFacet:locations": ["urn:li:geo:103644278"]}},
{"or": {"urn:li:adTargetingFacet:interfaceLocales": ["urn:li:locale:en_US"]}},
]
},
"exclude": {
"or": {
"urn:li:adTargetingFacet:facet_Key1": [
"facet_test1",
"facet_test2",
],
"urn:li:adTargetingFacet:facet_Key2": [
"facet_test3",
"facet_test4",
],
}
},
},
"changeAuditStamps": {
"created": {"time": 1629581275000},
"lastModified": {"time": 1629664544760},
},
"dateRange": {
"start": {"month": 8, "day": 13, "year": 2021},
"end": {"month": 8, "day": 13, "year": 2021},
},
"variables": {
"data": {
"com.linkedin.ads.SponsoredUpdateCreativeVariables": {
"activity": "urn:li:activity:1234",
"directSponsoredContent": 0,
"share": "urn:li:share:1234",
}
}
},
}
]
""" This is the expected output from the `transform_data` method. """
output_test_data = [
{
"targetingCriteria": {
"include": {
"and": [
{
"type": "urn:li:adTargetingFacet:titles",
"values": [
"urn:li:title:100",
"urn:li:title:10326",
"urn:li:title:10457",
"urn:li:title:10738",
"urn:li:title:10966",
"urn:li:title:11349",
"urn:li:title:1159",
],
},
{
"type": "urn:li:adTargetingFacet:locations",
"values": ["urn:li:geo:103644278"],
},
{
"type": "urn:li:adTargetingFacet:interfaceLocales",
"values": ["urn:li:locale:en_US"],
},
]
},
"exclude": {
"or": [
{
"type": "urn:li:adTargetingFacet:facet_Key1",
"values": ["facet_test1", "facet_test2"],
},
{
"type": "urn:li:adTargetingFacet:facet_Key2",
"values": ["facet_test3", "facet_test4"],
},
]
},
},
"variables": {
"type": "com.linkedin.ads.SponsoredUpdateCreativeVariables",
"values": [
{"key": "activity", "value": "urn:li:activity:1234"},
{"key": "directSponsoredContent", "value": 0},
{"key": "share", "value": "urn:li:share:1234"},
],
},
"created": "2021-08-21 21:27:55",
"lastModified": "2021-08-22 20:35:44",
"start_date": "2021-08-13",
"end_date": "2021-08-13",
}
]
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from mm.utils.opengl import Render
from mm.utils.mesh import generateFace
from mm.models import MeshModel
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, img_as_float
if __name__ == '__main__':
# Load the first image from the video
frame = 0
img = io.imread('../data/obama/orig/%05d.png' % (frame + 1))
img = img_as_float(img)
width = img.shape[1]
height = img.shape[0]
# Load the 3DMM parameters that fit the 3DMM to each video frame
param = np.load('../data/obama/paramRTS2Orig.npy')
# Load the mesh model
m = MeshModel('../models/bfm2017.npz')
# Generate the vertex coordinates from the mesh model and the parameters
vertexCoords = generateFace(param[frame, :], m).T
# Use the mean vertex colors just for illustrative purposes
vertexColors = m.texMean.T
# Concatenate the vertex coordinates and colors-- this is how they will be inputted into the Render object
meshData = np.r_[vertexCoords, vertexColors]
# Initialize an OpenGL Render object and render the 3DMM with the corresponding video frame in the background
r = Render(width, height, meshData, m.face, indexed = False, img = img)
r.render()
# Grab the rendering from the video card
rendering = r.grabRendering()
    # You can also get other parameters from the video card, such as: the pixels where the 3DMM is
    # rendered, the index of the triangular face that contributes to the color of each of those pixels,
    # and the barycentric coordinates of that face, such that the barycentric combination of its three
    # vertex attributes (e.g. color) forms the color of the rendered pixel.
rendering, pixelCoord, pixelFaces, pixelBarycentricCoords = r.grabRendering(return_info = True)
# Plot the rendering
plt.figure()
plt.imshow(rendering)
# Loop through some frames in the video to render some more 3DMMs
for frame in range(1, 52, 10):
img = io.imread('../data/obama/orig/%05d.png' % (frame + 1))
img = img_as_float(img)
vertexCoords = generateFace(param[frame, :], m).T
meshData = np.r_[vertexCoords, vertexColors]
# Update the video card with the new mesh data for the current frame
r.updateVertexBuffer(meshData)
# Erase the current rendering to prepare for the new rendering
r.resetFramebufferObject()
# And then render and plot the rendering
r.render()
rendering = r.grabRendering()
plt.figure()
plt.imshow(rendering)
|
python
|
import copy
from cantoolz.module import CANModule
class ecu_switch(CANModule):
name = "CAN Switch"
help = """
This module emulating CAN Switch.
Init params (example):
{
'Cabin': { # From Cabin interface
'OBD2':[ # To OBD2 allowed next ID
0x81, # Left door status
0x82 # Right door status
],
},
'Engine': {
'OBD2': [
0x79,
0x709
],
'Cabin':[
0x79
]
},
'OBD2': {
'Engine':[
0x701
],
}
}
"""
_active = True
def do_init(self, params):
self._rules = params
# Effect (could be fuzz operation, sniff, filter or whatever)
def do_effect(self, can_msg, args):
current_rule = self._rules.get(args['pipe'], {})
if can_msg.CANData and args['action'] == "read": # READ
for route_to, allowed_id in current_rule.items():
if can_msg.CANFrame.frame_id in allowed_id:
buffer = self._rules[route_to].get('buffer', [])
buffer.append(copy.deepcopy(can_msg.CANFrame))
self._rules[route_to].update({'buffer': buffer})
elif args['action'] == "write" and not can_msg.CANData: # Write
buffer_len = len(current_rule.get('buffer', []))
if buffer_len > 0:
can_msg.CANFrame = self._rules[args['pipe']]['buffer'].pop(0)
can_msg.CANData = True
can_msg.bus = self._bus
return can_msg
|
python
|
from collections import defaultdict
start, end = 357253, 892942
num_digits = 6
def solve(start, end, strict=False):
length = end - start
count = 0
for i in range(length):
number = start + i
previous = number % 10
consecutives = defaultdict(int)
for j in range(1, num_digits):
p = 10 ** j
digit = number // p % 10
if digit > previous:
break
if previous == digit:
consecutives[digit] += 1
previous = digit
else:
if (strict and 1 in consecutives.values()) or (not strict and consecutives):
count += 1
return count
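# Note (added for clarity): solve() counts the numbers in [start, end) whose digits never decrease
# from left to right and which contain at least one pair of adjacent equal digits; with strict=True
# that pair must not be part of a longer run (i.e. exactly two equal digits). This appears to follow
# the Advent of Code 2019 day 4 puzzle format.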
if __name__ == "__main__":
# Part I
print(solve(start, end))
# Part II
print(solve(start, end, strict=True))
|
python
|
# -----------------------------------------------------------------------------
# NDN Repo getfile client.
#
# @Author [email protected]
# @Date 2019-10-24
# -----------------------------------------------------------------------------
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import asyncio as aio
import logging
from ndn.app import NDNApp
from ndn.encoding import Name, NonStrictName
from ..utils.concurrent_fetcher import concurrent_fetcher
class GetfileClient(object):
"""
    This client fetches a file from the repo and saves it to the current working directory.
"""
def __init__(self, app: NDNApp, repo_name):
"""
A client to retrieve files from the remote repo.
:param app: NDNApp.
:param repo_name: NonStrictName. Routable name to remote repo.
"""
self.app = app
self.repo_name = repo_name
async def fetch_file(self, name_at_repo: NonStrictName):
"""
Fetch a file from remote repo, and write to the current working directory.
:param name_at_repo: NonStrictName. The name with which this file is stored in the repo.
"""
semaphore = aio.Semaphore(10)
b_array = bytearray()
async for (_, _, content, _) in concurrent_fetcher(self.app, name_at_repo, 0, None, semaphore):
b_array.extend(content)
if len(b_array) > 0:
filename = Name.to_str(name_at_repo)
filename = filename.strip().split('/')[-1]
logging.info(f'Fetching completed, writing to file {filename}')
with open(filename, 'wb') as f:
f.write(b_array)
|
python
|
import attr
import logging
from typing import Callable
from mmds.exceptions import PackageNotFoundError
try:
from PIL import Image
except ImportError:
    raise PackageNotFoundError("pillow", by="rgbs modality.")
from .ts import TimeSeriesModality
logger = logging.getLogger(__name__)
dumb_image = Image.new("RGB", (32, 32))
@attr.define
class RgbsModality(TimeSeriesModality):
"""A rgb sequence modality for video."""
transform: Callable
aggragate: Callable
@property
def duration(self):
return len(self.paths) / self.sample_rate
def _fetch_impl(self, *, info={}):
paths = self._slice(self.paths, info.get("t0"), info.get("t1"))
frames = list(map(self.transform, map(self._load_pil, paths)))
return self.aggragate(frames)
def _pad(self, x, n):
return x + [None] * n
@staticmethod
def _load_pil(path):
if path is None:
return dumb_image
try:
image = Image.open(path)
        except Exception:
            logger.warning(f"Open {path} failed; using an empty picture instead.")
image = dumb_image
return image
|
python
|
all_teams = ["ANC", "APO", "CSU", "GUC", "LTI", "MIN", "MRL", "NAI", "POS", "RI1", "RAK", "SOS", "ZJU"]
semi_teams = ["APO", "CSU", "GUC", "MIN", "MRL", "POS", "SOS", "ZJU"]
team_names = {
# "BAS" : "Baseline (no agents)",
"ANC" : "anct_rescue2013",
"APO" : "Apollo-Rescue",
"CSU" : "CSU-YUNLU",
"GUC" : "GUC_ArtSapience",
"LTI" : "LTI-Agent-Rescue",
"MIN" : "MinERS",
"MRL" : "MRL",
"NAI" : "NAITO-Rescue2013",
"POS" : "Poseidon",
"RI1" : "Ri-one",
"RAK" : "RoboAKUT",
"SOS" : "S.O.S.",
"ZJU" : "ZJUBase"
}
day1 = {'name' : "Day 1",
'shortname' : "Day1",
'maps' : ["Berlin1", "Eindhoven1", "Kobe1", "Paris1", "VC1"],
'teams' : all_teams}
day2 = {'name' : "Day 2",
'shortname' : "Day2",
'maps' : ["Mexico1", "Kobe2", "Eindhoven2", "Istanbul1", "Paris2"],
'teams' : all_teams,
'merge_with' : day1,
'highlight' : 8}
semi = {'name' : "Semifinals",
'shortname' : "Semifinals",
'maps' : ["VC2", "Berlin2", "Kobe3", "Istanbul2", "Mexico2", "Eindhoven3", "Paris3", "Eindhoven4"],
'teams' : semi_teams,
'highlight' : 4}
# final = {'name' : "Finals",
# 'shortname' : "final",
# 'maps' : ["Eindhoven1"],
# 'teams' : all_teams,
# 'merge_with' : day3,
# 'show_ranks' : 1}
rounds = [day1, day2, semi]
# semi_teams = ["RAK", "SBC", "POS", "IAM", "MRL", "RI1", "SEU", "RMA"]
# final_teams = ["POS", "IAM", "SEU", "RMA"]
# day1 = {'name' : "Preliminaries Day 1",
# 'shortname' : "Preliminary1",
# 'maps' : ["VC1", "Paris1", "Kobe1", "Berlin1", "Istanbul1"],
# 'teams' : all_teams}
# day2 = {'name' : "Preliminaries Day 2",
# 'shortname' : "Preliminary2",
# 'maps' : ["Kobe2", "Paris2", "Istanbul2", "Berlin2", "VC2"],
# 'teams' : all_teams
# 'merge_with' : day1
# 'highlight' : 8}
# semi = {'name' : "Semifinals",
# 'shortname' : "Semifinals",
# 'maps' : ["Kobe2", "Paris2", "Istanbul2", "Berlin2", "VC2"],
# 'teams' : semi_teams,
# 'highlight' : 4}
# final = {'name' : "Finals",
# 'shortname' : "Finals",
# 'maps' : ["Kobe2", "Paris2", "Istanbul2", "Berlin2", "VC2"],
# 'teams' : ["Paris5", "Berlin5", "Kobe4", "Istanbul5", "VC5"],
# 'show_ranks' : 3}
# rounds = [day1, day2, semi, final]
log_location = "logs/2013"
add_downloads = True
|
python
|
import warnings
class AppPlatformError(Exception):
"""
Raised by :meth:`Client.request()` for requests that:
- Return a non-200 HTTP response, or
- Connection refused/timeout or
- Response timeout or
- Malformed request
- Have a malformed/missing header in the response.
"""
def __init__(self, exc_message, status_code, error_code=None):
super(AppPlatformError, self).__init__(exc_message)
self.status_code = status_code
self.error_code = error_code
class ServerError(AppPlatformError):
"""
For 500-level responses from the server
"""
class ClientError(AppPlatformError):
"""
For 400-level responses from the server
"""
class InputNotUnderstoodError(Exception):
"""
Raised if a method is called in a way that cannot be understood
"""
class AllRetriesFailedError(Exception):
"""Raised when the retry manager does not successfully make a request"""
class InvalidModelCategoryError(Exception):
"""
Raised when method specific for model category was called from wrong model
"""
class AsyncTimeoutError(Exception):
"""
Raised when an asynchronous operation did not successfully get resolved
within a specified time limit
"""
class AsyncFailureError(Exception):
"""
Raised when querying an asynchronous status resulted in an exceptional
status code (not 200 and not 303)
"""
class ProjectAsyncFailureError(AsyncFailureError):
"""
When an AsyncFailureError occurs during project creation or finalizing the project
settings for modeling. This exception will have the attributes ``status_code``
indicating the unexpected status code from the server, and ``async_location`` indicating
which asynchronous status object was being polled when the failure happened.
"""
def __init__(self, exc_message, status_code, async_location):
super(ProjectAsyncFailureError, self).__init__(exc_message)
self.status_code = status_code
self.async_location = async_location
class AsyncProcessUnsuccessfulError(Exception):
"""
Raised when querying an asynchronous status showed that async process
was not successful
"""
class AsyncModelCreationError(Exception):
"""
Raised when querying an asynchronous status showed that model creation
was not successful
"""
class AsyncPredictionsGenerationError(Exception):
"""
Raised when querying an asynchronous status showed that predictions
generation was not successful
"""
class PendingJobFinished(Exception):
"""
Raised when the server responds with a 303 for the pending creation of a
resource.
"""
class JobNotFinished(Exception):
"""
Raised when execution was trying to get a finished resource from a pending
job, but the job is not finished
"""
class DuplicateFeaturesError(Exception):
"""
Raised when trying to create featurelist with duplicating features
"""
class DataRobotDeprecationWarning(DeprecationWarning):
"""
Raised when using deprecated functions or using functions in a deprecated way
"""
pass
class IllegalFileName(Exception):
"""
Raised when trying to use a filename we can't handle.
"""
class JobAlreadyRequested(ClientError):
"""
Raised when the requested model has already been requested.
"""
warnings.filterwarnings('default', category=DataRobotDeprecationWarning)
|
python
|
import setuptools
from glob import glob
setuptools.setup(
name="noteboard-extension",
version='0.1.0',
url="https://github.com/yuvipanda/noteboard",
author="Yuvi Panda",
description="Simple Jupyter extension to emit events about current notebooks to a noteboard server",
data_files=[
('share/jupyter/nbextensions/noteboard', glob('*.js'))
],
packages=setuptools.find_packages()
)
|
python
|
import os.path as osp
import numpy as np
class Dataset(object):
def __init__(self, ids, labels, is_train=True, name='default'):
self._ids = list(ids)
self._labels = labels
self.name = name
self.is_train = is_train
def get_data(self, id):
activity = np.load(id)
label = self._labels[id]
return activity, label
def get_data_ROI(self, id):
activity = np.load(id)
activity = activity/255.*2-1
label = self._labels[id]
return activity, label
@property
def ids(self):
return self._ids
def __len__(self):
return len(self.ids)
def __repr__(self):
return 'Dataset (%s, %d examples)' % (
self.name,
len(self)
)
def create_default_splits(path, is_train=True):
train_ids, train_labels = get_activity_path_and_label(osp.join(path, 'train'))
val_ids, val_labels = get_activity_path_and_label(osp.join(path, 'val'))
test_ids, test_labels = get_activity_path_and_label(osp.join(path, 'test'))
dataset_train = Dataset(train_ids, train_labels, name='train', is_train=True)
dataset_val = Dataset(val_ids, val_labels, name='val', is_train=False)
dataset_test = Dataset(test_ids, test_labels, name='test', is_train=False)
return dataset_train, dataset_val, dataset_test
def get_activity_path_and_label(path):
ids = []
labels = {}
with open(osp.join(path, 'label.txt')) as f:
lines = [line.strip() for line in f.readlines()]
for line in lines:
newline = list(filter(str.strip, line.split(' ')))
id = osp.join(path, newline[0])
ids.append(id)
labels[id] = np.array([float(n) for n in newline[1:]])
rs = np.random.RandomState(123)
rs.shuffle(ids)
return ids, labels
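# Assumed label.txt format, inferred from the parsing above (illustrative, not from the original source):
# each line is "<relative .npy path> <float label> <float label> ...", e.g.
#   clip_0001.npy 0.0 1.0 0.0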
|
python
|
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
# colour spaces --> BGR/RGB, grayscale, HSV, L*a*b
img = cv.imread('photos/dog.jpg')
cv.imshow('Dog',img)
# plt.imshow(img)
# plt.show()
# point to note --> matplotlib expects RGB, so showing a BGR image with plt.imshow swaps the colour channels
# BGR to grayscale -->
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
cv.imshow('Grayscale',gray)
#BGR to HSV -->
hsv =cv.cvtColor(img,cv.COLOR_BGR2HSV_FULL)
cv.imshow('HSV',hsv)
#BGR to L*A*B
lab =cv.cvtColor(img,cv.COLOR_BGR2LAB)
cv.imshow('Lab',lab)
#BGR to RGB
rgb = cv.cvtColor(img,cv.COLOR_BGR2RGB)
cv.imshow('RGB',rgb)
plt.imshow(rgb)
plt.show()
# HSV back to BGR (the conversion must start from the HSV image, not the original BGR image;
# the _FULL variant matches the COLOR_BGR2HSV_FULL conversion used above)
hsv_bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR_FULL)
cv.imshow('HSV -> BGR', hsv_bgr)
cv.waitKey(0)
|
python
|
from rest_framework import serializers
from users.models.organizations import Organization
class OrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = Organization
fields = ('id', 'name', 'domain')
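# Typical DRF usage (illustrative, not part of the original module):
#   OrganizationSerializer(org).data                 -> {"id": ..., "name": ..., "domain": ...}
#   OrganizationSerializer(data=payload).is_valid()  -> validates incoming data against the model fields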
|
python
|
import sys
from data_wrapper.settings import MESSAGES
class DataWrapper:
def __init__(self, db=None, params=None, environment=None):
if db:
if db.lower() == 'mongodb':
from data_wrapper.mongodb_wrapper import MongodbDbWrapper
self.db = True
self.my_data = MongodbDbWrapper(params)
elif db.lower() == 'mysql':
from data_wrapper.mysql_wrapper import MysqlDbWrapper
self.db = True
self.my_data = MysqlDbWrapper(params, environment)
else:
print(MESSAGES["WRONG_DATABASE"])
sys.exit()
else:
self.db = False
self.my_data = {}
def __setitem__(self, key, value):
if not self.db and key not in self.my_data:
self.my_data[key] = value
self.my_data.__setitem__(key, value)
def __getitem__(self, item):
if not self.db and item not in self.my_data:
self.my_data[item] = []
return self.my_data[item]
def __delitem__(self, key):
self.my_data.__delitem__(key)
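# Illustrative in-memory usage (no `db` argument; not part of the original module):
#   dw = DataWrapper()
#   dw["events"]            # missing keys default to an empty list
#   dw["events"] = [1, 2]   # values are stored in the internal dict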
|
python
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import shutil
import subprocess
import sys
import tempfile
from telemetry.internal.util import binary_manager
class MinidumpSymbolizer(object):
def __init__(self, os_name, arch_name, dump_finder, build_dir):
"""Abstract class for handling all minidump symbolizing code.
Args:
os_name: The OS of the host (if running the test on a device), or the OS
of the test machine (if running the test locally).
arch_name: The arch name of the host (if running the test on a device), or
the OS of the test machine (if running the test locally).
dump_finder: The minidump_finder.MinidumpFinder instance that is being
used to find minidumps for the test.
build_dir: The directory containing Chromium build artifacts to generate
symbols from.
"""
self._os_name = os_name
self._arch_name = arch_name
self._dump_finder = dump_finder
self._build_dir = build_dir
def SymbolizeMinidump(self, minidump):
"""Gets the stack trace from the given minidump.
Args:
minidump: the path to the minidump on disk
Returns:
None if the stack could not be retrieved for some reason, otherwise a
string containing the stack trace.
"""
if self._os_name == 'win':
cdb = self._GetCdbPath()
if not cdb:
logging.warning('cdb.exe not found.')
return None
# Move to the thread which triggered the exception (".ecxr"). Then include
# a description of the exception (".lastevent"). Also include all the
# threads' stacks ("~*kb30") as well as the ostensibly crashed stack
# associated with the exception context record ("kb30"). Note that stack
# dumps, including that for the crashed thread, may not be as precise as
# the one starting from the exception context record.
# Specify kb instead of k in order to get four arguments listed, for
# easier diagnosis from stacks.
output = subprocess.check_output([cdb, '-y', self.browser_directory,
'-c', '.ecxr;.lastevent;kb30;~*kb30;q',
'-z', minidump])
# The output we care about starts with "Last event:" or possibly
# other things we haven't seen yet. If we can't find the start of the
# last event entry, include output from the beginning.
info_start = 0
info_start_match = re.search("Last event:", output, re.MULTILINE)
if info_start_match:
info_start = info_start_match.start()
info_end = output.find('quit:')
return output[info_start:info_end]
stackwalk = binary_manager.FetchPath(
'minidump_stackwalk', self._arch_name, self._os_name)
if not stackwalk:
logging.warning('minidump_stackwalk binary not found.')
return None
# We only want this logic on linux platforms that are still using breakpad.
# See crbug.com/667475
if not self._dump_finder.MinidumpObtainedFromCrashpad(minidump):
with open(minidump, 'rb') as infile:
minidump += '.stripped'
with open(minidump, 'wb') as outfile:
outfile.write(''.join(infile.read().partition('MDMP')[1:]))
symbols_dir = tempfile.mkdtemp()
try:
self._GenerateBreakpadSymbols(symbols_dir, minidump)
return subprocess.check_output([stackwalk, minidump, symbols_dir],
stderr=open(os.devnull, 'w'))
finally:
shutil.rmtree(symbols_dir)
def GetSymbolBinaries(self, minidump):
"""Returns a list of paths to binaries where symbols may be located.
Args:
minidump: The path to the minidump being symbolized.
"""
raise NotImplementedError()
def GetBreakpadPlatformOverride(self):
"""Returns the platform to be passed to generate_breakpad_symbols."""
return None
def _GenerateBreakpadSymbols(self, symbols_dir, minidump):
"""Generates Breakpad symbols for use with stackwalking tools.
Args:
symbols_dir: The directory where symbols will be written to.
minidump: The path to the minidump being symbolized.
"""
logging.info('Dumping Breakpad symbols.')
generate_breakpad_symbols_command = binary_manager.FetchPath(
'generate_breakpad_symbols', self._arch_name, self._os_name)
if not generate_breakpad_symbols_command:
logging.warning('generate_breakpad_symbols binary not found')
return
for binary_path in self.GetSymbolBinaries(minidump):
cmd = [
sys.executable,
generate_breakpad_symbols_command,
'--binary=%s' % binary_path,
'--symbols-dir=%s' % symbols_dir,
'--build-dir=%s' % self._build_dir,
]
if self.GetBreakpadPlatformOverride():
cmd.append('--platform=%s' % self.GetBreakpadPlatformOverride())
try:
subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
logging.error(e.output)
logging.warning('Failed to execute "%s"', ' '.join(cmd))
return
|
python
|
# ------------------------------------------------------------------------------
# Portions of this code are from
# OpenPCDet (https://github.com/open-mmlab/OpenPCDet)
# Licensed under the Apache License.
# ------------------------------------------------------------------------------
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from det3d.core.bbox import box_torch_ops
from .target_assigner.proposal_target_layer import ProposalTargetLayer
def limit_period(val, offset=0.5, period=np.pi):
return val - torch.floor(val / period + offset) * period
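# Illustrative example (not in the original source): with the defaults (offset=0.5, period=pi),
# limit_period maps an angle into [-pi/2, pi/2); e.g. 3*pi/4 -> 3*pi/4 - floor(0.75 + 0.5)*pi = -pi/4.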
class RoIHeadTemplate(nn.Module):
def __init__(self, num_class, model_cfg):
super().__init__()
self.model_cfg = model_cfg
self.num_class = num_class
self.proposal_target_layer = ProposalTargetLayer(roi_sampler_cfg=self.model_cfg.TARGET_CONFIG)
self.forward_ret_dict = None
def make_fc_layers(self, input_channels, output_channels, fc_list):
fc_layers = []
pre_channel = input_channels
        for k in range(len(fc_list)):
fc_layers.extend([
nn.Conv1d(pre_channel, fc_list[k], kernel_size=1, bias=False),
nn.BatchNorm1d(fc_list[k]),
nn.ReLU()
])
pre_channel = fc_list[k]
if self.model_cfg.DP_RATIO >= 0 and k == 0:
fc_layers.append(nn.Dropout(self.model_cfg.DP_RATIO))
fc_layers.append(nn.Conv1d(pre_channel, output_channels, kernel_size=1, bias=True))
fc_layers = nn.Sequential(*fc_layers)
return fc_layers
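    # Illustrative example (not in the original source): make_fc_layers(128, 1, [256, 256]) builds
    # Conv1d(128->256, k=1) + BN + ReLU, an optional Dropout (when DP_RATIO >= 0), then
    # Conv1d(256->256, k=1) + BN + ReLU, followed by a final Conv1d(256->1, k=1) with bias.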
def assign_targets(self, batch_dict):
batch_size = batch_dict['batch_size']
with torch.no_grad():
targets_dict = self.proposal_target_layer.forward(batch_dict)
rois = targets_dict['rois'] # (B, N, 7 + C)
gt_of_rois = targets_dict['gt_of_rois'] # (B, N, 7 + C + 1)
targets_dict['gt_of_rois_src'] = gt_of_rois.clone().detach()
roi_ry = limit_period(rois[:, :, 6], offset=0.5, period=np.pi*2)
gt_of_rois[:, :, :6] = gt_of_rois[:, :, :6] - rois[:, :, :6]
gt_of_rois[:, :, 6] = gt_of_rois[:, :, 6] - roi_ry
gt_of_rois = box_torch_ops.rotate_points_along_z(
points=gt_of_rois.view(-1, 1, gt_of_rois.shape[-1]), angle=-roi_ry.view(-1)
).view(batch_size, -1, gt_of_rois.shape[-1])
if rois.shape[-1] == 9:
# rotate velocity
gt_of_rois[:, :, 7:-1] = gt_of_rois[:, :, 7:-1] - rois[:, :, 7:]
"""
roi_vel = gt_of_rois[:, :, 7:-1]
roi_vel = torch.cat([roi_vel, torch.zeros([roi_vel.shape[0], roi_vel.shape[1], 1]).to(roi_vel)], dim=-1)
gt_of_rois[:, :, 7:-1] = box_torch_ops.rotate_points_along_z(
points=roi_vel.view(-1, 1, 3), angle=-roi_ry.view(-1)
).view(batch_size, -1, 3)[..., :2]
"""
# flip orientation if rois have opposite orientation
heading_label = gt_of_rois[:, :, 6] % (2 * np.pi) # 0 ~ 2pi
opposite_flag = (heading_label > np.pi * 0.5) & (heading_label < np.pi * 1.5)
heading_label[opposite_flag] = (heading_label[opposite_flag] + np.pi) % (2 * np.pi) # (0 ~ pi/2, 3pi/2 ~ 2pi)
flag = heading_label > np.pi
heading_label[flag] = heading_label[flag] - np.pi * 2 # (-pi/2, pi/2)
heading_label = torch.clamp(heading_label, min=-np.pi / 2, max=np.pi / 2)
gt_of_rois[:, :, 6] = heading_label
targets_dict['gt_of_rois'] = gt_of_rois
return targets_dict
def get_box_reg_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
code_size = forward_ret_dict['rcnn_reg'].shape[-1]
reg_valid_mask = forward_ret_dict['reg_valid_mask'].view(-1)
gt_boxes3d_ct = forward_ret_dict['gt_of_rois'][..., 0:code_size]
rcnn_reg = forward_ret_dict['rcnn_reg'] # (rcnn_batch_size, C)
rcnn_batch_size = gt_boxes3d_ct.view(-1, code_size).shape[0]
fg_mask = (reg_valid_mask > 0)
fg_sum = fg_mask.long().sum().item()
tb_dict = {}
if loss_cfgs.REG_LOSS == 'L1':
reg_targets = gt_boxes3d_ct.view(rcnn_batch_size, -1)
rcnn_loss_reg = F.l1_loss(
rcnn_reg.view(rcnn_batch_size, -1),
reg_targets,
reduction='none'
) # [B, M, 7]
rcnn_loss_reg = rcnn_loss_reg * rcnn_loss_reg.new_tensor(\
loss_cfgs.LOSS_WEIGHTS['code_weights'])
rcnn_loss_reg = (rcnn_loss_reg.view(rcnn_batch_size, -1) * fg_mask.unsqueeze(dim=-1).float()).sum() / max(fg_sum, 1)
rcnn_loss_reg = rcnn_loss_reg * loss_cfgs.LOSS_WEIGHTS['rcnn_reg_weight']
tb_dict['rcnn_loss_reg'] = rcnn_loss_reg.detach()
else:
raise NotImplementedError
return rcnn_loss_reg, tb_dict
def get_box_cls_layer_loss(self, forward_ret_dict):
loss_cfgs = self.model_cfg.LOSS_CONFIG
rcnn_cls = forward_ret_dict['rcnn_cls']
rcnn_cls_labels = forward_ret_dict['rcnn_cls_labels'].view(-1)
if loss_cfgs.CLS_LOSS == 'BinaryCrossEntropy':
rcnn_cls_flat = rcnn_cls.view(-1)
batch_loss_cls = F.binary_cross_entropy(torch.sigmoid(rcnn_cls_flat), rcnn_cls_labels.float(), reduction='none')
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
elif loss_cfgs.CLS_LOSS == 'CrossEntropy':
batch_loss_cls = F.cross_entropy(rcnn_cls, rcnn_cls_labels, reduction='none', ignore_index=-1)
cls_valid_mask = (rcnn_cls_labels >= 0).float()
rcnn_loss_cls = (batch_loss_cls * cls_valid_mask).sum() / torch.clamp(cls_valid_mask.sum(), min=1.0)
else:
raise NotImplementedError
rcnn_loss_cls = rcnn_loss_cls * loss_cfgs.LOSS_WEIGHTS['rcnn_cls_weight']
tb_dict = {'rcnn_loss_cls': rcnn_loss_cls.detach()}
return rcnn_loss_cls, tb_dict
def get_loss(self, tb_dict=None):
tb_dict = {} if tb_dict is None else tb_dict
rcnn_loss = 0
rcnn_loss_cls, cls_tb_dict = self.get_box_cls_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_cls
tb_dict.update(cls_tb_dict)
rcnn_loss_reg, reg_tb_dict = self.get_box_reg_layer_loss(self.forward_ret_dict)
rcnn_loss += rcnn_loss_reg
tb_dict.update(reg_tb_dict)
tb_dict['rcnn_loss'] = rcnn_loss.item()
return rcnn_loss, tb_dict
def generate_predicted_boxes(self, batch_size, rois, cls_preds, box_preds):
"""
Args:
batch_size:
rois: (B, N, 7)
cls_preds: (BN, num_class)
box_preds: (BN, code_size)
Returns:
"""
code_size = box_preds.shape[-1]
# batch_cls_preds: (B, N, num_class or 1)
batch_cls_preds = cls_preds.view(batch_size, -1, cls_preds.shape[-1])
batch_box_preds = box_preds.view(batch_size, -1, code_size)
roi_ry = rois[:, :, 6].view(-1)
roi_xyz = rois[:, :, 0:3].view(-1, 3)
local_rois = rois.clone().detach()
local_rois[:, :, 0:3] = 0
batch_box_preds = (batch_box_preds + local_rois).view(-1, code_size)
batch_box_preds = box_torch_ops.rotate_points_along_z(
batch_box_preds.unsqueeze(dim=1), roi_ry
).squeeze(dim=1)
batch_box_preds[:, 0:3] += roi_xyz
batch_box_preds = batch_box_preds.view(batch_size, -1, code_size)
return batch_cls_preds, batch_box_preds
|
python
|
import typing
def printBinary(n: int) -> None:
if n > 1:
printBinary(n // 2)
print(n % 2, end = "")
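# Illustrative example (not in the original source): printBinary(13) prints "1101".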
def main() -> None:
N = int(input("Input an integer:\n"))
printBinary(N)
return None
main()
|
python
|
# Authors: David Alexander, Lance Hepler
from __future__ import absolute_import, division, print_function
from GenomicConsensus.arrow.utils import allSingleBaseMutations
from GenomicConsensus.variants import Variant
from GenomicConsensus.quiver.diploid import variantsFromAlignment
import numpy as np
import ConsensusCore2 as cc
# IUPAC reference:
# http://www.bioinformatics.org/sms/iupac.html
_packIupac = { ("A", "G") : "R" ,
("G", "A") : "R" ,
("C", "T") : "Y" ,
("T", "C") : "Y" ,
("G", "C") : "S" ,
("C", "G") : "S" ,
("A", "T") : "W" ,
("T", "A") : "W" ,
("G", "T") : "K" ,
("T", "G") : "K" ,
("A", "C") : "M" ,
("C", "A") : "M" }
_unpackIupac = { "R" : ("A", "G") ,
"Y" : ("C", "T") ,
"S" : ("G", "C") ,
"W" : ("A", "T") ,
"K" : ("G", "T") ,
"M" : ("A", "C") }
def packIUPAC(bases):
return _packIupac[bases]
def unpackIUPAC(iupacCode):
return _unpackIupac[iupacCode]
def isHeterozygote(base):
return (base in _unpackIupac)
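# Illustrative examples (not in the original source):
#   packIUPAC(("A", "G"))  -> "R"
#   unpackIUPAC("W")       -> ("A", "T")
#   isHeterozygote("M")    -> True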
def packMuts(cssBase, mut1, mut2):
# Turn two muts (with same Start, End, LengthDiff) into a single mutation to
# IUPAC. The no-op mutation is coded as None.
#
# Example1: (_, Subs A, Subs T) -> Subs W
# Example2: (_, Ins A, Ins T) -> Ins W
# Example3: (A, None, Subs T) -> Subs W
#
nonNullMut = mut1 or mut2
start = nonNullMut.Start()
mutType = nonNullMut.Type()
newBase1 = mut1.Bases() if mut1 else cssBase
newBase2 = mut2.Bases() if mut2 else cssBase
newBasePacked = packIUPAC((newBase1, newBase2))
return cc.Mutation(mutType, start, newBasePacked)
def scoresForPosition(ai, pos):
muts = allSingleBaseMutations(str(ai), positions=[pos])
noMutScore = [0] * ai.NumReads()
mutScores_ = [ ai.ReadLLs(mut)
for mut in muts ]
mutScores = np.column_stack([noMutScore] + mutScores_).astype(np.float32)
return mutScores
def variantsFromConsensus(refWindow, refSequenceInWindow, cssSequenceInWindow,
cssQvInWindow=None, siteCoverage=None, aligner="affine",
ai=None):
"""
Compare the consensus and the reference in this window, returning
a list of variants.
Uses the integrator to identify heterozygous variants.
"""
assert (cssQvInWindow is None) == (siteCoverage is None) # Both or none
refId, refStart, refEnd = refWindow
if ai is not None:
#
# Hunting diploid variants:
# 1. find confident heterozygous sites;
# 2. build a "diploid consensus" using IUPAC encoding
# for het sites; mark cssQv accordingly
# 3. align diploid consensus to reference
# 4. extract and decorate variants
#
assert str(ai) == cssSequenceInWindow
iupacMutations = [] # List of (Mutation, confidence)
for pos in xrange(0, ai.Length()):
ds = cc.IsSiteHeterozygous(scoresForPosition(ai, pos), 40)
if ds:
muts = [None] + list(allSingleBaseMutations(cssSequenceInWindow, positions=[pos]))
mut0 = muts[ds.Allele0]
mut1 = muts[ds.Allele1]
cssBase = cssSequenceInWindow[pos]
packedMut = packMuts(cssBase, mut0, mut1)
iupacMutations.append((packedMut, 40))
# Create diploidCss by applying mutations, meanwhile updating the
# confidence vector accordingly.
diploidCss = cc.ApplyMutations([pair[0] for pair in iupacMutations],
cssSequenceInWindow)
diploidQv = list(cssQvInWindow) if cssQvInWindow is not None else None
runningLengthDiff = 0
for (mut, conf) in iupacMutations:
start = mut.Start() + runningLengthDiff
end = mut.End() + runningLengthDiff
diploidQv[start:end] = [conf]
assert len(diploidCss) == len(diploidQv)
cssSequenceInWindow = diploidCss
cssQvInWindow = diploidQv
vars = variantsFromAlignment(refWindow,
refSequenceInWindow, cssSequenceInWindow,
cssQvInWindow, siteCoverage)
return vars
|
python
|
"""Tests for the /sessions/.../commands routes."""
import pytest
from datetime import datetime
from decoy import Decoy, matchers
from fastapi import FastAPI
from fastapi.testclient import TestClient
from httpx import AsyncClient
from typing import Callable, Awaitable
from tests.helpers import verify_response
from opentrons.protocol_engine import (
CommandStatus,
commands as pe_commands,
errors as pe_errors,
)
from robot_server.service.json_api import ResponseModel
from robot_server.sessions.session_models import BasicSession, SessionCommandSummary
from robot_server.sessions.engine_store import EngineStore
from robot_server.sessions.router.base_router import get_session as real_get_session
from robot_server.sessions.router.commands_router import (
commands_router,
CommandNotFound,
)
@pytest.fixture
def get_session(decoy: Decoy) -> Callable[..., Awaitable[ResponseModel]]:
"""Get a mock version of the get_session route handler."""
return decoy.mock(func=real_get_session)
@pytest.fixture(autouse=True)
def setup_app(
get_session: Callable[..., Awaitable[ResponseModel]],
app: FastAPI,
) -> None:
"""Setup the FastAPI app with commands routes and dependencies."""
app.dependency_overrides[real_get_session] = get_session
app.include_router(commands_router)
async def test_get_session_commands(
decoy: Decoy,
get_session: Callable[..., Awaitable[ResponseModel]],
async_client: AsyncClient,
) -> None:
"""It should return a list of all commands in a session."""
command_summary = SessionCommandSummary(
id="command-id",
commandType="moveToWell",
status=CommandStatus.RUNNING,
)
session_response = BasicSession(
id="session-id",
createdAt=datetime(year=2021, month=1, day=1),
actions=[],
commands=[command_summary],
)
decoy.when(
await get_session(
sessionId="session-id",
session_view=matchers.Anything(),
session_store=matchers.Anything(),
engine_store=matchers.Anything(),
),
).then_return(
ResponseModel(data=session_response) # type: ignore[arg-type]
)
response = await async_client.get("/sessions/session-id/commands")
verify_response(response, expected_status=200, expected_data=[command_summary])
def test_get_session_command_by_id(
decoy: Decoy,
engine_store: EngineStore,
client: TestClient,
) -> None:
"""It should return full details about a command by ID."""
command = pe_commands.MoveToWell(
id="command-id",
status=CommandStatus.RUNNING,
createdAt=datetime(year=2022, month=2, day=2),
data=pe_commands.MoveToWellData(pipetteId="a", labwareId="b", wellName="c"),
)
decoy.when(engine_store.engine.state_view.commands.get("command-id")).then_return(
command
)
response = client.get("/sessions/session-id/commands/command-id")
verify_response(response, expected_status=200, expected_data=command)
def test_get_session_command_missing_command(
decoy: Decoy,
engine_store: EngineStore,
client: TestClient,
) -> None:
"""It should 404 if you attempt to get a non-existent command."""
key_error = pe_errors.CommandDoesNotExistError("oh no")
decoy.when(engine_store.engine.state_view.commands.get("command-id")).then_raise(
key_error
)
response = client.get("/sessions/session-id/commands/command-id")
verify_response(
response,
expected_status=404,
expected_errors=CommandNotFound(detail=str(key_error)),
)
|
python
|
import discord
from discord.ext import commands
import steam
from steam import WebAPI, SteamID
import keys
##TODO: convert to psql or something
import pickle
import asyncio
import os
import re
## Used to track the worst players in MM
class PlayerTracker(commands.Cog):
## Init
def __init__(self, bot):
self.bot = bot
self.fileLock = asyncio.Lock()
self.filePath = "{0}/trackerDB.pickle".format(os.getcwd())
self.loadDatabase()
self.urlRegex = r"\.com/players/(\d+)"
self.api = WebAPI(keys.STEAM_WEBAPI)
## Adds name and comments to the tracker
@commands.command(help="Add a player to the MM Tracker")
async def add(self, ctx, name : str, *, description : str):
print("adding {0} as {1}".format(name, description))
self.database.setdefault(name.lower(), {"name" : name, "description" : []})
self.database[name.lower()]["description"].append(description)
await self.saveDatabase()
await ctx.message.add_reaction('✅')
## searches the tracker for player matches based on query
## this is a rudimentary case insensitive substring search
@commands.command(help="Search the MM Tracker for players")
async def search(self, ctx, *, name : str):
nameL = name.lower()
out = []
async with self.fileLock:
for k in self.database:
if nameL in k:
out.append(self.database[k]["name"])
await ctx.send("\n".join(out))
## display a player and their comments
@commands.command(help="Display information about a specific player in the MM Tracker")
async def display(self, ctx, *, name : str):
async with self.fileLock:
if name.lower() in self.database:
user = self.database[name.lower()]
desc = " **-** {0}".format("\n **-** ".join(user["description"]))
embed = discord.Embed()
embed.color = discord.Colour.purple()
embed.title = user["name"]
embed.add_field(name="Comments", value=desc)
embed.set_footer(text="Added {0} times".format(len(user["description"])))
if("id" in user and not user["id"] is None):
await self.steamEmbed(embed, user["id"])
await ctx.send(embed=embed)
else:
await ctx.send("`{0}` not found".format(name))
async def steamEmbed(self, embed, steamId):
profile = await self.getProfile(steamId)
embed.set_thumbnail(url=profile["avatarfull"])
embed.url = profile["profileurl"]
embed.description = embed.title
embed.title = profile["personaname"]
##75419738
## associate an actual steam profile
@commands.command(help="Associate a steam profile with a given MM Tracker profile.\n <identifier> can be a Steam ID 32/64, Steam Profile url, or DotaBuff/Opendota link.")
async def addProfile(self, ctx, name : str, identifier : str):
if name.lower() in self.database:
user = self.database[name.lower()]
identifier = self.resolveIdentifier(identifier)
try:
self.api.ISteamUser.GetPlayerSummaries(steamids=identifier)
except:
await ctx.send("Error retrieving Steam profile.")
return
user["id"] = SteamID(identifier).as_64
await self.saveDatabase()
await ctx.message.add_reaction('✅')
else:
await ctx.send("`{0}` not found".format(name))
def resolveIdentifier(self, identifier):
m = re.search(self.urlRegex, identifier)
if(m):
return(int(m.group(1)))
ident = steam.steamid.steam64_from_url(identifier)
if(ident):
return(ident)
try:
int(identifier)
return(identifier)
except Exception as e:
pass
return
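    ## Illustrative examples (assumed from the logic above, not in the original):
    ##   ".../players/75419738"              -> 75419738 (matched by urlRegex)
    ##   "https://steamcommunity.com/id/x"   -> 64-bit ID via steam64_from_url
    ##   "75419738"                          -> returned unchanged (numeric string)
    ## Anything else falls through and returns None.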
async def getProfile(self, steamId):
return(self.api.ISteamUser.GetPlayerSummaries(steamids=SteamID(steamId).as_64)["response"]["players"][0])
## Load the tracker database (TODO: convert to psql)
def loadDatabase(self):
self.database = {}
if(os.path.isfile(self.filePath)):
with open(self.filePath, "rb") as f:
self.database = pickle.load(f)
## Save the tracker database (TODO: convert to psql)
async def saveDatabase(self):
async with self.fileLock:
with open(self.filePath, "wb") as f:
pickle.dump(self.database, f)
def setup(bot):
bot.add_cog(PlayerTracker(bot))
|
python
|
__all__ = ['ioc_api', 'ioc_common', 'ioc_et', 'xmlutils']
|
python
|
import modin.pandas as pd
import swifter # Do not remove - this modified bindings for modin
import sys, os
import datetime
import csv
import random
import h5py
import numpy as np
import ipaddress
def write_single_graph(f, graph_id, x, edge_index, y, attrs=None, **kwargs):
'''
store into hdf5 file
'''
f.create_dataset(f'{graph_id}/x', data=x, dtype = 'float32')
f.create_dataset(f'{graph_id}/edge_index', data=edge_index, dtype = 'int64')
f.create_dataset(f'{graph_id}/y', data=y, dtype = 'uint8')
for key in kwargs:
f.create_dataset(f'{graph_id}/{key}', data=kwargs[key])
if attrs is not None:
for key in attrs:
f[f'{graph_id}'].attrs[key] = attrs[key]
return None
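# Illustrative layout note (not in the original source): a call such as
#   write_single_graph(f, 0, x, edge_index, y, attrs={'num_nodes': n})
# is expected to create a group "0" holding datasets "0/x" (float32),
# "0/edge_index" (int64) and "0/y" (uint8), plus one dataset per extra keyword
# argument, with the attrs dict stored as HDF5 attributes on the group.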
def ip2int(ip):
'''
convert x.x.x.x into a number
'''
try:
ip = ip.split(',')[0]
ip = ipaddress.ip_address(ip)
ip = int(ip)
return ip
except:
return random.randint(0, 1<<32)
def search_dict(IP, IP_dict):
'''
use a dictionary to renumber the IPs into 0,1,2,...
'''
if IP not in IP_dict:
IP_dict[IP] = len(IP_dict)
return IP_dict[IP]
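# Worked example (illustrative): repeated lookups share one dict, so IPs are
# renumbered in first-seen order.
#   d = {}
#   search_dict(167772161, d)  # -> 0
#   search_dict(167772162, d)  # -> 1
#   search_dict(167772161, d)  # -> 0 (already seen)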
def prepare_background_(f, start_time, stop_time, NPARTS=30):
#read data
df = pd.read_csv(f, sep = '@')#, nrows = 10000)#
df.columns = ["time", "srcIP", "dstIP"]
# contains per-minute logs
#filter time
df['time'] = df['time'].swifter.set_npartitions(NPARTS).apply(lambda x: datetime.datetime.strptime(x[:21], "%b %d, %Y %H:%M:%S"))
if start_time is not None:
start_time_formated = datetime.datetime.strptime(start_time, "%Y%m%d%H%M%S")
df = df[ df.time >= start_time_formated]
if stop_time is not None:
stop_time_formated = datetime.datetime.strptime(stop_time, "%Y%m%d%H%M%S")
df = df[ df.time < stop_time_formated]
#transform time and IP address into formal type
df["srcIP"] = df["srcIP"].swifter.set_npartitions(NPARTS).apply(ip2int)
df["dstIP"] = df["dstIP"].swifter.set_npartitions(NPARTS).apply(ip2int)
#aggregate nodes according to /20, build dictionary
df['srcIP'] = df['srcIP'].swifter.set_npartitions(NPARTS).apply(lambda x: x >> 12)
df['dstIP'] = df['dstIP'].swifter.set_npartitions(NPARTS).apply(lambda x: x >> 12)
# Drop time column and get rid of duplicates
# Convert to pandas to drop (faster)
df = df._to_pandas()
df = df.drop(columns=['time'])
df = df.drop_duplicates()
# shared dictionary, using across threads will mess it up
#renumber into 0, 1, 2, ..
IP_dict = {}
df["srcIP"] = df["srcIP"].apply(lambda x : search_dict(x, IP_dict))
df["dstIP"] = df["dstIP"].apply(lambda x : search_dict(x, IP_dict))
#write into h5py files
num_nodes = len(IP_dict)
num_edges = df.shape[0]
edge_index = np.array(df[["srcIP", "dstIP"]]).T
return edge_index, num_nodes, num_edges
def prepare_background(f, dst_dir, dst_name, graph_id, start_time, stop_time):
'''
Transform txt files into standard hdf5 format
arg = [txt_file_name, subgroup of graphs]
'''
edge_index, num_nodes, num_edges = prepare_background_(f, start_time, stop_time)
f_h5py = h5py.File(os.path.join(dst_dir,dst_name), 'a')
write_single_graph(f_h5py,
graph_id = graph_id,
x = np.ones([num_nodes, 1]),
edge_index = edge_index,
y = np.zeros(num_nodes),
attrs={'num_nodes': num_nodes, 'num_edges': num_edges, 'num_evils':0})
f_h5py.close()
if __name__ == '__main__':
# prepare_background('equinix-nyc.dirA.20181220-131256.UTC.anon.pcap', '.', 'tmp.hdf5', 0, '20181220081256', '20181220081257')
prepare_background('/p/adversarialml/as9rw/datasets/raw_botnet/temp.tmp',
'/p/adversarialml/as9rw/datasets/raw_botnet', 'tmp.hdf5', 0, None, None)
|
python
|
# Class to generate batches of image data to be fed to model
# inclusive of both original data and augmented data
# https://gist.github.com/devxpy/a73744bab1b77a79bcad553cbe589493
# example
# train_gen = PersonDataGenerator(
# train_df,
# batch_size=32,
# aug_list=[
# ImageDataGenerator(rotation_range=45),
# ImageDataGenerator(horizontal_flip=True),
# ImageDataGenerator(vertical_flip=True),
# ],
# incl_orig=True, # Whether to include original images
# )
from __future__ import division
import os
import os.path as path
import numpy as np
import keras
import cv2
from keras.preprocessing.image import ImageDataGenerator, img_to_array
class PersonDataGenerator(keras.utils.Sequence):
def __init__(self, df, batch_size=32, shuffle=True, aug_list=[], incl_orig=True):
""" Ground truth data batch generator """
self.df = df
self.batch_size=batch_size
self.shuffle = shuffle
self.on_epoch_end()
self.aug_list = aug_list
self.incl_orig = incl_orig
self.orig_len = int(np.floor(self.df.shape[0] / self.batch_size))
# Label columns per attribute
self._gender_cols_ = [col for col in df.columns if col.startswith("gender")]
self._imagequality_cols_ = [col for col in df.columns if col.startswith("imagequality")]
self._age_cols_ = [col for col in df.columns if col.startswith("age")]
self._weight_cols_ = [col for col in df.columns if col.startswith("weight")]
self._carryingbag_cols_ = [col for col in df.columns if col.startswith("carryingbag")]
self._footwear_cols_ = [col for col in df.columns if col.startswith("footwear")]
self._emotion_cols_ = [col for col in df.columns if col.startswith("emotion")]
self._bodypose_cols_ = [col for col in df.columns if col.startswith("bodypose")]
def __len__(self):
"""
Number of batches in the Sequence(i.e per epoch).
"""
if self.incl_orig:
delta = 1
else:
delta = 0
return self.orig_len * (len(self.aug_list) + delta)
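    # Illustrative index mapping (derived from the code above, not part of the
    # original): with orig_len == 10, two augmenters and incl_orig=True,
    # __len__() == 30; __getitem__ maps indices 0-9 to original batches,
    # 10-19 to aug_list[0] and 20-29 to aug_list[1], using index % orig_len
    # to pick the rows.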
def __getitem__(self, index):
"""
        Fetch one batch of images and targets.

        Batches are generated per augmentation strategy in ``aug_list``;
        the original (unaugmented) images are included as well when
        ``incl_orig`` is True.
"""
        # Shift the index into the augmented range when originals are excluded.
        if not self.incl_orig:
            index += self.orig_len
if index > self.orig_len - 1:
aug = self.aug_list[index // self.orig_len - 1]
index %= self.orig_len
else:
aug = None
batch_slice = slice(index * self.batch_size, (index + 1) * self.batch_size)
items = self.df.iloc[batch_slice]
images = np.stack([cv2.imread(item["image_path"]) for _, item in items.iterrows()])
if aug is not None:
images = aug.flow(images, shuffle=False).next()
target = {
"gender_output" : items[self._gender_cols_].values,
"image_quality_output" : items[self._imagequality_cols_].values,
"age_output" : items[self._age_cols_].values,
"weight_output" : items[self._weight_cols_].values,
"bag_output" : items[self._carryingbag_cols_].values,
"pose_output" : items[self._bodypose_cols_].values,
"footwear_output" : items[self._footwear_cols_].values,
"emotion_output" : items[self._emotion_cols_].values,
}
return images, target
def on_epoch_end(self):
"""
Shuffles/sample the df and thereby
updates indexes after each epoch
Method called at the end of every epoch.
"""
        if self.shuffle:
self.df = self.df.sample(frac=1).reset_index(drop=True)
|
python
|
from ._abstract import AbstractSearcher
from ._result import RecipeLink, SearchResult
import urllib.parse
from typing import List
class NyTimes(AbstractSearcher):
def __init__(self):
AbstractSearcher.__init__(self)
@classmethod
def host(cls):
return "https://cooking.nytimes.com"
def build_url(self, keyword, index):
query = urllib.parse.quote_plus(keyword)
return f'https://cooking.nytimes.com/search?q={query}&page={index}'
def parse_results(self, soup) -> List[RecipeLink]:
# Simple HTML lookups.
recipes = soup.find_all('article', class_='recipe-card')
results : List[RecipeLink] = []
for recipe in recipes:
title_block = recipe.find('div', class_='card-info-wrapper').find('a', class_='card-link')
link = self.parse_link(title_block.get('href'))
title = title_block.find('h3').string
results.append(RecipeLink(title.strip(), link, self.host()))
return results
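# Illustrative example (not in the original): with the quoting above,
# build_url("beef stew", 2) would yield
# 'https://cooking.nytimes.com/search?q=beef+stew&page=2'.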
|
python
|
import json
import requests
import time
import hmac
import hashlib
from requests.exceptions import HTTPError
SDK_VERSION = '1.0.0'
CLOCK_DRIFT = 300
class HTTPClient(object):
def request(self, method, url, headers, data=None, auth=None):
raise NotImplementedError('subclass must implement request')
class RequestsClient(HTTPClient):
def request(self, method, url, headers, data=None, auth=None):
return requests.request(method, url, headers=headers, data=data, auth=auth)
class UnexpectedResponse(Exception):
def __init__(self, status, reason, message):
self.status = status
self.reason = reason
self.message = message
@staticmethod
def from_response(data):
return UnexpectedResponse(data.get('status'), data.get('reason'), data.get('message'))
class HostedTransactionResponse(object):
def __init__(self, tokens, hosted_url):
self.tokens = tokens
self.hosted_url = hosted_url
class Tokens(object):
def __init__(self, refresh_token, access_token, client_token, expiry, transaction_id, response):
self.access_token = access_token
self.client_token = client_token
self.refresh_token = refresh_token
self.expiry = expiry
self.transaction_id = transaction_id
self.response = response
def refresh(self, access_token, client_token, expiry, transaction_id):
self.access_token = access_token
self.client_token = client_token
self.expiry = expiry
self.transaction_id = transaction_id
def needs_refresh(self):
return self.access_token is None or self.expiry is None or self.expiry < time.time()
@staticmethod
def from_refresh(refresh_token):
return Tokens(refresh_token, None, None, None, None, None)
class Client(object):
def __init__(self, api_secret=None, **kwargs):
self.api_secret = api_secret
self.api_host = kwargs.get('api_host', 'https://api.berbix.com')
self.http_client = kwargs.get('http_client', RequestsClient())
if self.api_secret is None:
self.api_secret = kwargs.get('client_secret')
if self.api_secret is None:
raise ValueError(
'api_secret must be provided when instantiating the Berbix client')
def __fetch_tokens(self, path, payload):
try:
headers = {
'Content-Type': 'application/json',
'User-Agent': 'BerbixPython/' + SDK_VERSION,
}
result = self.http_client.request(
method='POST',
url='{}{}'.format(self.api_host, path),
headers=headers,
data=json.dumps(payload),
auth=(self.api_secret, ''))
if result.status_code < 200 or result.status_code >= 300:
raise UnexpectedResponse.from_response(
json.loads(result.content))
data = json.loads(result.content)
return Tokens(
data.get('refresh_token'),
data.get('access_token'),
data.get('client_token'),
data.get('expires_in') + time.time(),
data.get('transaction_id'),
data)
except HTTPError as err:
raise err
def create_transaction(self, **kwargs):
payload = {}
if 'email' in kwargs:
payload['email'] = kwargs['email']
if 'phone' in kwargs:
payload['phone'] = kwargs['phone']
if 'customer_uid' in kwargs:
payload['customer_uid'] = str(kwargs['customer_uid'])
else:
raise ValueError(
'customer_uid must be provided when creating a transaction')
if 'template_key' in kwargs:
payload['template_key'] = kwargs['template_key']
else:
raise ValueError(
'template_key must be provided when creating a transaction')
if 'hosted_options' in kwargs:
payload['hosted_options'] = kwargs['hosted_options']
return self.__fetch_tokens('/v0/transactions', payload)
def create_hosted_transaction(self, **kwargs):
if 'hosted_options' not in kwargs:
kwargs['hosted_options'] = {}
tokens = self.__fetch_tokens('/v0/transactions', kwargs)
return HostedTransactionResponse(tokens, tokens.response['hosted_url'])
def refresh_tokens(self, tokens):
return self.__fetch_tokens('/v0/tokens', {
'refresh_token': tokens.refresh_token,
'grant_type': 'refresh_token',
})
def refresh_if_necessary(self, tokens):
if tokens.needs_refresh():
refreshed = self.refresh_tokens(tokens)
tokens.refresh(refreshed.access_token, refreshed.client_token,
refreshed.expiry, refreshed.transaction_id)
def __token_auth_request(self, method, tokens, path, payload=None):
self.refresh_if_necessary(tokens)
try:
headers = {
'Authorization': 'Bearer {0}'.format(tokens.access_token),
'User-Agent': 'BerbixPython/' + SDK_VERSION,
}
data = None
if payload != None:
data = json.dumps(payload)
headers["Content-Type"] = "application/json"
result = self.http_client.request(
method=method,
url='{}{}'.format(self.api_host, path),
headers=headers,
data=data)
if result.status_code < 200 or result.status_code >= 300:
raise UnexpectedResponse.from_response(
json.loads(result.content))
elif result.status_code == 204:
return
return json.loads(result.content)
except HTTPError as err:
raise err
def fetch_transaction(self, tokens):
return self.__token_auth_request('GET', tokens, '/v0/transactions')
def delete_transaction(self, tokens):
return self.__token_auth_request('DELETE', tokens, '/v0/transactions')
def update_transaction(self, tokens, **kwargs):
payload = {}
if 'action' in kwargs:
payload['action'] = kwargs['action']
if 'note' in kwargs:
payload['note'] = kwargs['note']
return self.__token_auth_request('PATCH', tokens, '/v0/transactions', payload)
def override_transaction(self, tokens, **kwargs):
payload = {}
if 'response_payload' in kwargs:
payload['response_payload'] = kwargs['response_payload']
if 'flags' in kwargs:
payload['flags'] = kwargs['flags']
if 'override_fields' in kwargs:
payload['override_fields'] = kwargs['override_fields']
return self.__token_auth_request('PATCH', tokens, '/v0/transactions/override', payload)
def validate_signature(self, secret, body, header):
parts = header.split(',')
# Version (parts[0]) is currently unused
timestamp = parts[1]
signature = parts[2]
if int(timestamp) < time.time() - CLOCK_DRIFT:
return False
message = '{},{},{}'.format(timestamp, secret, body).encode('ascii')
digest = hmac.new(
str.encode(secret),
msg=message,
digestmod=hashlib.sha256
).hexdigest()
return digest == signature
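# Minimal usage sketch (illustrative; all values below are placeholders, not
# real credentials or template keys):
#   client = Client(api_secret='<secret>')
#   tokens = client.create_transaction(customer_uid='123', template_key='tpl_x')
#   transaction = client.fetch_transaction(tokens)
# Webhook payloads can be checked with
#   client.validate_signature('<secret>', body, header)
# where header is the comma-separated "version,timestamp,signature" string
# parsed above.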
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import subprocess
if subprocess.Popen(['./problem']).wait() != 0:
print("Wow, you\'ve crushed this")
    try:
        with open('flag') as flagfile:
            print(flagfile.read())
    except FileNotFoundError:
        print("Flag is missing, tell admin")
|
python
|
"""
Sudo2 is for Loomgild.py
"""
import time
from time import sleep
# Command Functions
def help():
print("Hello!")
print("Welcome to Loomgild, a script that imitates a command line.")
print("This is one of the few commands that you can use.")
print("We will now load the commands you can use..")
print("\n")
sleep(2.00)
print("Sys commands: exit")
print("Core commands: none")
print("Utility commands: help")
print("Misc commands: none")
def output():
prompt1 = input("Please provide the input to output: ")
print(prompt1)
def output_h():
print("Description: A command that outputs the user's input.")
print("Usage: output")
print("Tags: input, interactive")
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to hold temporary routines created during the beamtime.
Everything added here is star (*) imported into the IPython shell after the
``SplitAndDelay`` object has succesfully instantiated. Therefore, it is
recommended to run the specific unit-test to quickly ensure your inserted code
is syntactically correct. More specifically, it will test if this script is
importable. Of course this will not guarantee that the code works as intended,
but it will pick up on any 'easy' mistakes, like a mismatched parenthesis. To
run the test, in the top level directory, first source the snd environment:
source snd_env.sh
Then run the pytest script with the following command:
python run_tests.py hxrsnd/tests/test_scripts.py
The script will run (at least) one test and if your code was written correctly,
it will pass.
"""
# Imports from the Python standard library go here
import logging
# Imports from the third-party modules go here
import numpy as np
from ophyd import Component as Cmp
from ophyd import Device, EpicsSignal
from ophyd.sim import hw
from ophyd.status import wait as status_wait
# Imports from the HXRSnD module go here
import snd_devices
# Imports from other SLAC modules go here
# Default logger
logger = logging.getLogger(__name__)
###############################################################################
# Good Design Practices #
###############################################################################
# # Replace all print() statements with logger.info() statements # #
###############################################################################
# The Main reason for this is the IPython shell will log everything you log in
# log files IFF you use the logger methods, while also printing to the console.
# Even better, is if you include various logger levels. To use the logger,
# simply make the following substitution:
# print("text") --> logger.info("text")
# It is that simple, that the message will now be archived in the info level
# (HXRSnD/logs/info.log) and debug level (HXRSnD/logs/debug.log) log files.
# # Leave Comments # #
###############################################################################
# This seems like it may not be that important, but the purpose of this file is
# to temporarily hold scripts developed during beamtime to then be migrated by
# us (PCDS) into the module. By leaving comments, you make it easier for
# everyone to understand what the code is doing.
###############################################################################
# Insert Code Below #
###############################################################################
hw = hw() # Fake hardware for testing
fake_motor = hw.motor
class NotepadScanStatus(Device):
istep = Cmp(EpicsSignal, ":ISTEP")
isscan = Cmp(EpicsSignal, ":ISSCAN")
nshots = Cmp(EpicsSignal, ":NSHOTS")
nsteps = Cmp(EpicsSignal, ":NSTEPS")
var0 = Cmp(EpicsSignal, ":SCANVAR00")
var1 = Cmp(EpicsSignal, ":SCANVAR01")
var2 = Cmp(EpicsSignal, ":SCANVAR02")
var0_max = Cmp(EpicsSignal, ":MAX00")
var1_max = Cmp(EpicsSignal, ":MAX01")
var2_max = Cmp(EpicsSignal, ":MAX02")
var0_min = Cmp(EpicsSignal, ":MIN00")
var1_min = Cmp(EpicsSignal, ":MIN01")
var2_min = Cmp(EpicsSignal, ":MIN02")
def clean_fields(self):
for sig_name in self.signal_names:
sig = getattr(self, sig_name)
val = sig.value
if isinstance(val, (int, float)):
sig.put(0)
elif isinstance(val, str):
sig.put('')
notepad_scan_status = NotepadScanStatus('XCS:SCAN', name='xcs_scan_status')
def ascan(motor, start, stop, num, events_per_point=360, record=False,
controls=None, **kwargs):
"""
Quick re-implementation of old python for the transition
"""
daq = snd_devices.daq
events = events_per_point
status = notepad_scan_status
status.clean_fields()
if controls is None:
controls = {}
start_pos = motor.position
def get_controls(motor, extra_controls):
out_arr = {motor.name: motor}
out_arr.update(extra_controls)
return out_arr
try:
scan_controls = get_controls(motor, controls)
daq.configure(record=record, controls=scan_controls)
status.isscan.put(1)
status.nshots.put(events_per_point)
status.nsteps.put(num)
status.var0.put(motor.name)
status.var0_max.put(max((start, stop)))
status.var0_min.put(min((start, stop)))
for i, step in enumerate(np.linspace(start, stop, num)):
logger.info('Beginning step {}'.format(step))
try:
mstat = motor.set(step, verify_move=False, **kwargs)
except TypeError:
mstat = motor.set(step, **kwargs)
status.istep.put(i)
status_wait(mstat)
scan_controls = get_controls(motor, controls)
daq.begin(events=events, controls=scan_controls)
logger.info('Waiting for {} events ...'.format(events))
daq.wait()
finally:
logger.info('DONE!')
status.clean_fields()
daq.end_run()
daq.disconnect()
try:
motor.set(start_pos, verify_move=False, **kwargs)
except TypeError:
motor.set(start_pos, **kwargs)
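# Illustrative call (not from the original script): scan the simulated motor
# defined above from 0 to 10 in 11 steps, recording 120 events per point.
#   ascan(fake_motor, 0, 10, 11, events_per_point=120, record=False)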
|
python
|
from random import randint
while True:
print("----------\n[j] para jogar o dado\n[e] para fechar")
res = str(input().replace(" ", "").lower())
if res == "j":
print(f"\nvalor do dado: {randint(1, 6)}")
if res == "e":
break
print("----------\n")
|
python
|
import contextlib
import logging
import os
from pathlib import Path
import shutil
from subprocess import CalledProcessError, check_output
import sys
from tempfile import NamedTemporaryFile
from typing import cast
from spython.main import Client
from lm_zoo import errors
from lm_zoo.backends import ContainerBackend
from lm_zoo.constants import STATUS_CODES
from lm_zoo.models import Model, SingularityModel
L = logging.getLogger(__name__)
@contextlib.contextmanager
def modified_environ(*remove, **update):
"""
Temporarily updates the ``os.environ`` dictionary in-place.
The ``os.environ`` dictionary is updated in-place so that the modification
is sure to work in all situations.
:param remove: Environment variables to remove.
:param update: Dictionary of environment variables and values to add/update.
"""
# https://stackoverflow.com/a/34333710/176075
env = os.environ
update = update or {}
remove = remove or []
# List of environment variables being updated or removed.
stomped = (set(update.keys()) | set(remove)) & set(env.keys())
# Environment variables and values to restore on exit.
update_after = {k: env[k] for k in stomped}
# Environment variables and values to remove on exit.
remove_after = frozenset(k for k in update if k not in env)
try:
env.update(update)
[env.pop(k, None) for k in remove]
yield
finally:
env.update(update_after)
[env.pop(k) for k in remove_after]
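# Usage sketch (illustrative): positional names are removed, keyword arguments
# are set, and the previous environment is restored on exit.
#   with modified_environ('TMPDIR', SINGULARITYENV_FOO='bar'):
#       ...  # code that reads os.environ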
def is_cuda_available():
"""
Hacky method to check whether CUDA is available for use on this host.
"""
if shutil.which("nvidia-smi") is None:
return False
try:
output = check_output(["nvidia-smi", "-L"])
has_gpus = bool(output.strip())
return has_gpus
except CalledProcessError:
return False
class SingularityBackend(ContainerBackend):
@classmethod
def is_compatible(cls, model):
return len(set(model.platforms) & {"singularity", "shub", "library"}) > 0
def image_exists(self, model):
# TODO library, shub
result = Client.inspect(model.reference)
if result.get("return_code", 0) != 0:
return False
return True
def pull_image(self, model, progress_stream=sys.stderr):
if len(set(model.platforms) & {"shub", "library"}) == 0:
if "singularity" in model.platforms:
# It's a local image. Just check that it exists, and raise if
# not.
if not self.image_exists(model):
raise ValueError("Could not find local Singularity image at %s" % (model.reference,))
else:
raise ValueError("Only know how to pull from shub:// and library://"
" . This Singularity model does not come from "
"either repository.")
return Client.pull(image="%s://%s" % (model.repository, model.reference))
def run_command(self, model: Model, command_str,
mounts=None, environment=None,
stdin=None, stdout=sys.stdout, stderr=sys.stderr,
raise_errors=True):
model = cast(SingularityModel, model)
if mounts is None:
mounts = []
if environment is None:
environment = {}
# Support custom checkpoint loading
if model.checkpoint is not None:
host_checkpoint_path = Path(model.checkpoint).absolute()
# Mount given checkpoint read-only within the guest
guest_checkpoint_path = "/opt/lmzoo_checkpoint"
mounts.append((host_checkpoint_path, guest_checkpoint_path, "ro"))
# Update relevant environment variable
environment["LMZOO_CHECKPOINT_PATH"] = guest_checkpoint_path
binds = ["%s:%s:%s" % (host, guest, mode)
for host, guest, mode in mounts]
# TODO make configurable
nv = is_cuda_available()
command = command_str.split(" ")
if stdin is not None:
stdin_f = NamedTemporaryFile("w")
stdin_f.write(stdin.read())
stdin_f.flush()
binds.append("%s:/host_stdin:ro" % stdin_f.name)
command = ["sh", "-c", 'cat /host_stdin | %s' % " ".join(command)]
# TODO no separate stderr support :( manually reroute stderr for now
command.append("2>/dev/null")
# Prepare environment variables for export
environment = {"SINGULARITYENV_%s" % key: value
for key, value in environment.items()}
try:
with modified_environ(**environment):
exec_options = []
# Maximally isolate container from host -- this resolves some
# parallel execution issues we've observed in the past.
exec_options.append("--containall")
result = Client.execute(image=model.reference, command=command,
nv=nv, bind=binds, stream=True,
options=exec_options)
for line in result:
stdout.write(line)
except CalledProcessError as e:
if raise_errors:
if e.returncode == STATUS_CODES["unsupported_feature"]:
feature = command_str.split(" ")[0]
raise errors.UnsupportedFeatureError(feature=feature,
model=str(model))
else:
raise
if stdin is not None:
stdin_f.close()
return result
|
python
|
# Import the forms library:
from django import forms
# List of the possible choices for the select field:
from .pqrsf import PQRSF_CHOICES
# Define the form structure
class ContactFrom(forms.Form):
    """Form fields."""
email = forms.EmailField(label="correo electrónico", widget=forms.EmailInput(attrs={'class':'form-control'}), required=True)
    tipom = forms.ChoiceField(choices = PQRSF_CHOICES, label="Tipo de atencion requerida", initial='', widget=forms.Select(attrs={'class':'form-control'}), required=True)
nombre = forms.CharField(label="nombre", required=True, widget=forms.TextInput(attrs={'class':'form-control'}))
msj = forms.CharField(label="Mensaje", widget=forms.Textarea(attrs={'class':'form-control', 'rows':'3'}), required=True)
|
python
|
def test_load(session, inline):
inline("PASS")
|
python
|
TAG_MAP = {
('landuse', 'forest'): {"TYPE": "forest", "DRAW_TYPE": "plane"},
('natural', 'wood'): {"TYPE": "forest", "SUBTYPE": "natural", "DRAW_TYPE": "plane"}
}
def find_type(tags):
keys = list(tags.items())
return [TAG_MAP[key] for key in keys if key in TAG_MAP]
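# Worked example (illustrative): any (key, value) pair present in TAG_MAP is
# matched; unrelated tags are ignored.
#   find_type({'landuse': 'forest', 'name': 'Black Forest'})
#   # -> [{"TYPE": "forest", "DRAW_TYPE": "plane"}]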
|
python
|
from twilio.twiml.voice_response import VoiceResponse, Dial
def generate_wait():
twiml_response = VoiceResponse()
wait_message = (
'Thank you for calling. Please wait in line for a few seconds.'
' An agent will be with you shortly.'
)
wait_music = 'http://com.twilio.music.classical.s3.amazonaws.com/BusyStrings.mp3'
twiml_response.say(wait_message)
twiml_response.play(wait_music)
return str(twiml_response)
def generate_connect_conference(call_sid, wait_url, start_on_enter, end_on_exit):
twiml_response = VoiceResponse()
dial = Dial()
dial.conference(
call_sid,
start_conference_on_enter=start_on_enter,
end_conference_on_exit=end_on_exit,
wait_url=wait_url,
)
return str(twiml_response.append(dial))
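# Usage sketch (illustrative; the call SID and wait URL are placeholders): the
# caller is parked in a conference named after their call SID and hears
# generate_wait() in a loop via wait_url until an agent joins.
#   twiml = generate_connect_conference(
#       'CAxxxxxxxxxxxxxxxx', '/conference/wait',
#       start_on_enter=False, end_on_exit=True)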
|
python
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from helper import find_csv
grid = plt.GridSpec(2, 2, wspace=0.4, hspace=0.3)
ax1 = plt.subplot(grid[0,0])
ax2= plt.subplot(grid[0,1])
ax3= plt.subplot(grid[1,:])
for i in find_csv():
df = pd.read_csv(i,header=None)
df_forward = df[:int(len(df)/2)]
forward_peak = df_forward[0].iloc[df_forward[1].idxmin()]
df_backward = df[int(len(df)/2):]
df_backward = df_backward.reset_index(drop=True) #Use drop to discard the old index
backward_peak = df_backward[0].iloc[df_backward[1].idxmax()]
#ax1.axvline(x=forward_peak,alpha=0.7)
#ax2.axvline(x=backward_peak,alpha=0.7)
df_forward.plot(x=0,y=1, ax=ax1)
df_backward.plot(x=0,y=1,ax=ax2)
df.plot(x=0,y=1,ax=ax3)
ax1.legend(find_csv(),loc=0)
ax2.legend(find_csv(),loc=0)
ax3.legend(find_csv(),loc=0)
ax1.set_title('Forward scan')
ax2.set_title('Reverse Scan')
ax3.set_title('CV')
plt.show()
|
python
|
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SetMaterialRgbParamNode(Node, ArmLogicTreeNode):
'''Set material rgb param node'''
bl_idname = 'LNSetMaterialRgbParamNode'
bl_label = 'Set Material RGB Param'
bl_icon = 'GAME'
def init(self, context):
self.inputs.new('ArmNodeSocketAction', 'In')
self.inputs.new('NodeSocketShader', 'Material')
self.inputs.new('NodeSocketString', 'Node')
self.inputs.new('NodeSocketColor', 'Color')
self.outputs.new('ArmNodeSocketAction', 'Out')
add_node(SetMaterialRgbParamNode, category='Action')
|
python
|
#!/usr/bin/env python
# encoding: utf-8
from smisk.mvc import *
from smisk.serialization import data
import datetime, time
# Importing the serializers causes them to be registered
import my_xml_serializer
import my_text_serializer
# Some demo data
DEMO_STRUCT = dict(
string = "Doodah",
items = ["A", "B", 12, 32.1, [1, 2, 3]],
float = 0.1,
integer = 728,
dict = dict(
str = "<hello & hi there!>",
unicode = u'M\xe4ssig, Ma\xdf',
true_value = True,
false_value = False,
),
data = data("<binary gunk>"),
more_data = data("<lots of binary gunk>" * 10),
date = datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# Our controller tree
class root(Controller):
def __call__(self, *args, **params):
'''Return some data
'''
return DEMO_STRUCT
def echo(self, *va, **kw):
'''Returns the structure received
'''
if not kw and va:
kw['arguments'] = va
return kw
if __name__ == '__main__':
from smisk.config import config
config.loads('"logging": {"levels":{"":DEBUG}}')
main()
|
python
|
#!/usr/bin/env python
"""Stp - Stock Patterns
Usage: stp_mgr
stp_mgr insider
"""
from docopt import docopt
import stp
print(stp.__file__)
from stp import feed
from stp.feed.insidertrading import data
import sys
def insider():
records = data.get_records()
for record in records:
print(record)
def main():
args = docopt(__doc__)
if args["insider"]:
insider()
if __name__ == "__main__":
main()
|
python
|
# --------------------------------------------------------
# CRPN
# Written by Linjie Deng
# --------------------------------------------------------
import yaml
import caffe
import numpy as np
from fast_rcnn.config import cfg
from fast_rcnn.nms_wrapper import nms
from quad.quad_convert import whctrs, mkanchors, quad_2_aabb, obb_2_quad, dual_roi
from quad.quad_2_obb import quad_2_obb
DEBUG = False
class Corner(object):
# Corner property
def __init__(self, name):
self.name = name
# position
self.pos = None
# probability
self.prb = None
# class of link direction
self.cls = None
class ProposalLayer(caffe.Layer):
# Corner-based Region Proposal Network
# Input: prob map of each corner
# Output: quadrilateral region proposals
def setup(self, bottom, top):
# top: (ind, x1, y1, x2, y2, x3, y3, x4, y4)
layer_params = yaml.load(self.param_str)
self._feat_stride = layer_params['feat_stride']
num_rois = 2 if cfg.DUAL_ROI else 1
top[0].reshape(num_rois, 9)
if len(top) > 1:
top[1].reshape(num_rois, 5)
def forward(self, bottom, top):
# params
        cfg_key = self.phase  # 0 for 'TRAIN', 1 for 'TEST'
if cfg_key == 0:
cfg_ = cfg.TRAIN
else:
cfg_ = cfg.TEST
# corner params
pt_thres = cfg_.PT_THRESH
pt_max_num = cfg.PT_MAX_NUM
pt_nms_range = cfg.PT_NMS_RANGE
pt_nms_thres = cfg.PT_NMS_THRESH
# proposal params
ld_interval = cfg.LD_INTERVAL
ld_um_thres = cfg.LD_UM_THRESH
# rpn params
# min_size = cfg_.RPN_MIN_SIZE
nms_thresh = cfg_.RPN_NMS_THRESH
pre_nms_topN = cfg_.RPN_PRE_NMS_TOP_N
post_nms_topN = cfg_.RPN_POST_NMS_TOP_N
im_info = bottom[0].data[0, :]
score_tl = bottom[1].data[0, :].transpose((1, 2, 0))
score_tr = bottom[2].data[0, :].transpose((1, 2, 0))
score_br = bottom[3].data[0, :].transpose((1, 2, 0))
score_bl = bottom[4].data[0, :].transpose((1, 2, 0))
scores = np.concatenate([score_tl[:, :, :, np.newaxis],
score_tr[:, :, :, np.newaxis],
score_br[:, :, :, np.newaxis],
score_bl[:, :, :, np.newaxis]], axis=3)
map_info = scores.shape[:2]
# 1. sample corner candidates from prob maps
tl, tr, br, bl = _corner_sampling(scores, pt_thres, pt_max_num, pt_nms_range, pt_nms_thres)
# 2. assemble corner candidates into proposals
proposals = _proposal_sampling(tl, tr, br, bl, map_info, ld_interval, ld_um_thres)
# 3. filter
proposals = filter_quads(proposals)
scores = proposals[:, 8]
proposals = proposals[:, :8]
        # 4. rescale quads into raw image space
proposals = proposals * self._feat_stride
        # 5. quadrilateral non-maximum suppression
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
keep = nms(np.hstack((proposals, scores[:, np.newaxis])).astype(np.float32, copy=False), nms_thresh)
proposals = proposals[keep, :]
scores = scores[keep]
if post_nms_topN > 0:
proposals = proposals[:post_nms_topN, :]
scores = scores[:post_nms_topN]
if proposals.shape[0] == 0:
# add whole image to avoid error
print 'NO PROPOSALS!'
proposals = np.array([[0, 0, im_info[1], 0, im_info[1], im_info[0], 0, im_info[0]]])
scores = np.array([0.0])
# output
# top[0]: quads(x1, y1, x2, y2, x3, y3, x4, y4)
# top[1]: rois(xmin, ymin, xmax, ymax, theta)
# top[2]: scores
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
top[0].reshape(*blob.shape)
top[0].data[...] = blob
if len(top) > 1:
if cfg.DUAL_ROI:
rois = quad_2_obb(np.array(proposals, dtype=np.float32))
rois = dual_roi(rois)
else:
rois = quad_2_obb(np.array(proposals, dtype=np.float32))
batch_inds = np.zeros((rois.shape[0], 1), dtype=np.float32)
blob = np.hstack((batch_inds, rois.astype(np.float32, copy=False)))
top[1].reshape(*blob.shape)
top[1].data[...] = blob
if len(top) > 2:
scores = np.vstack((scores, scores)).transpose()
top[2].reshape(*scores.shape)
top[2].data[...] = scores
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def _map_2_corner(pred_map, thresh, max_num, nms_range, nms_thres):
pos_map = 1 - pred_map[:, :, 0]
pts_cls = np.argmax(pred_map[:, :, 1:], 2) + 1
ctr_y, ctr_x = np.where(pos_map >= thresh)
ctr_pts = np.vstack((ctr_x, ctr_y)).transpose()
ws = np.ones(ctr_x.shape) * nms_range
hs = np.ones(ctr_y.shape) * nms_range
anchors = np.hstack((mkanchors(ws, hs, ctr_x, ctr_y), get_value(ctr_pts, pos_map)))
keep = nms(anchors, nms_thres)
if max_num > 0:
keep = keep[:max_num]
pos = ctr_pts[keep, :]
prb = pos_map
cls = pts_cls
return pos, prb, cls
def _corner_sampling(maps, thresh, max_num, nms_range, nms_thres):
tl = Corner('top_left')
tl.pos, tl.prb, tl.cls = _map_2_corner(maps[:, :, :, 0], thresh, max_num, nms_range, nms_thres)
tr = Corner('top_right')
tr.pos, tr.prb, tr.cls = _map_2_corner(maps[:, :, :, 1], thresh, max_num, nms_range, nms_thres)
br = Corner('bot_right')
br.pos, br.prb, br.cls = _map_2_corner(maps[:, :, :, 2], thresh, max_num, nms_range, nms_thres)
bl = Corner('bot_left')
bl.pos, bl.prb, bl.cls = _map_2_corner(maps[:, :, :, 3], thresh, max_num, nms_range, nms_thres)
return tl, tr, br, bl
def _gen_diags(a, b, theta_invl=15, max_diff=1):
max_label = round(360.0 / theta_invl)
idx_a = np.arange(0, a.pos.shape[0])
idx_b = np.arange(0, b.pos.shape[0])
idx_a, idx_b = np.meshgrid(idx_a, idx_b)
idx_a = idx_a.ravel()
idx_b = idx_b.ravel()
diag_pos = np.hstack((a.pos[idx_a, :], b.pos[idx_b, :]))
#
keep = np.where((diag_pos[:, 0] != diag_pos[:, 2]) | (diag_pos[:, 1] != diag_pos[:, 3]))[0]
diag_pos = diag_pos[keep, :]
prac_label = compute_link(diag_pos[:, 0:2], diag_pos[:, 2:4], theta_invl)
pred_label = get_value(diag_pos[:, 0:2], a.cls)
diff_label_a = diff_link(prac_label, pred_label, max_label)
#
prac_label = np.mod(prac_label + max_label / 2, max_label)
pred_label = get_value(diag_pos[:, 2:4], b.cls)
diff_label_b = diff_link(prac_label, pred_label, max_label)
keep = np.where((diff_label_a <= max_diff) & (diff_label_b <= max_diff))[0]
diag_pos = diag_pos[keep, :]
diag_prb = np.hstack((get_value(diag_pos[:, 0:2], a.prb), get_value(diag_pos[:, 2:4], b.prb)))
return diag_pos, diag_prb
def _gen_trias(diag_pos, diag_prb, c, theta_invl=15, max_diff=1):
max_label = 360 / theta_invl
idx_a = np.arange(0, diag_pos.shape[0])
idx_b = np.arange(0, c.pos.shape[0])
idx_a, idx_b = np.meshgrid(idx_a, idx_b)
idx_a = idx_a.ravel()
idx_b = idx_b.ravel()
tria_pos = np.hstack((diag_pos[idx_a, :], c.pos[idx_b, :]))
tria_prb = np.hstack((diag_prb[idx_a, :], get_value(c.pos[idx_b, :], c.prb)))
#
areas = compute_tria_area(tria_pos[:, 0:2], tria_pos[:, 2:4], tria_pos[:, 4:6])
keep = np.where(areas != 0)[0]
tria_pos = tria_pos[keep, :]
tria_prb = tria_prb[keep, :]
ws, hs, ctr_x, ctr_y = whctrs(tria_pos[:, 0:4])
prac_theta = compute_theta(tria_pos[:, 4:6], np.vstack((ctr_x, ctr_y)).transpose())
prac_label = np.floor(prac_theta / theta_invl) + 1
pred_label = get_value(tria_pos[:, 4:6], c.cls)
diff_label = diff_link(prac_label, pred_label, max_label)
keep = np.where(diff_label <= max_diff)[0]
tria_pos = tria_pos[keep, :]
tria_prb = tria_prb[keep, :]
prac_theta = prac_theta[keep]
#
prac_theta = np.mod(prac_theta + 180.0, 360.0) / 180.0 * np.pi
len_diag = np.sqrt(np.sum(np.square(tria_pos[:, 0:2] - tria_pos[:, 2:4]), axis=1)) / 2.
dist_x = len_diag * np.cos(prac_theta[:, 0])
dist_y = len_diag * np.sin(prac_theta[:, 0])
ws, hs, ctr_x, ctr_y = whctrs(tria_pos[:, 0:4])
tria_pos[:, 4:6] = np.vstack((ctr_x + dist_x, ctr_y - dist_y)).astype(np.int32, copy=False).transpose()
return tria_pos, tria_prb
def _get_last_one(tria, d):
map_shape = d.prb.shape[:2]
ws, hs, ctr_x, ctr_y = whctrs(tria[:, 0:4])
pos = np.vstack((2 * ctr_x - tria[:, 4], 2 * ctr_y - tria[:, 5])).transpose()
pos[:, 0] = np.maximum(np.minimum(pos[:, 0], map_shape[1] - 1), 0)
pos[:, 1] = np.maximum(np.minimum(pos[:, 1], map_shape[0] - 1), 0)
pos = np.array(pos, dtype=np.int32)
prb = get_value(pos, d.prb)
return pos, prb
def _clip_trias(tria_pos, tria_prb, c, map_info):
tria_pos[:, 4] = np.maximum(np.minimum(tria_pos[:, 4], map_info[1] - 1), 0)
tria_pos[:, 5] = np.maximum(np.minimum(tria_pos[:, 5], map_info[0] - 1), 0)
tria_prb[:, 2:] = get_value(tria_pos[:, 4:6], c.prb)
return tria_pos, tria_prb
def _proposal_sampling(tl, tr, br, bl, map_info, theta_invl=15, max_diff=1):
# DIAG: [top_left, bot_right]
diag_pos, diag_prb = _gen_diags(tl, br, theta_invl, max_diff)
# TRIA: [DIAG, top_right]
tria_pos, tria_prb = _gen_trias(diag_pos, diag_prb, tr, theta_invl, max_diff)
# QUAD: [TRIA, bot_left]
temp_pos, temp_prb = _get_last_one(tria_pos, bl)
# refine top_right
tria_pos, tria_prb = _clip_trias(tria_pos, tria_prb, tr, map_info)
# assemble
score = compute_score(np.hstack((tria_prb, temp_prb)))
quads = np.hstack((tria_pos[:, 0:2], tria_pos[:, 4:6], tria_pos[:, 2:4], temp_pos))
quads = np.hstack((quads, score[:, np.newaxis]))
# TRIA: [DIAG, bot_left]
tria_pos, tria_prb = _gen_trias(diag_pos, diag_prb, bl, theta_invl, max_diff)
# QUAD: [TRIA, top_right]
temp_pos, temp_prb = _get_last_one(tria_pos, tr)
# refine bot_left
tria_pos, tria_prb = _clip_trias(tria_pos, tria_prb, bl, map_info)
# assemble
score = compute_score(np.hstack((tria_prb, temp_prb)))
quad = np.hstack((tria_pos[:, 0:2], temp_pos, tria_pos[:, 2:4], tria_pos[:, 4:6]))
quad = np.hstack((quad, score[:, np.newaxis]))
quads = np.vstack((quads, quad))
# DIAG: [bot_left, top_right]
diag_pos, diag_prb = _gen_diags(bl, tr, theta_invl, max_diff)
# TRIA: [DIAG, top_left]
tria_pos, tria_prb = _gen_trias(diag_pos, diag_prb, tl, theta_invl, max_diff)
# QUAD: [TRIA, bot_right]
temp_pos, temp_prb = _get_last_one(tria_pos, br)
# refine top_left
tria_pos, tria_prb = _clip_trias(tria_pos, tria_prb, tl, map_info)
# assemble
score = compute_score(np.hstack((tria_prb, temp_prb)))
quad = np.hstack((tria_pos[:, 4:6], tria_pos[:, 2:4], temp_pos, tria_pos[:, 0:2]))
quad = np.hstack((quad, score[:, np.newaxis]))
quads = np.vstack((quads, quad))
    # TRIA: [DIAG, bot_right]
tria_pos, tria_prb = _gen_trias(diag_pos, diag_prb, br, theta_invl, max_diff)
# QUAD: [TRIA, top_left]
temp_pos, temp_prb = _get_last_one(tria_pos, tl)
    # refine bot_right
tria_pos, tria_prb = _clip_trias(tria_pos, tria_prb, br, map_info)
# assemble
score = compute_score(np.hstack((tria_prb, temp_prb)))
quad = np.hstack((tria_pos[:, 0:2], temp_pos, tria_pos[:, 2:4], tria_pos[:, 4:6]))
quad = np.hstack((quad, score[:, np.newaxis]))
quads = np.vstack((quads, quad))
return quads
def get_value(pts, maps):
vals = maps[pts[:, 1], pts[:, 0]]
return vals[:, np.newaxis]
def compute_score(scores):
score = scores[:, 0] * scores[:, 1] * scores[:, 2] * scores[:, 3]
return score
def compute_theta(p1, p2):
dx = p2[:, 0] - p1[:, 0]
dy = p2[:, 1] - p1[:, 1]
val = dx / np.sqrt(dx * dx + dy * dy)
val = np.maximum(np.minimum(val, 1), -1)
theta = np.arccos(val) / np.pi * 180
idx = np.where(dy > 0)[0]
theta[idx] = 360 - theta[idx]
return theta[:, np.newaxis]
def compute_link(p1, p2, interval):
theta = compute_theta(p1, p2)
label = np.floor(theta / interval) + 1
return label
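# Worked example (illustrative): coordinates are image coordinates (y grows
# downward), so a point directly below p1 sits at 270 degrees.
#   compute_theta(np.array([[0, 0]]), np.array([[0, 1]]))      # -> [[270.]]
#   compute_link(np.array([[0, 0]]), np.array([[0, 1]]), 15)   # -> [[19.]]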
def diff_link(t1, t2, max_orient):
dt = np.abs(t2 - t1)
dt = np.minimum(dt, max_orient - dt)
return dt
def compute_tria_area(p1, p2, p3):
area = (p2[:, 0] - p1[:, 0]) * (p3[:, 1] - p1[:, 1]) - \
(p2[:, 1] - p1[:, 1]) * (p3[:, 0] - p1[:, 0])
return area
def filter_quads(quads):
area_1 = compute_tria_area(quads[:, 0:2], quads[:, 2:4], quads[:, 4:6])
area_2 = compute_tria_area(quads[:, 0:2], quads[:, 2:4], quads[:, 6:8])
area_3 = compute_tria_area(quads[:, 0:2], quads[:, 4:6], quads[:, 6:8])
area_4 = compute_tria_area(quads[:, 2:4], quads[:, 4:6], quads[:, 6:8])
areas = area_1 * area_2 * area_3 * area_4
keep = np.where(areas != 0)[0]
quads = quads[keep, :]
return quads
|
python
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from yandex.cloud.access import access_pb2 as yandex_dot_cloud_dot_access_dot_access__pb2
from yandex.cloud.containerregistry.v1 import registry_pb2 as yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__pb2
from yandex.cloud.containerregistry.v1 import registry_service_pb2 as yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
class RegistryServiceStub(object):
"""A set of methods for managing Registry resources.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Get = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/Get',
request_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.GetRegistryRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__pb2.Registry.FromString,
)
self.List = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/List',
request_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.ListRegistriesRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.ListRegistriesResponse.FromString,
)
self.Create = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/Create',
request_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.CreateRegistryRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Update = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/Update',
request_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.UpdateRegistryRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.Delete = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/Delete',
request_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.DeleteRegistryRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.ListAccessBindings = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/ListAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsResponse.FromString,
)
self.SetAccessBindings = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/SetAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.SetAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
self.UpdateAccessBindings = channel.unary_unary(
'/yandex.cloud.containerregistry.v1.RegistryService/UpdateAccessBindings',
request_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.UpdateAccessBindingsRequest.SerializeToString,
response_deserializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.FromString,
)
class RegistryServiceServicer(object):
"""A set of methods for managing Registry resources.
"""
def Get(self, request, context):
"""Returns the specified Registry resource.
To get the list of available Registry resources, make a [List] request.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""Retrieves the list of Registry resources in the specified folder.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Create(self, request, context):
"""Creates a registry in the specified folder.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Updates the specified registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Deletes the specified registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListAccessBindings(self, request, context):
"""access
Lists access bindings for the specified registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetAccessBindings(self, request, context):
"""Sets access bindings for the specified registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateAccessBindings(self, request, context):
"""Updates access bindings for the specified registry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RegistryServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.GetRegistryRequest.FromString,
response_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__pb2.Registry.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.ListRegistriesRequest.FromString,
response_serializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.ListRegistriesResponse.SerializeToString,
),
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.CreateRegistryRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.UpdateRegistryRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_registry__service__pb2.DeleteRegistryRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'ListAccessBindings': grpc.unary_unary_rpc_method_handler(
servicer.ListAccessBindings,
request_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_access_dot_access__pb2.ListAccessBindingsResponse.SerializeToString,
),
'SetAccessBindings': grpc.unary_unary_rpc_method_handler(
servicer.SetAccessBindings,
request_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.SetAccessBindingsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
'UpdateAccessBindings': grpc.unary_unary_rpc_method_handler(
servicer.UpdateAccessBindings,
request_deserializer=yandex_dot_cloud_dot_access_dot_access__pb2.UpdateAccessBindingsRequest.FromString,
response_serializer=yandex_dot_cloud_dot_operation_dot_operation__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'yandex.cloud.containerregistry.v1.RegistryService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
|
python
|
#-----------------------------------------------------------------------
#Copyright 2019 Centrum Wiskunde & Informatica, Amsterdam
#
#Author: Daniel M. Pelt
#Contact: [email protected]
#Website: http://dmpelt.github.io/msdnet/
#License: MIT
#
#This file is part of MSDNet, a Python implementation of the
#Mixed-Scale Dense Convolutional Neural Network.
#-----------------------------------------------------------------------
"""
Module for defining and processing validation sets.
"""
from . import store
from . import operations
from . import loss
import abc
import numpy as np
class Validation(abc.ABC):
"""Base class for processing a validation set."""
@abc.abstractmethod
def validate(self, n):
"""Compute validation metrics.
:param n: :class:`.network.Network` to validate with
:return: True if validation metric is lower than best validation error encountered, False otherwise.
"""
pass
@abc.abstractmethod
def to_dict(self):
"""Compute validation metrics."""
pass
@abc.abstractmethod
def load_dict(self, dct):
"""Return a dictionary containing all network variables and parameters.
:return: all network variables and parameters
"""
pass
@classmethod
@abc.abstractmethod
def from_dict(cls, dct):
"""Initialize Validation object from dictionary.
:param dct: dictionary with all parameters
"""
pass
@classmethod
def from_file(cls, fn):
"""Initialize Validation object from file.
:param fn: filename
"""
dct = store.get_dict(fn, 'validation')
return cls.from_dict(dct)
def to_file(self, fn):
"""Save all Validation object parameters to file.
:param fn: filename
"""
store.store_dict(fn, 'validation', self.to_dict())
class LossValidation(Validation):
"""Validation object that computes simple difference metrics.
:param data: list of :class:`.data.DataPoint` objects to validate with.
:param keep: (optional) whether to keep the best, worst, and typical result in memory.
"""
def __init__(self, data, loss=None, keep=True):
self.d = data
self.keep = keep
self.best = np.Inf
self.loss = loss
def errorfunc(self, output, target, msk):
"""Error function used for validation.
:param output: network output image.
:param target: target image.
        :param msk: mask image to indicate where to compute error function for.
:return: error function value.
"""
lv = self.loss.lossvalue(output, target, msk)
if msk is None:
npix = target.size
else:
npix = target.shape[0]*(msk>0).sum()
return lv/npix
def getbest(self):
"""Return the input, target, and network output for best result.
:return: list of images (input, target, network output)
"""
d = self.d[self.idx[0]]
out = []
out.append(d.input)
out.append(d.target)
if self.keep:
out.append(self.outputs[0])
else:
out.append(self.n.forward(d.input))
return out
def getworst(self):
"""Return the input, target, and network output for worst result.
:return: list of images (input, target, network output)
"""
d = self.d[self.idx[1]]
out = []
out.append(d.input)
out.append(d.target)
if self.keep:
out.append(self.outputs[1])
else:
out.append(self.n.forward(d.input))
return out
def getmedian(self):
"""Return the input, target, and network output for median result.
:return: list of images (input, target, network output)
"""
d = self.d[self.idx[2]]
out = []
out.append(d.input)
out.append(d.target)
if self.keep:
out.append(self.outputs[2])
else:
out.append(self.n.forward(d.input))
return out
def validate(self, n):
self.n = n
errs = np.zeros(len(self.d))
if self.keep:
self.outputs = [0,0,0]
low = np.Inf
high = -np.Inf
self.idx = [0,0,0]
for i,d in enumerate(self.d):
out = self.n.forward(d.input)
err = self.errorfunc(out, d.target, d.mask)
errs[i] = err
if err<low:
low = err
self.idx[0] = i
if self.keep:
self.outputs[0] = out
if err>high:
high = err
self.idx[1] = i
if self.keep:
self.outputs[1] = out
median = np.argsort(errs)[errs.shape[0]//2]
self.idx[2] = median
if self.keep:
if median==self.idx[0]:
self.outputs[2] = self.outputs[0]
elif median==self.idx[1]:
self.outputs[2] = self.outputs[1]
else:
self.outputs[2] = self.n.forward(self.d[median].input)
error = errs.mean()
self.curerr = error
if error<self.best:
self.best = error
return True
return False
def to_dict(self):
dct = {}
dct['best'] = self.best
dct['keep'] = self.keep
return dct
def load_dict(self, dct):
self.best = dct['best']
self.keep = dct['keep']
@classmethod
def from_dict(cls, dct):
v = cls(None, None)
v.load_dict(dct)
return v
# For backwards compatibility, uses L2 norm
class MSEValidation(LossValidation):
def __init__(self, data, keep=True):
super().__init__(data, loss=loss.L2Loss(), keep=keep)
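# Usage sketch (not part of the original module; `network` and `val_data` are
# assumed to be an msdnet Network and a list of DataPoint objects):
#
#     val = LossValidation(val_data, loss=loss.L2Loss())
#     improved = val.validate(network)   # True when the mean error improved
#     if improved:
#         val.to_file('validation.h5')
#     best_input, best_target, best_output = val.getbest()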
|
python
|
import numpy as np
from utils.misc import arr2grid
from planner.astar import AStar
from planner.dijkstra import Dijkstra
from planner.bestfirst import BestFirst
from planner.breadthfirst import BreadthFirst
from planner.bi_astar import BiAStar
from planner.bi_dijkstra import BiDijkstra
from planner.bi_bestfirst import BiBestFirst
# e.g. [[1, 1, 1],
#       [1, 0, 1],
#       [1, 0, 1]]
img = np.array([[1,1,1],[1,0,1],[1,0,1]])
# convert array to networkx graph
grid = arr2grid(img, diagonal=True)
source = (2,0)
target = (2,2)
#target = [(1,1),(2,2),(2,1)]
rp = BiAStar()
route = rp.multi_plan([((2,0), (2,2)), ((2,0), (0,0))], graph=grid)
#route = rp.plan(source, target, grid)
print(route)
|
python
|
#!/usr/bin/env python3
# coding: utf8
# Author: Lenz Furrer, 2017
'''
Formatter base classes.
'''
import os
import io
from lxml import etree
class Formatter:
'''
Base class for all formatters.
'''
ext = None
binary = False # text or binary format?
def __init__(self, config, fmt_name):
self.config = config
self.fmt_name = fmt_name
def export(self, content):
'''
Write this content to disk.
'''
open_params = self._get_open_params(content)
try:
f = open(**open_params)
except FileNotFoundError:
# An intermediate directory didn't exist.
# Create it and try again.
# (Use exist_ok because of race conditions -- another
# worker might have created it in the meantime.)
os.makedirs(os.path.dirname(open_params['file']), exist_ok=True)
f = open(**open_params)
with f:
self.write(f, content)
def write(self, stream, content):
'''
Write this content to an open file.
'''
raise NotImplementedError()
def dump(self, content):
'''
Serialise the content to str or bytes.
'''
raise NotImplementedError()
def _get_open_params(self, content):
path = self.config.get_out_path(content.id_, content.basename,
self.fmt_name, self.ext)
if self.binary:
return dict(file=path, mode='wb')
else:
return dict(file=path, mode='w', encoding='utf8')
class MemoryFormatter(Formatter):
'''
Abstract formatter with a primary dump method.
Subclasses must override dump(), on which write() is based.
'''
def write(self, stream, content):
stream.write(self.dump(content))
class StreamFormatter(Formatter):
'''
Abstract formatter with a primary write method.
Subclasses must override write(), on which dump() is based.
'''
def dump(self, content):
if self.binary:
buffer = io.BytesIO()
else:
buffer = io.StringIO()
self.write(buffer, content)
return buffer.getvalue()
class XMLMemoryFormatter(MemoryFormatter):
'''
Formatter for XML-based output.
Subclasses must define a method _dump() which returns
an lxml.etree.Element node.
'''
ext = 'xml'
binary = True
def dump(self, content):
node = self._dump(content)
return self._tostring(node)
def _dump(self, content):
raise NotImplementedError()
@staticmethod
def _tostring(node, **kwargs):
kwargs.setdefault('encoding', "UTF-8")
kwargs.setdefault('xml_declaration', True)
kwargs.setdefault('pretty_print', True)
return etree.tostring(node, **kwargs)
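# Illustrative example (not part of the original module): a concrete text
# formatter only has to provide `ext` and `dump()`; `write()` comes from
# MemoryFormatter and `export()` from Formatter. It assumes `content` objects
# expose a `text` attribute, which may differ in the real pipeline.
class PlainTextFormatter(MemoryFormatter):
    '''
    Example formatter that dumps the content's plain text.
    '''
    ext = 'txt'
    binary = False
    def dump(self, content):
        return content.text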
|
python
|
# first destination
# even: 1 -> 2
# odd: 1 -> 3
# second destination
# even: 1 -> 3
# odd: 1 -> 2
# third destination
# the smallest disc is stacked on the second one. (everything except the last regroups.)
# fourth
# the third-largest disc moves to the empty peg.
# fifth
# the smallest disc is stacked on the second + third ones. (everything except the last regroups.)
import sys
n = int(sys.stdin.readline())
count = 0
# parameters: number of discs, start peg, target peg, spare peg
def hanoi(total, start, destination, other):
    global count
    # base case - a single remaining disc moves straight to the target peg
    if total == 1:
        count += 1
        print(start, '->', destination)
        return
    # move the top total-1 discs to the spare peg, move the largest disc, then bring the rest back
    hanoi(total-1, start, other, destination)
    count += 1
    print(start, '->', destination)
    hanoi(total-1, other, destination, start)
hanoi(n, 1, 3, 2)
|
python
|
from __future__ import unicode_literals
from .common import InfoExtractor
class YouJizzIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/(?:[^/#?]+)?-(?P<id>[0-9]+)\.html(?:$|[?#])'
_TESTS = [{
'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
'md5': '78fc1901148284c69af12640e01c6310',
'info_dict': {
'id': '2189178',
'ext': 'mp4',
'title': 'Zeichentrick 1',
'age_limit': 18,
}
}, {
'url': 'http://www.youjizz.com/videos/-2189178.html',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
# YouJizz's HTML5 player has invalid HTML
webpage = webpage.replace('"controls', '" controls')
age_limit = self._rta_search(webpage)
video_title = self._html_search_regex(
r'<title>\s*(.*)\s*</title>', webpage, 'title')
info_dict = self._parse_html5_media_entries(url, webpage, video_id)[0]
info_dict.update({
'id': video_id,
'title': video_title,
'age_limit': age_limit,
})
return info_dict
|
python
|
# see https://docs.python.org/3/reference/expressions.html#operator-precedence
# '|' is the least binding numeric operator
# '^'
# OK: 1 | (2 ^ 3) = 1 | 1 = 1
# BAD: (1 | 2) ^ 3 = 3 ^ 3 = 0
print(1 | 2 ^ 3)
# '&'
# OK: 3 ^ (2 & 1) = 3 ^ 0 = 3
# BAD: (3 ^ 2) & 1 = 1 & 1 = 1
print(3 ^ 2 & 1)
# '<<', '>>'
# OK: 2 & (3 << 1) = 2 & 6 = 2
# BAD: (2 & 3) << 1 = 2 << 1 = 4
print(2 & 3 << 1)
# OK: 6 & (4 >> 1) = 6 & 2 = 2
# BAD: (6 & 4) >> 1 = 2 >> 1 = 1
print(6 & 4 >> 1)
# '+', '-'
# OK: 1 << (1 + 1) = 1 << 2 = 4
# BAD: (1 << 1) + 1 = 2 + 1 = 3
print(1 << 1 + 1)
# '*', '/', '//', '%'
# OK: 2 + (2 * 2) = 2 + 4 = 6
# BAD: (2 + 2) * 2 = 4 * 2 = 8
print(2 + 2 * 2)
# '+x', '-x', '~x'
# '**'
# OK: -(2**2) = -4
# BAD: (-2)**2 = 4
print(-2**2)
# OK: 2**(-1) = 0.5
print(2**-1)
# (expr...)
print((2 + 2) * 2)
|
python
|
import sys
import os
def jeff():
print('quick attack the enemy press a to attack b to block c for super ')
bob=10
alice=60
turn1=0
turn2=2
spr=5
mod1=0
mod2=0
speed=0
    print('bob health is ', bob)
    print('alice health is ', alice)
while(alice>0):
print(spr,'spr is')
print('bob health is ',bob)
print('alice health is ',alice)
a4=input("a to attack b to block c for super h for gaining health ")
if a4=='a':
print('bob attacks alice')
alice-=5
turn1+=1
spr+=1
mod1=turn1%2
print('bob health is ',bob)
print('alice health is ',alice)
if mod1 > 0:
print("Alice counter attacks")
bob-=1
spr+=1
speed-=1
print('bob health is ',bob)
print('alice health is ',alice)
else:
print("successful attack")
print('bob health is ',bob)
print('alice health is ',alice)
elif bob < 0:
print('''game over Bob died
press 1 to continue anything else to
quit''')
a5= input("1 or quit")
if a5 == '1' :
jeff()
else:
sys.exit(0)
#break
elif a4=='b':
print('Bob blocks')
            if speed == 0:
                print('Block unsuccessful')
bob-=2
print('bob health is ',bob)
print('alice health is ',alice)
speed+=1
turn2+=1
spr+=1
mod2=turn1%2
print('bob health is ',bob)
print('alice health is ',alice)
if mod2>0:
print('Bob counterattacks')
alice-=10
spr+=1
print('bob health is ',bob)
print('alice health is ',alice)
elif(a4=='c'):
            if spr != 0:  # compare as a number, not a quoted string
print('super attack')
alice-=15
spr=0
speed=0
print(spr,'spr is')
else:
print("No super attack charge up")
print("alice attacks")
bob-=1
speed+=1
turn1+=1
turn2+=1
else:
print("please select a proper option")
turn1+=1
mod1=turn1%2
if mod1 >0 :
print('recovered 2 HP')
bob+=2
mod1=turn2
turn1=turn2
turn2=mod1
print('bob health is ',bob)
print('alice health is ',alice)
jeff()
a3=input("QUICK DRAW YOU gun and shoot 1 for left and 2 for right")
if a3 == '1':
print("you died")
elif a3 == '2':
print("You killed rebel leader and escaped the facility")
else:
print("a swordsman appeared and killed the guard and took your character out")
a = """
A rebellion is rising
I need your help
Then you followed him
"""
print(a)
print("game over")
|
python
|
wild = "https://image.ibb.co/dPStdz/wild.png"
wild_plus_four = "https://image.ibb.co/jKctdz/wild_4.png"
red = {
'0': 'https://image.ibb.co/gnmtB8/red_0.png',
'1': 'https://image.ibb.co/hvRFPT/red_1.png',
'2': 'https://image.ibb.co/f9xN4T/red_2.png',
'3': 'https://image.ibb.co/hDB4Jo/red_3.png',
'4': 'https://image.ibb.co/m5RFPT/red_4.png',
'5': 'https://image.ibb.co/bSVLr8/red_5.png',
'6': 'https://image.ibb.co/dkRFPT/red_6.png',
'7': 'https://image.ibb.co/grPfr8/red_7.png',
'8': 'https://image.ibb.co/jxM4Jo/red_8.png',
'9': 'https://image.ibb.co/j6vydo/red_9.png',
'skip': 'https://image.ibb.co/cHBDTo/red_skip.png',
'reverse': 'https://image.ibb.co/mdoGg8/red_reverse.png',
'+2': 'https://image.ibb.co/hORDTo/red_2.png',
}
green = {
'0': 'https://image.ibb.co/gXUS4T/green_0.png',
'1': 'https://image.ibb.co/iDZGyo/green_1.png',
'2': 'https://image.ibb.co/f1RUJo/green_2.png',
'3': 'https://image.ibb.co/h6fBW8/green_3.png',
'4': 'https://image.ibb.co/mE8byo/green_4.png',
'5': 'https://image.ibb.co/fgOn4T/green_5.png',
'6': 'https://image.ibb.co/iAU5r8/green_7.png',
'7': 'https://image.ibb.co/naituT/green_7.png',
'8': 'https://image.ibb.co/fyMJB8/green_8.png',
'9': 'https://image.ibb.co/iqjido/green_9.png',
'skip': 'https://image.ibb.co/btj6g8/green_skip.png',
'reverse': 'https://image.ibb.co/jDeTuT/green_reverse.png',
'+2': 'https://image.ibb.co/b0gmg8/green_2.png',
}
blue = {
'0': 'https://image.ibb.co/im3vPT/blue_0.png',
'1': 'https://image.ibb.co/k4Aydo/blue_1.png',
'2': 'https://image.ibb.co/efuPJo/blue_2.png',
'3': 'https://image.ibb.co/fyqLr8/blue_3.png',
'4': 'https://image.ibb.co/kbOJdo/blue_4.png',
'5': 'https://image.ibb.co/mHnuJo/blue_5.png',
'6': 'https://image.ibb.co/bW81yo/blue_6.png',
'7': 'https://image.ibb.co/dLwodo/blue_7.png',
'8': 'https://image.ibb.co/nqyejT/blue_8.png',
'9': 'https://image.ibb.co/kRrMyo/blue_9.png',
'skip': 'https://image.ibb.co/buExM8/blue_skip.png',
'reverse': 'https://image.ibb.co/cuQ418/blue_reverse.png',
'+2': 'https://image.ibb.co/nx2TTo/blue_2.png',
}
yellow = {
'0': 'https://image.ibb.co/kf5ZjT/yellow_0.png',
'1': 'https://image.ibb.co/d6o9Jo/yellow_1.png',
'2': 'https://image.ibb.co/ghf0PT/yellow_2.png',
'3': 'https://image.ibb.co/eHZido/yellow_3.png',
'4': 'https://image.ibb.co/d9tLPT/yellow_4.png',
'5': 'https://image.ibb.co/b6CEjT/yellow_5.png',
'6': 'https://image.ibb.co/bzFZjT/yellow_6.png',
'7': 'https://image.ibb.co/eD0ZjT/yellow_7.png',
'8': 'https://image.ibb.co/mfa0PT/yellow_8.png',
'9': 'https://image.ibb.co/h4JLPT/yellow_9.png',
'skip': 'https://image.ibb.co/btpmET/yellow_skip.png',
'reverse': 'https://image.ibb.co/kbt2oo/yellow_reverse.png',
'+2': 'https://image.ibb.co/dvVtuT/yellow_2.png',
}
|
python
|
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
# Copyright 2020 Tecnativa - Pedro M. Baeza
from odoo import api, fields, models
class SaleOrder(models.Model):
_inherit = "sale.order"
type_id = fields.Many2one(
comodel_name="sale.order.type",
string="Type",
compute="_compute_sale_type_id",
store=True,
readonly=True,
states={
"draft": [("readonly", False)],
"sent": [("readonly", False)],
},
default=lambda so: so._default_type_id(),
ondelete="restrict",
copy=True,
)
@api.model
def _default_type_id(self):
return self.env["sale.order.type"].search([], limit=1)
@api.depends("partner_id", "company_id")
def _compute_sale_type_id(self):
for record in self:
if not record.partner_id:
record.type_id = self.env["sale.order.type"].search(
[("company_id", "in", [self.env.company.id, False])], limit=1
)
else:
sale_type = (
record.partner_id.with_company(record.company_id).sale_type
or record.partner_id.commercial_partner_id.with_company(
record.company_id
).sale_type
)
if sale_type:
record.type_id = sale_type
@api.onchange("type_id")
def onchange_type_id(self):
# TODO: To be changed to computed stored readonly=False if possible in v14?
vals = {}
for order in self:
order_type = order.type_id
# Order values
vals = {}
if order_type.payment_term_id:
vals.update({"payment_term_id": order_type.payment_term_id})
if order_type.pricelist_id:
vals.update({"pricelist_id": order_type.pricelist_id})
if vals:
order.update(vals)
@api.model
def create(self, vals):
if vals.get("name", "/") == "/" and vals.get("type_id"):
sale_type = self.env["sale.order.type"].browse(vals["type_id"])
if sale_type.sequence_id:
vals["name"] = sale_type.sequence_id.next_by_id()
return super(SaleOrder, self).create(vals)
def _prepare_invoice(self):
res = super(SaleOrder, self)._prepare_invoice()
if self.type_id.journal_id:
res["journal_id"] = self.type_id.journal_id.id
if self.type_id:
res["sale_type_id"] = self.type_id.id
return res
|
python
|
import importlib
from functools import partial
from multiprocessing import Process, Queue
from flask import Flask, request
app = Flask("serverfull")
bees = ["a", "b"] # TODO: get this from somewhere
workers = {}
def bee_loop(handler, inq, outq):
    # serve requests forever: each worker process pulls a request, runs the handler, and returns the result
    while True:
        args = inq.get()
        print("Got request")
        outq.put(handler(args))
def generic_handler(bee_path):
_, inq, outq = workers[bee_path]
print(f"Putting {request.args}")
inq.put(request.args)
return outq.get()
for bee in bees:
bee_path = f"bees.{bee}"
print(f"Importing {bee_path}")
bee_mod = importlib.import_module(bee_path)
bee_mod = importlib.reload(bee_mod) # TODO: be smarter, but who cares
print(f"/bees/{bee} => {bee_mod.handler}")
inq = Queue()
outq = Queue()
proc = Process(target=bee_loop, args=(bee_mod.handler, inq, outq))
proc.start()
workers[bee_path] = [proc, inq, outq]
app.add_url_rule(f"/bees/{bee}", f"bee.{bee}", partial(generic_handler, (bee_path)))
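# Sketch of what a worker module is assumed to look like (e.g. bees/a.py).
# Each bee module only needs to expose a `handler` callable: it receives the
# Flask request args that generic_handler puts on the queue, and whatever it
# returns is sent back as the HTTP response.
#
#     def handler(args):
#         name = args.get("name", "world")
#         return f"hello {name}"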
|
python
|
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
facedetection = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
while True:
ret, frame = cap.read()
gry = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
face = facedetection.detectMultiScale(gry,1.3,5)
for (x,y,w,h) in face:
cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0), 5)
cv2.imshow('frame',frame)
if cv2.waitKey(1) == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
|
python
|
# Copyright 2017 VMware Inc. All rights reserved.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import copy
from congress import exception
from congress.library_service import library_service
from congress.tests import base
class TestLibraryService(base.SqlTestCase):
def setUp(self):
super(TestLibraryService, self).setUp()
self.library = library_service.LibraryService('lib-test')
self.library.delete_all_policies() # clear pre-loaded library policies
self.policy1 = {'name': 'policy1', 'abbreviation': 'abbr',
'kind': 'database', 'description': 'descrip',
'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]}
self.policy2 = {'name': 'policy2', 'abbreviation': 'abbr',
'kind': 'database', 'description': 'descrip',
'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]}
self.policy1_meta = copy.deepcopy(self.policy1)
self.policy2_meta = copy.deepcopy(self.policy2)
del self.policy1_meta['rules']
del self.policy2_meta['rules']
def test_create_policy_no_name(self):
self.assertRaises(exception.InvalidPolicyInput,
self.library.create_policy, {'rules': []})
def test_create_policy_no_rules(self):
self.assertRaises(exception.InvalidPolicyInput,
self.library.create_policy, {'name': 'policy1'})
def test_create_policy_other_schema_violations(self):
# name too long (255 limit)
policy_item = {
'name': 'policy2', 'abbreviation': 'abbr',
'kind': 'database', 'description': 'descrip',
'rules': [{
'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name':
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'11111111111111111111111111111'}]}
self.assertRaises(exception.InvalidPolicyInput,
self.library.create_policy, policy_item)
# comment too long (255 limit)
policy_item = {
'name': 'policy2', 'abbreviation': 'abbr',
'kind': 'database', 'description': 'descrip',
'rules': [{
'rule': 'p(x) :- q(x)',
'comment':
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'111111111111111111111111111111111111111111111111111111111'
'11111111111111111111111111111',
'name': 'testname'}]}
self.assertRaises(exception.InvalidPolicyInput,
self.library.create_policy, policy_item)
# rule item missing 'rule' property
policy_item = {
'name': 'policy2', 'abbreviation': 'abbr',
'kind': 'database', 'description': 'descrip',
'rules': [{
'comment': 'test comment',
'name': 'testname'}]}
self.assertRaises(exception.InvalidPolicyInput,
self.library.create_policy, policy_item)
def test_create_policy_bad_name(self):
self.assertRaises(exception.PolicyException,
self.library.create_policy,
{'name': 'disallowed-hyphen', 'rules': []})
def test_create_policy_default(self):
res = self.library.create_policy({'name': 'policy1', 'rules': []})
self.assertEqual(res, {'id': res['id'], 'abbreviation': 'polic',
'kind': 'nonrecursive', 'name': 'policy1',
'description': '', 'rules': []})
def test_create_policy(self):
policy_obj = self.library.create_policy(self.policy1)
self.policy1['id'] = policy_obj['id']
self.assertEqual(policy_obj, self.policy1)
def test_create_policy_duplicate(self):
self.library.create_policy({'name': 'policy1', 'rules': []})
self.assertRaises(KeyError, self.library.create_policy,
{'name': 'policy1', 'rules': []})
res = self.library.get_policies()
self.assertEqual(len(res), 1)
def test_get_policy_empty(self):
res = self.library.get_policies()
self.assertEqual(res, [])
self.assertRaises(KeyError, self.library.get_policy,
'nosuchpolicy')
self.assertRaises(KeyError, self.library.get_policy_by_name,
'nosuchpolicy')
def test_create_get_policy(self):
policy_obj = self.library.create_policy(self.policy1)
self.policy1['id'] = policy_obj['id']
self.policy1_meta['id'] = policy_obj['id']
res = self.library.get_policies()
self.assertEqual(res, [self.policy1])
res = self.library.get_policy(policy_obj['id'])
self.assertEqual(res, self.policy1)
res = self.library.get_policy_by_name(policy_obj['name'])
self.assertEqual(res, self.policy1)
res = self.library.get_policies(include_rules=True)
self.assertEqual(res, [self.policy1])
res = self.library.get_policy(policy_obj['id'], include_rules=False)
self.assertEqual(res, self.policy1_meta)
res = self.library.get_policy_by_name(policy_obj['name'],
include_rules=False)
self.assertEqual(res, self.policy1_meta)
self.assertRaises(KeyError, self.library.get_policy, 'no_such_policy')
self.assertRaises(KeyError, self.library.get_policy_by_name,
'no_such_policy')
def test_delete_policy(self):
self.assertRaises(KeyError, self.library.delete_policy,
'policy1')
policy_obj = self.library.create_policy(self.policy1)
self.policy1['id'] = policy_obj['id']
policy_obj = self.library.create_policy(self.policy2)
self.policy2['id'] = policy_obj['id']
res = self.library.get_policies()
self.assertEqual(len(res), 2)
self.assertTrue(all(p in res
for p in [self.policy1, self.policy2]))
self.assertRaises(KeyError, self.library.delete_policy,
'no_such_policy')
res = self.library.delete_policy(self.policy1['id'])
self.assertEqual(res, self.policy1)
res = self.library.get_policies()
self.assertEqual(len(res), 1)
self.assertEqual(res[0], self.policy2)
res = self.library.delete_policy(self.policy2['id'])
self.assertEqual(res, self.policy2)
res = self.library.get_policies()
self.assertEqual(len(res), 0)
def test_delete_policies(self):
self.library.delete_all_policies()
res = self.library.get_policies()
self.assertEqual(len(res), 0)
self.library.create_policy(
{'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database',
'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]})
self.library.create_policy(
{'name': 'policy2', 'abbreviation': 'abbr', 'kind': 'database',
'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]})
self.library.delete_all_policies()
res = self.library.get_policies()
self.assertEqual(len(res), 0)
def test_replace_policy(self):
policy1 = self.library.create_policy(
{'name': 'policy1', 'abbreviation': 'abbr', 'kind': 'database',
'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]})
policy2 = self.library.create_policy(
{'name': 'policy2', 'abbreviation': 'abbr', 'kind': 'database',
'description': 'descrip', 'rules': [{'rule': 'p(x) :- q(x)',
'comment': 'test comment',
'name': 'testname'}]})
replacement_policy = {
"name": "new_name",
"description": "new test policy2 description",
"kind": "nonrecursive",
"abbreviation": "newab",
"rules": [{"rule": "r(x) :- c(x)", "comment": "test comment",
"name": "test name"}]
}
# update non-existent item
self.assertRaises(KeyError,
self.library.replace_policy, 'no_such_id',
replacement_policy)
# update existing item
self.library.replace_policy(policy2['id'], replacement_policy)
replacement_policy_w_id = copy.deepcopy(replacement_policy)
replacement_policy_w_id['id'] = policy2['id']
ret = self.library.get_policies()
self.assertEqual(len(ret), 2)
self.assertTrue(all(p in ret
for p in [policy1,
replacement_policy_w_id]))
|
python
|
# -*- coding: UTF-8 -*-
"""
Automatized configuration and execution of Inspect peptide identification for
a list of spectrum files and a list of reference proteomes. Specifications of
posttranslational modifications can either be directly passed by the user or
assigned to the dataset by its filename (if dataset group is already known).
@author: Anke Penzlin, June 2013
"""
import re
import os
import sys
import optparse
#from InspectParser_FDRcut import parseInspect
from simulation_based_similarity import prepDB, run_inspect
def runInspect_config(spectra,
DBs,
spec_path,
db_path="/data/NG4/anke/proteome/",
inspect_dir = "/home/franziska/bin/Inspect/",
conf = "/data/NG4/anke/Inspect/config_Inspect_py.txt",
user_mods=""):
"""
run Inspect for each pair of spectrum dataset and proteome database using
modifications according to the dataset in the configuration file.
"""
rngDB = range(len(DBs)) # 3 for example
rngSpc = range(len(spectra)) # 2 for example
simMat = [ [0 for i in rngDB] for j in rngSpc ] # initializing output: [[0, 0, 0], [0, 0, 0]]
for i in rngSpc:
specs = spec_path+spectra[i]+".mgf"
for j in rngDB:
db_j = db_path+DBs[j]+"_decoy.trie"
# create trie if necessary (.trie and .index created simultaneously)
if not os.path.exists(db_j):
# a prepare decoyDB input for Inspect (def)
go_on = prepDB(db_path+DBs[j]+"_decoy.fasta", path=inspect_dir) # Convert a protein database into concatenated format.
if not go_on: return
inspect_out = specs[:-4] +"_"+DBs[j]+"_InspectOut.txt" # -4 to remove file extension: .mgf
# prepare configfile for InspecT
conf_out = open(conf,'w')
conf_out.write("spectra,"+specs+"\n")
conf_out.write("instrument,FT-Hybrid\n")
conf_out.write("protease,Trypsin\n")
conf_out.write("DB,"+db_j+"\n")
if not user_mods == "":
conf_out.write(user_mods)
elif re.search("Lacto_131",spectra[i]):
conf_out.write("mod,46.0916,C,fix\n")
conf_out.write("mod,15.994915,M\n")
conf_out.write("# iTraq\n")
conf_out.write("mod,144.1544,K,fix\n")
conf_out.write("mod,144.1544,*,nterminal\n")
print "modifications according to acc. nr. 13105-13162"
sys.stdout.flush()
elif re.search("Shigelladys",spectra[i]):
conf_out.write("mod,46.0916,C,fix\n")
conf_out.write("mod,15.994915,M\n")
print "modifications according to http://www.biomedcentral.com/1471-2180/11/147#sec2"
sys.stdout.flush()
else:
conf_out.write("# Protecting group on cysteine\n")
conf_out.write("mod,57.021464,C,fix\n")
if re.search("Bacicer_113",spectra[i]):
conf_out.write("mod,15.994915,M\n")
print "modifications according to acc. nr. 11339-11362"
sys.stdout.flush()
elif re.search("Bacisub_175",spectra[i]):
conf_out.write("mod,15.994915,M\n")
conf_out.write("mod,119.1423,C\n")
conf_out.write("mod,396.37,C\n")
print "modifications according to acc. nr. 17516-17659"
sys.stdout.flush()
elif re.search("Ecoli_12",spectra[i]):
conf_out.write("mod,32,M,opt\n")
print "modifications according to acc. nr. 12189-12199"
sys.stdout.flush()
elif re.search("Strepyo_1923",spectra[i]):
conf_out.write("mod,15.994915,M\n")
conf_out.write("mod,79.9799,STY\n")
print "modifications according to acc. nr. 19230/19231"
sys.stdout.flush()
elif re.search("CPXV_",spectra[i]):
conf_out.write("mod,15.994915,M\n")#oxidation
conf_out.write("mod,42.010565,*,nterminal\n")#acetylation
print "modifications according to standard configuration (for pox)"
elif re.search("MSSim",spectra[i]):
conf_out.write("mod,0.984016,NQ\n")
conf_out.write("mod,15.994915,M\n")
print "modifications according to (simulation) standard configuration"
sys.stdout.flush()
else:
# conf_out.write("mod,15.994915,M\n")#oxidation
# conf_out.write("mod,42.010565,*,nterminal\n")#acetylation
#conf_out.write("mod,0.984016,NQ\n")
print "modifications according to (unspecified) standard configuration"
sys.stdout.flush()
conf_out.write("mods,2\n")
if re.search("Shigelladys",spectra[i]):
conf_out.write("PMTolerance,1.4\n")
conf_out.write("IonTolerance,0.5\n")
conf_out.write("MultiCharge,3\n")
else:
conf_out.write("ParentPPM,10\n")
conf_out.write("IonTolerance,0.8\n")
conf_out.close()
# run Inspect: match spectra against database
if re.search( "Ecoli_12", spectra[i] ):
AA_file = inspect_dir + "AminoAcidMasses_15N.txt"
if os.path.exists(AA_file):
run_inspect(conf, inspect_out, inspect_dir, "-a "+AA_file)
print "amino acid masses according to 15N (because of special e.coli data set)."
sys.stdout.flush()
else:
run_inspect(conf, inspect_out, inspect_dir)
print "WARNING: file containing amino acid masses according to 15N not found!\nDatabase search using usual file disregarding special e.coli data set)."
sys.stdout.flush()
else:
run_inspect(conf, inspect_out, inspect_dir)
# # evaluate results from Inspect to calculate an FDR-matrix
# simMat[i][j] = parseInspect(inspect_out)[2]
for line in simMat:
print line
if __name__=="__main__":
usage = """%prog SPECTRA DB_LIST -s SPEC_DIR -d DB_DIR
run InsPecT (for multiple spectrum datasets and references),
using known modification options (assigned by filename),
and calculate FDR-corrected identification counts from InsPecT output.
SPECTRA: ','-separated spectrum-filenames (mgf-format) without file extension
DB_LIST: ','-separated proteome-filenames (fasta-format) without file extension
Use the easy mode (--easy) to get a quick understanding of this function.
"""
# configure the parser
optparser = optparse.OptionParser(usage=usage)
optparser.add_option('-s', '--specdir', type='string', dest='spec_dir', default="/data/NG4/anke/spectra/", help='directory of specFiles (absolute path!). [default: %default]')
optparser.add_option('-d', '--dbdir', type='string', dest='db_dir', default="/data/NG4/anke/proteome/", help='directory of proteinDBs (absolute path!). [default: %default]')
optparser.add_option('-c', '--configfile', type='string', dest='config', default="/data/NG4/anke/Inspect/config_Inspect_py.txt", help='a txt-file for Inspect configuration, will be written. [default: %default]')
optparser.add_option('-m', '--mods', type='string', dest='mods', default="", help='a string containing all modifications in question, modification choice by filename if "". [default: %default]')
optparser.add_option('-i', '--inspect_dir', type='string', dest='ins_dir', default="/home/franziska/bin/Inspect", help='directory of Inspect.exe. [default: %default]')
    optparser.add_option('-e', '--easy', type='string', dest='easy', default=None, help='Beginner mode: set to true or True to run this script with the bundled example data.')
# parse options and arguments
options, args = optparser.parse_args()
if not options.easy:
if len(args) == 2:
spectra = args[0].split(',')
db_list = args[1].split(',')
else:
optparser.print_help()
sys.exit(1)
'''db_path = options.db_dir
spec_path = options.spec_dir
configfile = options.config # Inspect configuration file
mods = options.mods
inspect_dir = options.ins_dir'''
runInspect_config(spectra=spectra, DBs=db_list, spec_path=options.spec_dir, db_path=options.db_dir, inspect_dir=options.ins_dir, conf=options.config, user_mods=options.mods)
    # Easy mode
    elif options.easy.lower() == 'true':
runInspect_config(
spectra=['example'],
DBs=['species1', 'species2'],
spec_path='../data/spectra/',
db_path='../data/reference/',
inspect_dir='./inspect/',
conf='./config_files/config_Inspect_py.txt',
user_mods='')
|
python
|
from django.db import models
# Create your models here.
class TipoElectrodomestico(models.Model):
nombre = models.CharField(max_length=200)
foto = models.ImageField(null =True, blank=True)
def __str__(self):
        # Identify an object
return self.nombre
def numProductos(self):
pass
class Producto(models.Model):
nombre = models.CharField(max_length=200)
tipo = models.ForeignKey(TipoElectrodomestico, on_delete=models.CASCADE)
precio = models.IntegerField()
descripcion = models.TextField()
foto = models.ImageField(blank = True, null=True)
calificacion = models.FloatField(default=0)
marca = models.CharField(max_length=20, default="")
ref = models.CharField(max_length=100, default="")
    @property  # => turns a method into an attribute
def tipoEl(self):
#infoTipo = {"nombre": "Televisores", "id":2, "foto":None}
from Productos.serializers import TipoSerial
return TipoSerial(self.tipo).data
def __str__(self):
return self.nombre
@property
def calcularCalificacion(self):
        comentarios = self.comentario_set.all()
        if not comentarios:
            # avoid ZeroDivisionError when a product has no comments yet
            return 0
        calificacion = 0
        for comentario in comentarios:
            calificacion += comentario.calificacion
        return calificacion / len(comentarios)
class Comentario(models.Model):
usuario = models.CharField(max_length=100)
producto = models.ForeignKey(Producto, on_delete=models.CASCADE)
calificacion = models.FloatField()
fecha = models.DateField(auto_now_add=True) #16/09/2021
#DateTimeField() 16/09/2021 - 3:13:40 p.m.
#TimeField()
contenido = models.TextField()
def __str__(self):
return self.usuario + " - " + self.producto.nombre
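# Illustrative sketch (not part of this models.py): TipoSerial, imported inside
# Producto.tipoEl, is assumed to be a DRF ModelSerializer along these lines,
# producing a dict like {"id": 2, "nombre": "Televisores", "foto": None}.
#
#     from rest_framework import serializers
#
#     class TipoSerial(serializers.ModelSerializer):
#         class Meta:
#             model = TipoElectrodomestico
#             fields = ["id", "nombre", "foto"]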
|
python
|
# Copyright 2021 Lucas Fidon and Suprosanna Shit
"""
Data loader for a single case.
This is typically used for inference.
"""
import torch
from monai.data import Dataset, DataLoader
def single_case_dataloader(inference_transform, input_path_dict):
"""
    :param inference_transform: transform to apply to the input data dict
:param input_path_dict: dict; keys=image_keys, values=paths
:return:
"""
data_dicts = [input_path_dict]
ds = Dataset(
data=data_dicts,
transform=inference_transform,
)
loader = DataLoader(
ds,
batch_size=1, # image-level batch to the sliding window method, not the window-level batch
num_workers=0, # you can set it to a value higher than 0 to activate parallel preprocessing; for me it leads to an error...
pin_memory=torch.cuda.is_available(),
)
return loader
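# Usage sketch (assumed keys, file name and transforms; not part of the module):
# the dict keys must match the keys expected by the inference transform.
#
#     from monai.transforms import Compose, LoadImaged, EnsureChannelFirstd
#
#     transform = Compose([
#         LoadImaged(keys=["img"]),
#         EnsureChannelFirstd(keys=["img"]),
#     ])
#     loader = single_case_dataloader(transform, {"img": "/path/to/case.nii.gz"})
#     batch = next(iter(loader))  # dict holding the transformed image, batch size 1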
|
python
|
import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
import threading
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
class WriteToCallback(beam.PTransform):
def __init__(self, callback, lock):
self._callback = callback
self._lock = lock
def expand(self, pcoll):
return pcoll | beam.io.iobase.Write(_CallbackSink(self._callback, self._lock))
class _CallbackSink(beam.io.iobase.Sink):
def __init__(self, callback, lock):
self._callback = callback
self._lock = lock
def initialize_write(self):
pass
def open_writer(self, init_result, uid):
return _CallbackWriter(self._callback, self._lock)
def finalize_write(self, init_result, writer_results):
pass
class _CallbackWriter(beam.io.iobase.Writer):
def __init__(self, callback, lock):
self._callback = callback
self._lock = lock
self._working_data = []
def write(self, record):
self._working_data.append(record)
def close(self):
with self._lock:
self._callback(self._working_data)
def make_dump_to_list(visible_list):
def dump(internal_list):
logging.info("Dumping %s" % internal_list)
visible_list.extend(internal_list)
return dump
input = [1, 2, 3]
visible_list = []
lock = threading.Lock()
p = beam.Pipeline(options=PipelineOptions())
data = p | 'CreateInput' >> beam.Create(input)
data | 'DumpToList' >> WriteToCallback(
make_dump_to_list(visible_list), lock)
result = p.run()
result.wait_until_finish()
logging.info("Pipeline finished.")
logging.info("Input: %s", input)
with lock:
logging.info("Visible output: %s", visible_list)
assert input == visible_list
|
python
|
from sys import path
path.append('/home/joerojas/Desarrollo/Curso-Basico-Python/101_misModulos/modules')
import modulo2
zeroes = [0 for i in range(5)]
ones = [1 for i in range(5)]
print(modulo2.suma(zeroes))
print(modulo2.producto(ones))
|
python
|
'''
Classes from the 'LinkPresentation' framework.
'''
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
LPMultipleMetadataPresentationTransformer = _Class('LPMultipleMetadataPresentationTransformer')
LPLinkHTMLTextGenerator = _Class('LPLinkHTMLTextGenerator')
LPLinkMetadataStoreTransformer = _Class('LPLinkMetadataStoreTransformer')
LPTestingOverrides = _Class('LPTestingOverrides')
LPResources = _Class('LPResources')
LPAnimatedImageTranscoder = _Class('LPAnimatedImageTranscoder')
LPLinkMetadataObserver = _Class('LPLinkMetadataObserver')
LPPresentationSpecializations = _Class('LPPresentationSpecializations')
LPYouTubeURLComponents = _Class('LPYouTubeURLComponents')
LPStatistics = _Class('LPStatistics')
LPLinkMetadataPresentationTransformer = _Class('LPLinkMetadataPresentationTransformer')
LPYouTubePlayerScriptMessageHandler = _Class('LPYouTubePlayerScriptMessageHandler')
LPiTunesMediaURLComponents = _Class('LPiTunesMediaURLComponents')
LPAudio = _Class('LPAudio')
LPAudioProperties = _Class('LPAudioProperties')
LPLinkViewComponents = _Class('LPLinkViewComponents')
LPMessagesPayload = _Class('LPMessagesPayload')
RichLinkAttachmentSubstituter = _Class('RichLinkAttachmentSubstituter')
LPTheme = _Class('LPTheme')
LPThemeParametersObserver = _Class('LPThemeParametersObserver')
LPTapToLoadViewStyle = _Class('LPTapToLoadViewStyle')
LPCaptionBarStyle = _Class('LPCaptionBarStyle')
LPMusicPlayButtonStyle = _Class('LPMusicPlayButtonStyle')
LPVideoViewStyle = _Class('LPVideoViewStyle')
LPVideoPlayButtonStyle = _Class('LPVideoPlayButtonStyle')
LPGlyphStyle = _Class('LPGlyphStyle')
LPImageViewStyle = _Class('LPImageViewStyle')
LPButtonStyle = _Class('LPButtonStyle')
LPShadowStyle = _Class('LPShadowStyle')
LPCaptionBarAccessoryStyle = _Class('LPCaptionBarAccessoryStyle')
LPVerticalTextStackViewStyle = _Class('LPVerticalTextStackViewStyle')
LPTextRowStyle = _Class('LPTextRowStyle')
LPTextViewStyle = _Class('LPTextViewStyle')
LPPadding = _Class('LPPadding')
LPSize = _Class('LPSize')
LPPointUnit = _Class('LPPointUnit')
LPStreamingAudioPlayer = _Class('LPStreamingAudioPlayer')
LPVideo = _Class('LPVideo')
LPVideoAttachmentSubstitute = _Class('LPVideoAttachmentSubstitute')
LPVideoProperties = _Class('LPVideoProperties')
LPEventTimeline = _Class('LPEventTimeline')
LPEvent = _Class('LPEvent')
LPiTunesMediaStorefrontMappings = _Class('LPiTunesMediaStorefrontMappings')
LPAppLinkPresentationProperties = _Class('LPAppLinkPresentationProperties')
LPMetadataProviderSpecializationContext = _Class('LPMetadataProviderSpecializationContext')
LPWebLinkPresentationProperties = _Class('LPWebLinkPresentationProperties')
LPCardHeadingPresentationProperties = _Class('LPCardHeadingPresentationProperties')
LPFullScreenVideoController = _Class('LPFullScreenVideoController')
LPMetadataProvider = _Class('LPMetadataProvider')
LPMIMETypeRegistry = _Class('LPMIMETypeRegistry')
LPiTunesMediaMovieBundleUnresolvedMetadata = _Class('LPiTunesMediaMovieBundleUnresolvedMetadata')
LPiTunesMediaMovieUnresolvedMetadata = _Class('LPiTunesMediaMovieUnresolvedMetadata')
LPiTunesMediaTVShowUnresolvedMetadata = _Class('LPiTunesMediaTVShowUnresolvedMetadata')
LPiTunesMediaTVSeasonUnresolvedMetadata = _Class('LPiTunesMediaTVSeasonUnresolvedMetadata')
LPiTunesMediaTVEpisodeUnresolvedMetadata = _Class('LPiTunesMediaTVEpisodeUnresolvedMetadata')
LPiTunesMediaPodcastUnresolvedMetadata = _Class('LPiTunesMediaPodcastUnresolvedMetadata')
LPiTunesMediaPodcastEpisodeUnresolvedMetadata = _Class('LPiTunesMediaPodcastEpisodeUnresolvedMetadata')
LPiTunesMediaAudioBookUnresolvedMetadata = _Class('LPiTunesMediaAudioBookUnresolvedMetadata')
LPiTunesMediaBookUnresolvedMetadata = _Class('LPiTunesMediaBookUnresolvedMetadata')
LPiTunesMediaSoftwareUnresolvedMetadata = _Class('LPiTunesMediaSoftwareUnresolvedMetadata')
LPiTunesMediaRadioUnresolvedMetadata = _Class('LPiTunesMediaRadioUnresolvedMetadata')
LPiTunesMediaPlaylistUnresolvedMetadata = _Class('LPiTunesMediaPlaylistUnresolvedMetadata')
LPiTunesMediaArtistUnresolvedMetadata = _Class('LPiTunesMediaArtistUnresolvedMetadata')
LPiTunesMediaMusicVideoUnresolvedMetadata = _Class('LPiTunesMediaMusicVideoUnresolvedMetadata')
LPiTunesMediaAlbumUnresolvedMetadata = _Class('LPiTunesMediaAlbumUnresolvedMetadata')
LPiTunesMediaSongUnresolvedMetadata = _Class('LPiTunesMediaSongUnresolvedMetadata')
LPiTunesMediaAsset = _Class('LPiTunesMediaAsset')
LPMediaPlaybackManager = _Class('LPMediaPlaybackManager')
LPiTunesMediaOffer = _Class('LPiTunesMediaOffer')
LPiTunesMediaLookupItemArtwork = _Class('LPiTunesMediaLookupItemArtwork')
LPiTunesStoreInformation = _Class('LPiTunesStoreInformation')
LPSettings = _Class('LPSettings')
LPSharingMetadataWrapper = _Class('LPSharingMetadataWrapper')
LPImagePresentationProperties = _Class('LPImagePresentationProperties')
LPCaptionBarPresentationProperties = _Class('LPCaptionBarPresentationProperties')
LPCaptionRowPresentationProperties = _Class('LPCaptionRowPresentationProperties')
LPCaptionPresentationProperties = _Class('LPCaptionPresentationProperties')
LPCaptionButtonPresentationProperties = _Class('LPCaptionButtonPresentationProperties')
LPVideoViewConfiguration = _Class('LPVideoViewConfiguration')
LPApplicationCompatibilityQuirks = _Class('LPApplicationCompatibilityQuirks')
LPURLSuffixChecker = _Class('LPURLSuffixChecker')
LPFetcherGroup = _Class('LPFetcherGroup')
LPFetcherGroupTask = _Class('LPFetcherGroupTask')
LPFetcherConfiguration = _Class('LPFetcherConfiguration')
LPLinkMetadataStatusTransformer = _Class('LPLinkMetadataStatusTransformer')
LPAssociatedApplicationMetadata = _Class('LPAssociatedApplicationMetadata')
LPSpecializationMetadata = _Class('LPSpecializationMetadata')
LPSummarizedLinkMetadata = _Class('LPSummarizedLinkMetadata')
LPAppStoreStoryMetadata = _Class('LPAppStoreStoryMetadata')
LPWalletPassMetadata = _Class('LPWalletPassMetadata')
LPBusinessChatMetadata = _Class('LPBusinessChatMetadata')
LPSharingStatusMetadata = _Class('LPSharingStatusMetadata')
LPApplePhotosStatusMetadata = _Class('LPApplePhotosStatusMetadata')
LPApplePhotosMomentMetadata = _Class('LPApplePhotosMomentMetadata')
LPAppleTVMetadata = _Class('LPAppleTVMetadata')
LPAppleNewsMetadata = _Class('LPAppleNewsMetadata')
LPFileMetadata = _Class('LPFileMetadata')
LPMapCollectionPublisherMetadata = _Class('LPMapCollectionPublisherMetadata')
LPMapCollectionMetadata = _Class('LPMapCollectionMetadata')
LPMapMetadata = _Class('LPMapMetadata')
LPiCloudFamilyInvitationMetadata = _Class('LPiCloudFamilyInvitationMetadata')
LPGameCenterInvitationMetadata = _Class('LPGameCenterInvitationMetadata')
LPiCloudSharingMetadata = _Class('LPiCloudSharingMetadata')
LPiTunesMediaMovieBundleMetadata = _Class('LPiTunesMediaMovieBundleMetadata')
LPiTunesMediaMovieMetadata = _Class('LPiTunesMediaMovieMetadata')
LPAppleMusicTVShowMetadata = _Class('LPAppleMusicTVShowMetadata')
LPiTunesMediaTVSeasonMetadata = _Class('LPiTunesMediaTVSeasonMetadata')
LPiTunesMediaTVEpisodeMetadata = _Class('LPiTunesMediaTVEpisodeMetadata')
LPiTunesMediaPodcastMetadata = _Class('LPiTunesMediaPodcastMetadata')
LPiTunesMediaPodcastEpisodeMetadata = _Class('LPiTunesMediaPodcastEpisodeMetadata')
LPiTunesMediaAudioBookMetadata = _Class('LPiTunesMediaAudioBookMetadata')
LPiTunesMediaBookMetadata = _Class('LPiTunesMediaBookMetadata')
LPiTunesMediaSoftwareMetadata = _Class('LPiTunesMediaSoftwareMetadata')
LPiTunesMediaRadioMetadata = _Class('LPiTunesMediaRadioMetadata')
LPiTunesMediaPlaylistMetadata = _Class('LPiTunesMediaPlaylistMetadata')
LPiTunesUserProfileMetadata = _Class('LPiTunesUserProfileMetadata')
LPiTunesMediaArtistMetadata = _Class('LPiTunesMediaArtistMetadata')
LPiTunesMediaMusicVideoMetadata = _Class('LPiTunesMediaMusicVideoMetadata')
LPiTunesMediaAlbumMetadata = _Class('LPiTunesMediaAlbumMetadata')
LPiTunesMediaSongMetadata = _Class('LPiTunesMediaSongMetadata')
LPAudioMetadata = _Class('LPAudioMetadata')
LPVideoMetadata = _Class('LPVideoMetadata')
LPArtworkMetadata = _Class('LPArtworkMetadata')
LPImageMetadata = _Class('LPImageMetadata')
LPIconMetadata = _Class('LPIconMetadata')
LPLinkMetadata = _Class('LPLinkMetadata')
LPPlaceholderLinkMetadata = _Class('LPPlaceholderLinkMetadata')
LPLinkHTMLGenerator = _Class('LPLinkHTMLGenerator')
LPApplicationIdentification = _Class('LPApplicationIdentification')
LPImageRemoteURLRepresentation = _Class('LPImageRemoteURLRepresentation')
LPImage = _Class('LPImage')
LPImageAttachmentSubstitute = _Class('LPImageAttachmentSubstitute')
LPImageProperties = _Class('LPImageProperties')
LPMetadataProviderSpecialization = _Class('LPMetadataProviderSpecialization')
LPAppleMapsMetadataProviderSpecialization = _Class('LPAppleMapsMetadataProviderSpecialization')
LPFileMetadataProviderSpecialization = _Class('LPFileMetadataProviderSpecialization')
LPiCloudSharingMetadataProviderSpecialization = _Class('LPiCloudSharingMetadataProviderSpecialization')
LPAppStoreStoryMetadataProviderSpecialization = _Class('LPAppStoreStoryMetadataProviderSpecialization')
LPAppleTVMetadataProviderSpecialization = _Class('LPAppleTVMetadataProviderSpecialization')
LPApplePhotosMetadataProviderSpecialization = _Class('LPApplePhotosMetadataProviderSpecialization')
LPiTunesMediaMetadataProviderSpecialization = _Class('LPiTunesMediaMetadataProviderSpecialization')
LPAppleNewsMetadataProviderSpecialization = _Class('LPAppleNewsMetadataProviderSpecialization')
LPRedditMetadataProviderSpecialization = _Class('LPRedditMetadataProviderSpecialization')
LPStreamingMediaMetadataProviderSpecialization = _Class('LPStreamingMediaMetadataProviderSpecialization')
LPInlineMediaPlaybackInformation = _Class('LPInlineMediaPlaybackInformation')
LPLinkMetadataPreviewTransformer = _Class('LPLinkMetadataPreviewTransformer')
LPFetcherResponse = _Class('LPFetcherResponse')
LPFetcherClipMetadataResponse = _Class('LPFetcherClipMetadataResponse')
LPFetcherErrorResponse = _Class('LPFetcherErrorResponse')
LPFetcherURLResponse = _Class('LPFetcherURLResponse')
LPFetcherStringResponse = _Class('LPFetcherStringResponse')
LPFetcherJSONResponse = _Class('LPFetcherJSONResponse')
LPFetcherImageResponse = _Class('LPFetcherImageResponse')
LPFetcherAccessibilityEnabledImageResponse = _Class('LPFetcherAccessibilityEnabledImageResponse')
LPFetcherAudioResponse = _Class('LPFetcherAudioResponse')
LPFetcherAccessibilityEnabledAudioResponse = _Class('LPFetcherAccessibilityEnabledAudioResponse')
LPFetcherVideoResponse = _Class('LPFetcherVideoResponse')
LPFetcherAccessibilityEnabledVideoResponse = _Class('LPFetcherAccessibilityEnabledVideoResponse')
LPFetcher = _Class('LPFetcher')
LPAssociatedApplicationMetadataFetcher = _Class('LPAssociatedApplicationMetadataFetcher')
LPURLFetcher = _Class('LPURLFetcher')
LPMediaAssetFetcher = _Class('LPMediaAssetFetcher')
LPCSSResolver = _Class('LPCSSResolver')
LPCSSVariable = _Class('LPCSSVariable')
LPHTMLComponent = _Class('LPHTMLComponent')
LPEmailCompatibleHTMLCaptionBarRowComponent = _Class('LPEmailCompatibleHTMLCaptionBarRowComponent')
LPHTMLTextComponent = _Class('LPHTMLTextComponent')
LPEmailCompatibleHTMLVerticalTextStackComponent = _Class('LPEmailCompatibleHTMLVerticalTextStackComponent')
LPEmailCompatibleHTMLQuoteComponent = _Class('LPEmailCompatibleHTMLQuoteComponent')
LPHTMLImageComponent = _Class('LPHTMLImageComponent')
LPHTMLTapToLoadComponent = _Class('LPHTMLTapToLoadComponent')
LPEmailCompatibleHTMLCaptionBarItemComponent = _Class('LPEmailCompatibleHTMLCaptionBarItemComponent')
LPEmailCompatibleHTMLTextComponent = _Class('LPEmailCompatibleHTMLTextComponent')
LPEmailCompatibleHTMLLinkComponent = _Class('LPEmailCompatibleHTMLLinkComponent')
LPHTMLVideoComponent = _Class('LPHTMLVideoComponent')
LPHTMLLinkComponent = _Class('LPHTMLLinkComponent')
LPHTMLImageContainerComponent = _Class('LPHTMLImageContainerComponent')
LPEmailCompatibleHTMLTableComponent = _Class('LPEmailCompatibleHTMLTableComponent')
LPHTMLCaptionBarAccessoryComponent = _Class('LPHTMLCaptionBarAccessoryComponent')
LPHTMLMultipleImageComponent = _Class('LPHTMLMultipleImageComponent')
LPHTMLIconComponent = _Class('LPHTMLIconComponent')
LPHTMLHorizontalCaptionPairComponent = _Class('LPHTMLHorizontalCaptionPairComponent')
LPHTMLCaptionBarComponent = _Class('LPHTMLCaptionBarComponent')
LPHTMLGlyphComponent = _Class('LPHTMLGlyphComponent')
LPEmailCompatibleHTMLInnerLinkComponent = _Class('LPEmailCompatibleHTMLInnerLinkComponent')
LPHTMLVerticalTextStackComponent = _Class('LPHTMLVerticalTextStackComponent')
LPEmailCompatibleHTMLImageComponent = _Class('LPEmailCompatibleHTMLImageComponent')
LPEmailCompatibleHTMLIconComponent = _Class('LPEmailCompatibleHTMLIconComponent')
LPHTMLQuoteComponent = _Class('LPHTMLQuoteComponent')
LPEmailCompatibleHTMLCaptionBarComponent = _Class('LPEmailCompatibleHTMLCaptionBarComponent')
LPActionDisablingCALayerDelegate = _Class('LPActionDisablingCALayerDelegate')
LPiTunesMediaLookupTask = _Class('LPiTunesMediaLookupTask')
LPHighlightGestureRecognizer = _Class('LPHighlightGestureRecognizer')
LPYouTubePlayerView = _Class('LPYouTubePlayerView')
LPLinkView = _Class('LPLinkView')
LPPlayButtonShapeView = _Class('LPPlayButtonShapeView')
LPAnimationMaskView = _Class('LPAnimationMaskView')
LPFlippedView = _Class('LPFlippedView')
LPComponentView = _Class('LPComponentView')
LPTapToLoadView = _Class('LPTapToLoadView')
LPCaptionBarButtonView = _Class('LPCaptionBarButtonView')
LPDomainNameIndicator = _Class('LPDomainNameIndicator')
LPCaptionBarView = _Class('LPCaptionBarView')
LPPlayButtonView = _Class('LPPlayButtonView')
LPCaptionBarAccessoryView = _Class('LPCaptionBarAccessoryView')
LPTextView = _Class('LPTextView')
LPImageView = _Class('LPImageView')
LPImageStackView = _Class('LPImageStackView')
LPVerticalTextStackView = _Class('LPVerticalTextStackView')
LPIndeterminateProgressSpinnerView = _Class('LPIndeterminateProgressSpinnerView')
LPMultipleImageView = _Class('LPMultipleImageView')
LPHorizontalCaptionPairView = _Class('LPHorizontalCaptionPairView')
LPVideoView = _Class('LPVideoView')
LPYouTubeVideoView = _Class('LPYouTubeVideoView')
LPStreamingVideoView = _Class('LPStreamingVideoView')
LPPlayButtonControl = _Class('LPPlayButtonControl')
LPPlaceholderPlayButtonControl = _Class('LPPlaceholderPlayButtonControl')
LPStreamingAudioPlayButtonControl = _Class('LPStreamingAudioPlayButtonControl')
LPiTunesPlayButtonControl = _Class('LPiTunesPlayButtonControl')
LPAVPlayerViewController = _Class('LPAVPlayerViewController')
|
python
|
### Worker threads and payloads for sending web requests
from enum import Enum
from threading import Thread
import requests
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup as bs
from urllib.parse import urljoin
# The Worker class is reused for both stages of execution, so it needs to be able to differentiate itself.
class WorkerType(Enum):
CSS_SCRAPER = 1
CSS_GETTER = 2
class HTTPWorker(Thread):
"""Worker thread to send GET requests to all websites, gathering their styling"""
def __init__(self, request_queue, type):
Thread.__init__(self)
# Request queue to store links
self.queue = request_queue
# Different worker types have different payloads to execute on their data
self.type = type
if type == WorkerType.CSS_SCRAPER:
self.payload = scrape_style
elif type == WorkerType.CSS_GETTER:
self.payload = scrape_urls
else:
# If somehow something else is passed in, throw an error
raise TypeError("Invalid Worker Type")
self.results = {}
        # Initialising HTTP session to get websites, with a real-looking useragent and 3 maximum retries to keep it quick
        self.session = requests.Session()
        self.session.mount("http://", HTTPAdapter(max_retries=3))
        self.session.mount("https://", HTTPAdapter(max_retries=3))
self.session.headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36 Edg/100.0.1185.50"
def run(self):
# Each worker thread remains alive until there are no links left
while not self.queue.empty():
content = self.queue.get()
try:
# Slightly different handling is required for different types due to datatypes
# Could probably be fixed
if self.type == WorkerType.CSS_SCRAPER:
response = self.payload(self.session,content)
self.results[content] = response
elif self.type == WorkerType.CSS_GETTER:
response = self.payload(self.session, content)
self.results[content[0]] = response
            except Exception:
                # skip sites that time out or fail to parse
                continue
finally:
# Mark the website as complete even if error occurs so other threads to not try to get it repeatedly
self.queue.task_done()
def join(self,timeout=None):
        # Gracefully exit the thread by closing the http session; would probably be done automatically anyway
self.session.close()
Thread.join(self, timeout)
### PAYLOADS FUNCTIONS ###
def scrape_style(session, url):
# Returns tuple of external css links and code from <style> tags
print("Getting "+url)
html = session.get(url,timeout=3).text
# Using beautifulsoup to parse html and extra style/link tags
soup = bs(html,'html.parser')
css_files = []
css_tags = []
# Grab all in-html styling
#!TODO Implement some inline styling, not sure if many websites use this
for css in soup.find_all("style"):
css_tags.append(css.text)
for css in soup.find_all("link"):
if css.attrs.get("href"):
css_url=urljoin(url, css.attrs.get("href"))
if "css" in css_url.lower():
css_files.append(css_url)
# Return both results in a tuple, the function that receives the results can deal with that
return (css_files,css_tags)
def scrape_urls(session,data):
# Simply request all external stylesheets and add contents to one long string for processing later
print("Getting external styles for "+data[0])
res = ""
for url in data[1]:
res += session.get(url, timeout=3).text + "\n"
return res
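# Usage sketch (not part of the original module): spin up a pool of scraper
# workers over a queue of URLs, then merge the per-thread result dicts.
# The URL list and worker count are placeholders.
#
#     from queue import Queue
#
#     urls = ["https://example.com", "https://example.org"]
#     q = Queue()
#     for u in urls:
#         q.put(u)
#     workers = [HTTPWorker(q, WorkerType.CSS_SCRAPER) for _ in range(4)]
#     for w in workers:
#         w.start()
#     q.join()
#     results = {}
#     for w in workers:
#         w.join()
#         results.update(w.results)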
|
python
|
import os
from datetime import datetime
from flask import Flask, request, flash, url_for, redirect, \
render_template, abort, send_from_directory
from doctor_api.app import doctor_api_bp
from patient_api.app import patient_api_bp
app = Flask(__name__)
app.config.from_pyfile('flaskapp.cfg')
app.register_blueprint(doctor_api_bp, url_prefix='/doctor-api')
app.register_blueprint(patient_api_bp, url_prefix='/patient-api')
@app.route('/')
def index():
return render_template('index.html')
@app.route('/<path:resource>')
def serveStaticResource(resource):
return send_from_directory('static/', resource)
@app.route("/test")
def test():
return "<strong>It's Alive!</strong>"
if __name__ == '__main__':
app.run(app.config['IP'], app.config['PORT'], debug=True)
|
python
|
# -*- coding: utf-8 -*-
# trump-net (c) Ian Dennis Miller
from flask_security import ConfirmRegisterForm
from flask_wtf.recaptcha import RecaptchaField
class ExtendedRegisterForm(ConfirmRegisterForm):
recaptcha = RecaptchaField()
def validate(self):
rv = ConfirmRegisterForm.validate(self)
if not rv:
return False
return True
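# Usage sketch (assumed wiring, not part of this module): Flask-Security lets
# a custom form replace the default confirmable registration form.
#
#     from flask_security import Security
#
#     security = Security(app, user_datastore,
#                         confirm_register_form=ExtendedRegisterForm)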
|
python
|
from crestdsl.model import * # bad practice, but used for the evaluation of commands
from .simulator import Simulator
import logging
logger = logging.getLogger(__name__)
import io
try:
import colored
from colored import stylize
color_enabled = True
except ImportError:
color_enabled = False
except io.UnsupportedOperation:
color_enabled = False
logger.error("There is an error in the 'colored' package. They use 'fileno'. I guess we have to wait for a fix.")
import random
import sys
class InteractiveSimulator(Simulator):
"""
    This is a simulator that will stop every time two transitions are enabled
    in the same entity at the same time and prompt the user for what to do.
    Next to choosing a transition, users can perform various actions
    (e.g. inspect variables, plot the system or stop the simulation).
"""
def select_transition_to_trigger(self, entity):
""" Override the (random) transition selection procedure. This one asks the user for input."""
transitions_from_current_state = [t for t in get_transitions(entity) if t.source is entity.current]
enabled_transitions = [t for t in transitions_from_current_state if self._get_transition_guard_value(t)]
if len(enabled_transitions) == 1:
return enabled_transitions[0]
elif len(enabled_transitions) > 1:
if color_enabled:
return self.prompt_transition_selection(entity, enabled_transitions)
else:
return self.prompt_transition_selection_no_colored(entity, enabled_transitions)
else:
return None
def prompt_transition_selection_no_colored(self, entity, enabled_transitions):
pad = 1 if len(enabled_transitions) <= 10 else 2
transitions_texts = [idx.rjust(pad) + f" ... {trans._name} (transition to '{trans.target._name}')" for idx, trans in enumerate(enabled_transitions)]
transitions_list = "\n".join(transitions_texts)
longtext = f"""
Non-Determinism detected
There are multiple enabled transitions in entity: {str(entity)}
(Current time: {self.global_time} -- Current automaton state: {entity.current._name})
Choose one of the following transitions by entering the according number:
{transitions_list}
Other commands:
r ... choose a transition randomly
p ... plot the system
pe ... plot the entity in which non-determinism occurs
q! ... to exit the script (not recommended in Jupyter mode)
Any other input will be interpreted.
This means you can use it to e.g. inspect ports values.
The entity {str(entity)} is bound to the variable 'entity'.
Example: entity.my_port.value will print the value of port my_port.
"""
print(longtext)
while True:
prompt = "Your choice: "
userinput = input(prompt).strip() # read input
if userinput == "p":
self.plot()
elif userinput == "pe":
self.plot(entity=entity)
elif userinput == "r":
return random.choice(enabled_transitions)
elif userinput == "q!":
sys.exit()
elif userinput in [str(idx) for idx in range(len(enabled_transitions))]:
choice = int(userinput)
return enabled_transitions[choice] # <<<<< This is the exit of the function, otherwise we're trapped !!
else:
try:
print(eval(userinput))
except:
text = f"I don't understand the input: " + \
userinput + \
f" (Please try again!)"
print(text)
def prompt_transition_selection(self, entity, enabled_transitions):
pad = 1 if len(enabled_transitions) <= 10 else 2
transitions_texts = [stylize(idx, colored.attr("bold")).rjust(pad) + f" ... {trans._name} (transition to '{trans.target._name}')" for idx, trans in enumerate(enabled_transitions)]
transitions_list = "\n".join(transitions_texts)
longtext = f"""
{stylize(' Non-Determinism detected ', colored.fg('black') + colored.bg('dark_orange') + colored.attr('bold'))}
There are multiple enabled transitions in entity: {stylize(' '+str(entity)+' ', colored.fg('black') + colored.bg('yellow_1') + colored.attr('bold'))}
(Current time: {stylize(self.global_time, colored.attr("bold"))} -- Current automaton state: {stylize(entity.current._name, colored.attr("bold"))})
{stylize('Choose one of the following transitions by entering the corresponding number:', colored.attr('underlined'))}
{transitions_list}
{stylize('Other commands:', colored.attr('underlined'))}
{stylize('r', colored.attr("bold"))} ... choose a transition randomly
{stylize('p', colored.attr("bold"))} ... plot the system
{stylize('pe', colored.attr("bold"))} ... plot the entity in which non-determinism occurs
{stylize('q!', colored.attr("bold"))} ... to exit the script (not recommended in Jupyter mode)
{stylize('Any other input will be interpreted.', colored.attr('underlined'))}
This means you can use it to e.g. inspect port values.
The entity {stylize(str(entity), colored.attr('bold'))} is bound to the variable {stylize('entity', colored.attr('bold'))}.
{stylize('Example:', colored.attr('underlined'))} entity.my_port.value will print the value of port my_port.
"""
print(longtext)
while True:
prompt = "Your choice: "
userinput = input(prompt).strip() # read input
if userinput == "p":
self.plot()
elif userinput == "pe":
self.plot(entity=entity)
elif userinput == "r":
return random.choice(enabled_transitions)
elif userinput == "q!":
sys.exit()
elif userinput in [str(idx) for idx in range(len(enabled_transitions))]:
choice = int(userinput)
return enabled_transitions[choice] # <<<<< This is the exit of the function, otherwise we're trapped !!
else:
try:
print(eval(userinput))
except:
text = stylize(f"I don't understand the input: ", colored.fg("red") + colored.attr("bold")) + \
userinput + \
stylize(f" (Please try again!)", colored.fg("red") + colored.attr("bold"))
print(text)
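# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# Assuming the usual crestdsl simulation workflow (build a system, then stabilise and
# advance time), the interactive simulator is constructed like the plain Simulator.
# "MySystem" and the advance duration below are illustrative assumptions only:
#
#   sim = InteractiveSimulator(MySystem())
#   sim.stabilise()
#   sim.advance(10)   # prompts whenever several transitions are enabled at once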
|
python
|
#
# Constant Price Market Making Simulator
#
# simulate different liquidity provision and trading strategies
#
from typing import Tuple
import csv
import numpy as np
import pandas as pd
from numpy.random import binomial, default_rng
# TODO: switch to decimal type and control quantization. numeric errors will kill us quickly
class CPMM(object):
def __init__(self, fee_fraction = 0, fee_to_liquidity_fraction = 0) -> None:
# assert(fee_fraction >= fee_to_liquidity_fraction)
# amount of initial liquidity provided
self.initial_liquidity = 0
# total amount of liquidity
self.liquidity = 0
# total amount of collateral token
self.lp_token = 0
# yes tokens in the pool
self.lp_yes = 0
# no tokens in the pool
self.lp_no = 0
# outstanding tokens held by LP
self.outstanding_yes = 0
self.outstanding_no = 0
self.fee_pool = 0
self.history = []
self.fee_fraction = fee_fraction
        self.fee_to_liquidity_fraction = fee_to_liquidity_fraction  # how much of the fee is reinvested into liquidity provision
    def create_event(self, initial_liquidity, initial_yes_to_no = 1) -> Tuple[int, float]:
        assert(initial_yes_to_no > 0)
        self.initial_liquidity = initial_liquidity
        rv = self._add_liquidity(initial_liquidity, initial_yes_to_no)
n_p = self.lp_yes / self.lp_no
# print(f"invariant P {initial_yes_to_no} {n_p}")
assert(abs(initial_yes_to_no - n_p) < 0.000001)
return rv
def add_liquidity(self, amount) -> Tuple[int, float]:
assert(self.lp_token > 0)
# yes to no must be invariant when liquidity is added
p = self.lp_yes / self.lp_no
rv = self._add_liquidity(amount, p)
n_p = self.lp_yes / self.lp_no
# assert invariant, we use float and disregard rounding so must be within e ~ 0
# print(f"invariant P {p} {n_p}")
assert(abs(p - n_p) < 0.000001)
return rv
def _add_liquidity(self, amount, yes_to_no) -> Tuple[int, float]:
# print("adding liquidity:", amount)
self.liquidity += amount
self.lp_token += amount
# get token type from the ratio
type = 1 if yes_to_no >= 1 else 0
if type:
# more into YES bucket, NO is returned
old_lp_no = self.lp_no
self.lp_no = (amount + self.lp_yes) / yes_to_no
self.lp_yes += amount
tokens_return = amount + old_lp_no - self.lp_no
self.outstanding_no += tokens_return
else:
# more into NO bucket, YES is returned
old_lp_yes = self.lp_yes
self.lp_yes = (amount + self.lp_no) * yes_to_no
self.lp_no += amount
tokens_return = amount + old_lp_yes - self.lp_yes
self.outstanding_yes += tokens_return
entry = ["add", "liquidity", amount, 0, yes_to_no, 0, tokens_return, self.lp_yes, self.lp_no, self.lp_token, self.liquidity, self.fee_pool, 0 ,0]
self._add_history(entry)
# should return amount of outcome token
return (type, amount)
# def remove_liquidity(amount):
def buy_token(self, type, original_amount) -> Tuple[int, float]: #yes=1 | no = 0
# take fee before any operation and store in fee_pool
fee = original_amount * self.fee_fraction
amount = original_amount - fee
self.fee_pool += fee
# adding fee_to_liquidity fraction to liquidity fee pool
# note: liquidity is provided before buy such that added liquidity is available for current transaction
if (self.fee_to_liquidity_fraction > 0):
reinvest_fee = fee * self.fee_to_liquidity_fraction
self.add_liquidity(reinvest_fee)
# keep invariant
k = (self.lp_yes * self.lp_no)
# add liquidity
self.lp_token += amount
if type:
tokens_return, x = self.calc_buy(type, amount)
buy_price_yes = amount / tokens_return
# calc slippage
slippage_yes = self.calc_slippage(type, amount)
assert (slippage_yes > 0), f"slippage_yes {slippage_yes} <= 0"
            # remove returned tokens from the pool, keep all no tokens
self.lp_yes += x
self.lp_no += amount
entry = ["buy", "yes", original_amount, fee, buy_price_yes, slippage_yes, tokens_return, self.lp_yes, self.lp_no, self.lp_token, self.liquidity, self.fee_pool, 0, 0]
else:
tokens_return, x = self.calc_buy(type, amount)
buy_price_no = amount / tokens_return
slippage_no = self.calc_slippage(type, amount)
assert (slippage_no > 0), f"slippage_no {slippage_no} <= 0"
            # remove returned tokens from the pool, keep all yes tokens
self.lp_no += x
self.lp_yes += amount
entry = ["buy", "no", original_amount, fee, buy_price_no, slippage_no, tokens_return, self.lp_yes, self.lp_no, self.lp_token, self.liquidity, self.fee_pool, 0, 0]
# assert invariant, we use float and disregard rounding so must be within e ~ 0
inv_div = abs(k - (self.lp_yes * self.lp_no))
# use variable epsilon - float numbers suck due to scaling
inv_eps = min(self.lp_no, self.lp_yes) / 100000000
if inv_div > inv_eps :
print(f"invariant K {k} {self.lp_yes * self.lp_no} == {inv_div}, lp_yes {self.lp_yes} lp_no {self.lp_no} eps {inv_eps}")
assert(inv_div < inv_eps)
impermanent_loss = self.calc_impermanent_loss()
assert(impermanent_loss >= 0)
# outstanding yes/no token may be converted at event outcome to reward or immediately traded
outstanding_token = self.calc_outstanding_token()
        # impermanent loss goes into the second-to-last history column, outstanding-token loss into the last
entry[-2] = impermanent_loss
entry[-1] = outstanding_token[1]
self._add_history(entry)
return (type, tokens_return)
def calc_withdrawable_liquidity(self) -> float:
# collateral taken from the pool and tokens returned when adding liquidity
return min(self.lp_yes + self.outstanding_yes, self.lp_no + self.outstanding_no)
def calc_payout(self) -> float:
# how big is reward after all liquidity is removed
return self.lp_token - self.calc_withdrawable_liquidity()
def calc_outstanding_token(self) -> Tuple[int, float]:
# outcome tokens going to LP on top of removed liquidity
withdraw_token = self.calc_withdrawable_liquidity()
total_yes = self.lp_yes + self.outstanding_yes
total_no = self.lp_no + self.outstanding_no
if total_yes > total_no:
outstanding_token = (1, total_yes - withdraw_token)
else:
outstanding_token = (0, total_no - withdraw_token)
return outstanding_token
def calc_impermanent_loss(self) -> float:
withdraw_token = self.calc_withdrawable_liquidity()
return self.liquidity - withdraw_token
def calc_buy(self, type, amount) -> Tuple[float, float]:
k = (self.lp_yes * self.lp_no)
if type:
x = k / (self.lp_no + amount) - self.lp_yes
else:
x = k / (self.lp_yes + amount) - self.lp_no
# (tokens returned to the user, amm pool delta)
return amount - x, x
def calc_marginal_price(self, type) -> float:
pool_total = (self.lp_no + self.lp_yes)
return (self.lp_no if type else self.lp_yes) / pool_total
def calc_slippage(self, type, amount) -> float:
tokens_return, _ = self.calc_buy(type, amount)
buy_price = amount / tokens_return
marginal_price = self.calc_marginal_price(type)
return (buy_price - marginal_price) / buy_price
@staticmethod
def calc_british_odds(returned_tokens, amount) -> float:
# british odds https://www.investopedia.com/articles/investing/042115/betting-basics-fractional-decimal-american-moneyline-odds.asp
# shows the reward on top of stake as a decimal fraction to the stake
# (TODO: we could use Fraction class of python for nice odds representation)
        # may be negative due to cpmm inefficiencies
return (returned_tokens - amount) / amount
# def sell_token(type, amount):
# def get_buy_price_yes():
# def get_sell_price_yes():
_csv_headers = [
"activity", "type", "amount", "fee", "token_buy_sell_price",
"slippage", "returned tokens", "lp_yes", "lp_no", "lp_token",
"liquidity", "fee_pool", "impermanent_loss", "loss_outstanding_tokens"
]
@property
def history_as_dataframe(self) -> pd.DataFrame:
return pd.DataFrame(data=self.history, columns=CPMM._csv_headers)
def save_history(self, name) -> None:
df = self.history_as_dataframe
with open(name, "wt") as f:
df.to_csv(f, index=False, quoting=csv.QUOTE_NONNUMERIC)
def _add_history(self, entry) -> None:
# check entry size
assert(len(entry) == len(CPMM._csv_headers))
self.history.append(entry)
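# Hedged worked example (added for illustration, not part of the original simulator):
# a single YES purchase on a fresh 50:50 pool, showing the constant-product math that
# buy_token/calc_buy implement. All numbers follow from the CPMM class above; the
# function name is illustrative and it is not called anywhere.
def _example_constant_product_buy():
    amm = CPMM(fee_fraction=0.02)
    amm.create_event(1000)                 # lp_yes = lp_no = 1000, so k = 1_000_000
    k_before = amm.lp_yes * amm.lp_no
    _, yes_tokens = amm.buy_token(1, 100)  # 2% fee -> 98 collateral enters the pool
    # new lp_yes = k / (lp_no + 98) ~= 910.75, so the buyer receives
    # 98 + (1000 - 910.75) ~= 187.25 YES tokens at an average price of ~0.523
    print(f"YES tokens received: {yes_tokens:.2f}")
    print(f"british odds: {CPMM.calc_british_odds(yes_tokens, 100):.3f}")
    # the constant-product invariant k = lp_yes * lp_no holds up to rounding error
    assert abs(k_before - amm.lp_yes * amm.lp_no) < 1e-3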
def run_experiment(name, cpmm: CPMM, n, prior_dist, betting_dist):
# TODO: must have realistic model for betting behavior, for example
# total bets volume cannot cross % of liquidity
# individual bet cannot have slippage > 1% etc.
bet_outcomes = prior_dist(n)
bet_amounts = betting_dist(n)
print(f"{name}: bet outcomes N/Y {np.bincount(bet_outcomes)}")
for b, amount in zip(bet_outcomes, bet_amounts):
cpmm.buy_token(b, amount)
# print(cpmm.history)
cpmm.save_history(f"{name}.csv")
def main():
rng = default_rng()
# experiment 1
    # 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors' prior 50:50, bets integer uniform range [1, 100]
cpmm = CPMM()
cpmm.create_event(1000)
run_experiment(
"experiment1",
cpmm,
1000,
lambda size: rng.binomial(1, 0.5, size),
lambda size: rng.integers(1, 100, endpoint=True, size=size)
)
# experiment 2
    # 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors' prior 70:30, bets integer uniform range [1, 100]
cpmm = CPMM()
cpmm.create_event(1000)
run_experiment(
"experiment2",
cpmm,
1000,
lambda size: rng.binomial(1, 0.7, size),
lambda size: rng.integers(1, 100, endpoint=True, size=size)
)
# experiment 3
    # 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors' prior 70:30, bets integer uniform range [1, 100]
# fee 2% taken and not added to liquidity pool
cpmm = CPMM(fee_fraction=0.02)
cpmm.create_event(1000)
run_experiment(
"experiment3",
cpmm,
1000,
lambda size: rng.binomial(1, 0.7, size),
lambda size: rng.integers(1, 100, endpoint=True, size=size)
)
# experiment 4
    # 1000 rounds, initial liquidity 50:50 1000 EVNT, bettors' prior 50:50, bets integer uniform range [1, 100]
# fee 2% taken and 50% added to liquidity pool
cpmm = CPMM(fee_fraction=0.02, fee_to_liquidity_fraction=0.5)
cpmm.create_event(1000)
run_experiment(
"experiment4",
cpmm,
1000,
lambda size: rng.binomial(1, 0.5, size),
lambda size: rng.integers(1, 100, endpoint=True, size=size)
)
# experiment 5
    # 1000 rounds, initial liquidity 1:3 1000 EVNT, bettors' prior 50:50, bets integer uniform range [1, 100]
# fee 2% taken and 50% added to liquidity pool
cpmm = CPMM(fee_fraction=0.02, fee_to_liquidity_fraction=0.5)
    cpmm.create_event(1000, initial_yes_to_no=1/3)  # 1:3 yes:no ratio per the comment above (assumed interpretation; the original passed no ratio)
run_experiment(
"experiment5",
cpmm,
1000,
lambda size: rng.binomial(1, 0.5, size),
lambda size: rng.integers(1, 100, endpoint=True, size=size)
)
if __name__ == "__main__":
main()
|
python
|
load("@bazel_gazelle//:deps.bzl", "go_repository")
def load_external_go_repositories():
########## Server request handling ###############
go_repository(
name = "com_github_andybalholm_brotli",
importpath = "github.com/andybalholm/brotli",
commit = "1d750214c25205863625bb3eb8190a51b2cef26d", # Sep 22, 2021
)
go_repository(
name = "com_github_valyala_bytebufferpool",
importpath = "github.com/valyala/bytebufferpool",
commit = "18533face0dfe7042f8157bba9010bd7f8df54b1", # Nov 4, 2020
)
go_repository(
name = "com_github_klauspost_compress",
importpath = "github.com/klauspost/compress",
tag = "v1.14.2", # Jan 25, 2022
)
go_repository(
name = "com_github_valyala_fasthttp",
importpath = "github.com/valyala/fasthttp",
tag = "v1.33.0",
)
go_repository(
name = "com_github_buaazp_fasthttprouter",
importpath = "github.com/buaazp/fasthttprouter",
tag = "979d6e516ec324575737805deabe0303794c58bd", # Jan 9, 2019
)
########## Logging ###############
go_repository(
name = "org_uber_go_atomic",
importpath = "go.uber.org/atomic",
tag = "v1.9.0", # Jul 15, 2021
)
go_repository(
name = "org_uber_go_multierr",
importpath = "go.uber.org/multierr",
tag = "v1.7.0", # May 6, 2021
)
go_repository(
name = "org_uber_go_zap",
importpath = "go.uber.org/zap",
tag = "v1.21.0", # Feb 7, 2022
)
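# Hedged usage sketch (the .bzl path below is an assumption, not taken from this repo):
# the macro is meant to be loaded and invoked from the WORKSPACE file, e.g.
#
#   load("//third_party:go_repositories.bzl", "load_external_go_repositories")
#   load_external_go_repositories()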
|
python
|
from datapackage_pipelines.wrapper import ingest, spew
from datapackage_pipelines.utilities.resources import PROP_STREAMING
from nli_z3950.load_marc_data import get_marc_records_schema, parse_record
from pymarc.marcxml import parse_xml_to_array
import datetime, json
def get_resource(parameters, stats):
stats['search_rows'] = 0
for filenum in parameters['filenums']:
filepath = parameters['files-path-template'].format(filenum=filenum)
search_id = 'neaman{}'.format(filenum)
with open(filepath) as f:
for record_num, record in enumerate(parse_xml_to_array(f)):
row = parse_record(record)
migdar_id = '{}-{}'.format(search_id, record_num)
row.update(migdar_id=migdar_id, first_ccl_query='neaman{}.xml'.format(filenum),
last_query_datetime=datetime.datetime.now(),
json=json.loads(row['json']))
stats['search_rows'] += 1
yield row
def get_resources(resources, parameters, stats):
for resource in resources:
yield resource
yield get_resource(parameters, stats)
def get_datapackage(datapackage):
schema = get_marc_records_schema()
schema['fields'] += [{'name': 'migdar_id', 'type': 'string'},
{'name': 'first_ccl_query', 'type': 'string'},
{'name': 'last_query_datetime', 'type': 'datetime'},
{'name': 'json', 'type': 'object'}]
datapackage['resources'].append({'name': 'search_haifa_files',
'path': 'search_haifa_files.csv',
PROP_STREAMING: True,
'schema': schema})
return datapackage
def main():
parameters, datapackage, resources, stats = ingest() + ({},)
spew(get_datapackage(datapackage),
get_resources(resources, parameters, stats),
stats)
if __name__ == '__main__':
main()
|
python
|
import os
import sqlite3
class DatabaseRepository:
def __init__(self, database_file, schema_file):
db_is_new = not os.path.exists(database_file)
self.connection = sqlite3.connect(database_file, check_same_thread=False)
if db_is_new:
with open(schema_file, 'rt') as f:
schema = f.read()
self.connection.executescript(schema)
def close_db(self):
self.connection.commit()
self.connection.close()
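# Hedged usage sketch (added for illustration; the file and table names are assumptions):
if __name__ == '__main__':
    with open('schema_example.sql', 'w') as f:
        f.write('CREATE TABLE IF NOT EXISTS items (id INTEGER PRIMARY KEY, name TEXT);')
    # the schema script is executed only when example.db does not exist yet
    repo = DatabaseRepository('example.db', 'schema_example.sql')
    repo.connection.execute("INSERT INTO items (name) VALUES (?)", ("example",))
    repo.close_db()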
|
python
|
"""Utility methods for interacting with Kubernetes API server.
This module is merged into the `metalk8s_kubernetes` execution module,
by virtue of its `__virtualname__`.
"""
from __future__ import absolute_import
from salt.exceptions import CommandExecutionError
import salt.utils.files
import salt.utils.templates
import salt.utils.yaml
MISSING_DEPS = []
try:
import kubernetes.client
from kubernetes.client.rest import ApiException
except ImportError:
MISSING_DEPS.append("kubernetes.client")
try:
import kubernetes.config
except ImportError:
MISSING_DEPS.append("kubernetes.config")
try:
from urllib3.exceptions import HTTPError
except ImportError:
MISSING_DEPS.append("urllib3")
__virtualname__ = "metalk8s_kubernetes"
def __virtual__():
if MISSING_DEPS:
return False, "Missing dependencies: {}".format(", ".join(MISSING_DEPS))
return __virtualname__
def get_kubeconfig(**kwargs):
"""
    Get the kubeconfig and context from kwargs, from pillar, or from the
    salt-master configuration.
Pillar value from `metalk8s.api_server.kubeconfig` and
`metalk8s.api_server.context`
Salt master config from `kubernetes.kubeconfig` and `kubernetes.context`
CLI Examples:
.. code-block:: bash
salt-call metalk8s_kubernetes.get_kubeconfig kubeconfig="/etc/kubernetes/admin.conf"
salt-call metalk8s_kubernetes.get_kubeconfig
Code Example:
.. code-block:: python
kubeconfig, context = __salt__['metalk8s_kubernetes.get_kubeconfig'](**kwargs)
"""
pillar_dict = __pillar__.get("metalk8s", {}).get("api_server", {})
kubeconfig = (
kwargs.get("kubeconfig")
or pillar_dict.get("kubeconfig")
or __salt__["config.option"]("kubernetes.kubeconfig")
)
context = (
kwargs.get("context")
or pillar_dict.get("context")
or __salt__["config.option"]("kubernetes.context")
or None
)
return kubeconfig, context
def get_version_info(**kwargs):
"""Retrieve the API server version information, as a dict.
The result contains various version details to be as exhaustive as
possible.
CLI Example:
salt '*' metalk8s_kubernetes.get_version_info
"""
kubeconfig, context = get_kubeconfig(**kwargs)
api_client = kubernetes.config.new_client_from_config(
config_file=kubeconfig, context=context
)
api_instance = kubernetes.client.VersionApi(api_client=api_client)
try:
version_info = api_instance.get_code()
except (ApiException, HTTPError) as exc:
raise CommandExecutionError("Failed to get version info") from exc
return version_info.to_dict()
def ping(**kwargs):
"""Check connection with the API server.
Returns True if a request could be made, False otherwise.
CLI Example:
salt '*' metalk8s_kubernetes.ping
"""
try:
get_version_info(**kwargs)
except CommandExecutionError:
return False
return True
def read_and_render_yaml_file(source, template, context=None, saltenv="base"):
"""
    Read a YAML file and, if needed, render it using the specified
    templating engine. Returns the python objects defined inside of the file.
"""
sfn = __salt__["cp.cache_file"](source, saltenv)
if not sfn:
raise CommandExecutionError("Source file '{0}' not found".format(source))
if not context:
context = {}
with salt.utils.files.fopen(sfn, "r") as src:
contents = src.read()
if template:
if template in salt.utils.templates.TEMPLATE_REGISTRY:
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
contents,
from_str=True,
to_str=True,
context=context,
saltenv=saltenv,
grains=__grains__,
pillar=__pillar__,
salt=__salt__,
opts=__opts__,
)
if not data["result"]:
# Failed to render the template
raise CommandExecutionError(
"Failed to render file path with error: "
"{0}".format(data["data"])
)
contents = data["data"].encode("utf-8")
else:
raise CommandExecutionError(
"Unknown template specified: {0}".format(template)
)
return salt.utils.yaml.safe_load(contents)
def get_service_endpoints(service, namespace, kubeconfig):
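    """Return the addresses and ports of a Service's Endpoints object.

    The result, built from the first `subsets` entry below, is a list with one
    dict per endpoint address, e.g.
    [{'hostname': ..., 'ip': ..., 'node_name': ..., 'ports': {'<port-name>': <port-number>}}].
    """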
error_tpl = "Unable to get kubernetes endpoints for {} in namespace {}"
try:
endpoint = __salt__["metalk8s_kubernetes.get_object"](
name=service,
kind="Endpoints",
apiVersion="v1",
namespace=namespace,
kubeconfig=kubeconfig,
)
if not endpoint:
raise CommandExecutionError("Endpoint not found")
except CommandExecutionError as exc:
raise CommandExecutionError(error_tpl.format(service, namespace)) from exc
try:
result = []
for address in endpoint["subsets"][0]["addresses"]:
# Extract hostname, ip and node_name
res_ep = {
k: v for k, v in address.items() if k in ["hostname", "ip", "node_name"]
}
# Add ports info to result dict
res_ep["ports"] = {
port["name"]: port["port"] for port in endpoint["subsets"][0]["ports"]
}
result.append(res_ep)
except (AttributeError, IndexError, KeyError, TypeError) as exc:
raise CommandExecutionError(error_tpl.format(service, namespace)) from exc
return result
|
python
|
import logging.config
import configparser
import metrics
from datetime import datetime
import urllib.request
from unittest import mock
from requests import request
from aiohttp.web import Response
from jiracollector import JiraCollector
import pytest
config = configparser.ConfigParser()
config.read('metrics.ini')
logging.config.fileConfig(config.get('logging', 'config_file'), defaults=None, disable_existing_loggers=True)
logger = logging.getLogger()
# Asserts that "happy-path" works, i.e the returned metrics from JiraCollector
# is correctly converted and internal metrics are added to the cached metrics
@mock.patch('jiracollector.JiraCollector.__init__',mock.Mock(return_value=None))
@mock.patch('jiracollector.JiraCollector.collect')
def test_collect_metrics(mock_collector):
metrics_dict = {
"jira_total_done{project_name=\"BIP\"}":"42"
}
mock_collector.return_value = metrics_dict
metrics.serviceIsReady = False
metrics.collectJiraMetrics()
assert metrics.serviceIsReady == True
assert metrics.cachedMetrics == "jira_total_done{project_name=\"BIP\"} 42\njira_total_number_of_metrics 1\njira_total_execution_time_seconds 0\n"
# Asserts that an exception is raised if init or collect from Jiracollector raises an exception
@mock.patch('jiracollector.JiraCollector.__init__',side_effect=mock.Mock(side_effect=Exception("Just for testing exception Exception")),
)
def test_collect_metrics_raises_exception_if_exception_from_jiracollector(mock_collector):
metrics.serviceIsReady = False
with pytest.raises(Exception):
metrics.collectJiraMetrics()
@mock.patch('requests.request')
def test_alive_always_returns_200(mock_request):
response = metrics.alive(mock_request)
assert response.status == 200
@mock.patch('requests.request')
def test_ready_returns_503_or_200_depending_on_serviceIsReady(mock_request):
response = metrics.ready(mock_request)
assert response.status == 503
metrics.serviceIsReady = True
response = metrics.ready(mock_request)
assert response.status == 200
|
python
|
"""
MIT License
mift - Copyright (c) 2021 Control-F
Author: Mike Bangham (Control-F)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software, 'mift', and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from PyQt5.QtCore import pyqtSignal, QThread
import os
from os.path import join as pj
from os.path import basename, abspath
import zipfile
import tarfile
import logging
from src.utils import resource_path
class ExtractArchiveThread(QThread):
finishedSignal = pyqtSignal(str)
progressSignal = pyqtSignal(str)
def __init__(self, parent, files_to_extract, save_dir, archive, maintain_dir_structure=False, key_dir=None):
QThread.__init__(self, parent)
self.files_to_extract = files_to_extract
self.save_dir = save_dir
self.archive = archive
self.maintain_dir_structure = maintain_dir_structure
self.key_dir = key_dir
def run(self):
os.makedirs(self.save_dir, exist_ok=True)
if zipfile.is_zipfile(self.archive):
self.progressSignal.emit('Archive is zipfile, processing members...')
with zipfile.ZipFile(self.archive, 'r') as zip_obj:
archive_members = zip_obj.namelist()
if not self.maintain_dir_structure:
for file_member in self.files_to_extract: # get the index of the file in the archive members
file_idxs = [i for i, archive_member in enumerate(archive_members)
if file_member in archive_member]
if file_idxs:
self.progressSignal.emit('Found {} to extract from the archive. '
'Extracting...'.format(len(file_idxs)))
for idx in file_idxs:
if len(basename(archive_members[idx])) != 0:
file = pj(self.save_dir, '{}'.format(basename(archive_members[idx])))
with open(file, 'wb') as file_out:
zip_fmem = zip_obj.read(archive_members[idx])
file_out.write(zip_fmem)
else:
self.progressSignal.emit('Extracting files with base dir: {}/'.format(self.key_dir))
for archive_member in archive_members:
if self.key_dir in archive_member:
if archive_member.endswith('/'):
os.makedirs(self.save_dir+'/'+archive_member, exist_ok=True)
else:
file = abspath(self.save_dir+'/{}'.format(archive_member))
try:
with open(file, 'wb') as file_out:
zip_fmem = zip_obj.read(archive_member)
file_out.write(zip_fmem)
except:
logging.error('cant copy file: {}'.format(file))
else:
self.progressSignal.emit('Archive is tarfile, processing members...')
if not self.maintain_dir_structure:
with tarfile.open(self.archive, 'r') as tar_obj:
archive_members = tar_obj.getnames()
for file_member in self.files_to_extract: # get the index of the file in the archive members
file_idxs = [i for i, archive_member in enumerate(archive_members)
if file_member in archive_member]
if file_idxs:
self.progressSignal.emit('Found {} to extract from the archive. '
'Extracting...'.format(len(file_idxs)))
for idx in file_idxs:
if len(basename(archive_members[idx])) != 0:
file = pj(self.save_dir, '{}'.format(basename(archive_members[idx])))
with open(file, 'wb') as file_out:
tar_fmem = tar_obj.extractfile(archive_members[idx])
file_out.write(tar_fmem.read())
else:
self.progressSignal.emit('Extracting files with base dir: {}/'.format(self.key_dir))
with tarfile.open(self.archive, 'r') as tar_obj:
for member in tar_obj:
if self.key_dir in member.name:
if member.isdir():
os.makedirs(self.save_dir+'/'+member.name.replace(':', ''), exist_ok=True)
else:
file = self.save_dir+'/{}'.format(member.name.replace(':', ''))
try:
with open(file, 'wb') as file_out:
tar_fmem = tar_obj.extractfile(member)
file_out.write(tar_fmem.read())
except:
logging.error('cant copy file: {}'.format(file))
self.finishedSignal.emit('Archive processed!')
|
python
|
from constant_sum import *
if __name__ == "__main__":
t = 56
n = 510
s = 510
for i in range(500):
l = (T_len(t,n-i,s))
if l>0:
print(log(l,2),t, n-i, s, t*n - s+i, t*n )
#for b in range(t, 1, -1):
# p = 0
# k = min(n*(t - b + 1), s)
# #k = s
# print("b\ts\tlen\tbj\tbj")
# for i in range(0, k+1):
# aux = T_len(b-1, n, s- i)
# p += aux
# bjota = bj(i, b-1, n, s)
# zjota = zj(i, b-1, n, s)
# print("%d\t%d\t%d\t%d\t%d\t%d"%(b-1, s-i, aux, p, bjota, zjota))
"""
t(45,145,952)
Cv = 952
Cs =
Cg =
vs
t(45, 72, 1006)
Cv = 1006
Cs =
Cg =
"""
|
python
|
from .statement_base import Statement
import sasoptpy
class DropStatement(Statement):
@sasoptpy.class_containable
def __init__(self, *elements):
super().__init__()
for i in elements:
self.elements.append(i)
self.keyword = 'drop'
def append(self, element):
pass
def _defn(self):
s = f'{self.keyword} '
cons = []
for c in self.elements:
cons.extend(c._get_name_list())
s += ' '.join(cons) + ';'
return s
@classmethod
def model_drop_constraint(cls, _, c):
if sasoptpy.core.util.is_droppable(c):
st = DropStatement(c)
return st
@classmethod
def drop_constraint(cls, *constraints):
if all([sasoptpy.core.util.is_droppable(c) for c in constraints]):
st = DropStatement(*constraints)
class RestoreStatement(DropStatement):
def __init__(self, *elements):
super().__init__(*elements)
self.keyword = 'restore'
@classmethod
def restore_constraint(cls, *constraints):
if all([sasoptpy.core.util.is_droppable(c) for c in constraints]):
st = RestoreStatement(*constraints)
|
python
|
R = int(input())
print(2*3.141592653589793*R)
|
python
|
import numpy as np
import pandas as pd
import sys
from typing import Literal
from arch import arch_model
import warnings
from sklearn.exceptions import ConvergenceWarning
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ConvergenceWarning)
def get_rolling_vol_forecasts(return_series,
model,
horizon : int=21,
fitting_end_date : str = "2021-01-01",
#type_forecast : Literal['rolling','recursive'] = 'rolling'
):
print(f"\nFitting rolling {model.volatility} model with a {model.distribution}.")
index = return_series.index
start_loc = 0
end_loc = np.where(index > fitting_end_date)[0].min()
n_forecasts = 2+ np.where(index == index[-1])[0].min() - end_loc # find number of forecasts to make
forecasts = {}
print(f"Number of forecasts: {n_forecasts}")
for i in range(n_forecasts):
sys.stdout.write(".")
sys.stdout.flush()
#if type_forecast == 'rolling':
res = model.fit(first_obs=i, last_obs=i + end_loc, disp="off")
#else:
# res = model.fit(last_obs=i + end_loc, disp="off")
temp = np.sqrt(res.forecast(horizon=horizon, reindex=False).variance)
fcast = temp.iloc[0]
forecasts[fcast.name] = fcast
vol_forecasts = pd.DataFrame(forecasts).T.multiply(np.sqrt(252))
return vol_forecasts
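# Hedged usage sketch (added for illustration): the data below is synthetic and the
# GARCH(1,1)/normal specification is an assumption, not something taken from this file.
def _example_rolling_forecast():
    idx = pd.date_range("2019-01-01", "2021-06-30", freq="B")
    rng = np.random.default_rng(0)
    returns = pd.Series(rng.normal(0.0, 1.0, len(idx)), index=idx)
    am = arch_model(returns, p=1, q=1, dist="normal")
    # one 21-day-ahead, annualised volatility forecast per business day after the cut-off
    return get_rolling_vol_forecasts(returns, am, horizon=21, fitting_end_date="2021-01-01")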
|
python
|
from django.templatetags.static import static as get_static_url
from django.shortcuts import redirect
from .exceptions import UnknownMessageTypeError
from .models import Dispatch
from .signals import sig_unsubscribe_failed, sig_mark_read_failed
def _generic_view(message_method, fail_signal, request, message_id, dispatch_id, hashed, redirect_to=None):
if redirect_to is None:
redirect_to = '/'
try:
dispatch = Dispatch.objects.select_related('message').get(pk=dispatch_id)
if int(message_id) != dispatch.message_id:
raise ValueError()
message = dispatch.message
except (Dispatch.DoesNotExist, ValueError):
pass
else:
try:
message_type = message.get_type()
expected_hash = message_type.get_dispatch_hash(dispatch_id, message_id)
method = getattr(message_type, message_method)
return method(
request, message, dispatch,
hash_is_valid=(expected_hash == hashed),
redirect_to=redirect_to
)
except UnknownMessageTypeError:
pass
fail_signal.send(None, request=request, message=message_id, dispatch=dispatch_id)
return redirect(redirect_to)
def unsubscribe(request, message_id, dispatch_id, hashed, redirect_to=None):
"""Handles unsubscribe request.
:param Request request:
:param int message_id:
:param int dispatch_id:
:param str hashed:
:param str redirect_to:
:return:
"""
return _generic_view(
'handle_unsubscribe_request', sig_unsubscribe_failed,
request, message_id, dispatch_id, hashed, redirect_to=redirect_to
)
def mark_read(request, message_id, dispatch_id, hashed, redirect_to=None):
"""Handles mark message as read request.
:param Request request:
:param int message_id:
:param int dispatch_id:
:param str hashed:
:param str redirect_to:
:return:
"""
if redirect_to is None:
redirect_to = get_static_url('img/sitemessage/blank.png')
return _generic_view(
'handle_mark_read_request', sig_mark_read_failed,
request, message_id, dispatch_id, hashed, redirect_to=redirect_to
)
|
python
|
"""
Modules for predicting topological properties
"""
|
python
|
"""
Classes of water-current blocks that push the player in a fixed direction
"""
import os
from .block import Block
import math
class CurrentWaterRight(Block):
"""
    Represents a water-current block that pushes the player to the right
"""
def __init__(
self,
settings: any,
path: str = 'advancing_hero/images/blocks/water3.png',
):
super().__init__(os.path.abspath(path),
settings,
settings.WATER,
interactable=True)
def player_interaction(self, player, *args, **kwargs):
super().player_interaction(player)
player.in_water = True
player.speed = player.speed_base
dx = 1
dy = 0
for tile in player.stage.tile_list:
# Check only blocks which are on screen and are interactable
if tile[1].bottom > 0 and tile[
1].top < player.settings.screen_height and tile[
2].is_interactable:
                # Then check if it's solid. We do it in that order in case
# the block changes the player's speed.
if tile[2].is_solid and (dx or dy):
# Check collision in x direction
delta_x = 1 * dx / math.sqrt(dx * dx + dy * dy)
delta_y = 1 * dy / math.sqrt(dx * dx + dy * dy)
if tile[1].colliderect(player.rect.x + delta_x, player.rect.y,
player.rect.width, player.rect.height):
dx = 0
# Check for collision in y direction
if tile[1].colliderect(player.rect.x, player.rect.y + delta_y,
player.rect.width, player.rect.height):
dy = 0
if dx or dy:
player.rect.x += 1 * dx / math.sqrt(dx * dx + dy * dy)
player.rect.y += 1 * dy / math.sqrt(dx * dx + dy * dy)
if player.rect.bottom > player.settings.screen_height:
player.rect.bottom = player.settings.screen_height
if player.rect.top < 0:
player.rect.top = 0
if player.rect.right > player.settings.screen_width:
player.rect.right = player.settings.screen_width
if player.rect.left < 0:
player.rect.left = 0
class CurrentWaterLeft(Block):
"""
    Represents a water-current block that pushes the player to the left
"""
def __init__(
self,
settings: any,
path: str = 'advancing_hero/images/blocks/water3.png',
):
super().__init__(os.path.abspath(path),
settings,
settings.WATER,
interactable=True)
def player_interaction(self, player, *args, **kwargs):
super().player_interaction(player)
player.in_water = True
player.speed = player.speed_base
dx = -1
dy = 0
for tile in player.stage.tile_list:
# Check only blocks which are on screen and are interactable
if tile[1].bottom > 0 and tile[
1].top < player.settings.screen_height and tile[
2].is_interactable:
                # Then check if it's solid. We do it in that order in case
# the block changes the player's speed.
if tile[2].is_solid and (dx or dy):
# Check collision in x direction
delta_x = 1 * dx / math.sqrt(dx * dx + dy * dy)
delta_y = 1 * dy / math.sqrt(dx * dx + dy * dy)
if tile[1].colliderect(player.rect.x + delta_x, player.rect.y,
player.rect.width, player.rect.height):
dx = 0
# Check for collision in y direction
if tile[1].colliderect(player.rect.x, player.rect.y + delta_y,
player.rect.width, player.rect.height):
dy = 0
if dx or dy:
player.rect.x += 1 * dx / math.sqrt(dx * dx + dy * dy)
player.rect.y += 1 * dy / math.sqrt(dx * dx + dy * dy)
if player.rect.bottom > player.settings.screen_height:
player.rect.bottom = player.settings.screen_height
if player.rect.top < 0:
player.rect.top = 0
if player.rect.right > player.settings.screen_width:
player.rect.right = player.settings.screen_width
if player.rect.left < 0:
player.rect.left = 0
class CurrentWaterDown(Block):
"""
    Represents a water-current block that pushes the player downwards
"""
def __init__(
self,
settings: any,
path: str = 'advancing_hero/images/blocks/water3.png',
):
super().__init__(os.path.abspath(path),
settings,
settings.WATER,
interactable=True)
def player_interaction(self, player, *args, **kwargs):
super().player_interaction(player)
player.in_water = True
player.speed = player.speed_base
dy = 1
dx = 0
for tile in player.stage.tile_list:
# Check only blocks which are on screen and are interactable
if tile[1].bottom > 0 and tile[
1].top < player.settings.screen_height and tile[
2].is_interactable:
                # Then check if it's solid. We do it in that order in case
# the block changes the player's speed.
if tile[2].is_solid and (dx or dy):
# Check collision in x direction
delta_x = 1 * dx / math.sqrt(dx * dx + dy * dy)
delta_y = 1 * dy / math.sqrt(dx * dx + dy * dy)
if tile[1].colliderect(player.rect.x + delta_x, player.rect.y,
player.rect.width, player.rect.height):
dx = 0
# Check for collision in y direction
if tile[1].colliderect(player.rect.x, player.rect.y + delta_y,
player.rect.width, player.rect.height):
dy = 0
if dx or dy:
player.rect.x += 1 * dx / math.sqrt(dx * dx + dy * dy)
player.rect.y += 1 * dy / math.sqrt(dx * dx + dy * dy)
if player.rect.bottom > player.settings.screen_height:
player.rect.bottom = player.settings.screen_height
if player.rect.top < 0:
player.rect.top = 0
if player.rect.right > player.settings.screen_width:
player.rect.right = player.settings.screen_width
if player.rect.left < 0:
player.rect.left = 0
class CurrentWaterUp(Block):
"""
    Represents a water-current block that pushes the player upwards
"""
def __init__(
self,
settings: any,
path: str = 'advancing_hero/images/blocks/water3.png',
):
super().__init__(os.path.abspath(path),
settings,
settings.WATER,
interactable=True)
def player_interaction(self, player, *args, **kwargs):
super().player_interaction(player)
player.in_water = True
player.speed = player.speed_base
dy = -1
dx = 0
for tile in player.stage.tile_list:
# Check only blocks which are on screen and are interactable
if tile[1].bottom > 0 and tile[
1].top < player.settings.screen_height and tile[
2].is_interactable:
                # Then check if it's solid. We do it in that order in case
# the block changes the player's speed.
if tile[2].is_solid and (dx or dy):
# Check collision in x direction
delta_x = 1 * dx / math.sqrt(dx * dx + dy * dy)
delta_y = 1 * dy / math.sqrt(dx * dx + dy * dy)
if tile[1].colliderect(player.rect.x + delta_x, player.rect.y,
player.rect.width, player.rect.height):
dx = 0
# Check for collision in y direction
if tile[1].colliderect(player.rect.x, player.rect.y + delta_y,
player.rect.width, player.rect.height):
dy = 0
if dx or dy:
player.rect.x += 1 * dx / math.sqrt(dx * dx + dy * dy)
player.rect.y += 1 * dy / math.sqrt(dx * dx + dy * dy)
if player.rect.bottom > player.settings.screen_height:
player.rect.bottom = player.settings.screen_height
if player.rect.top < 0:
player.rect.top = 0
if player.rect.right > player.settings.screen_width:
player.rect.right = player.settings.screen_width
if player.rect.left < 0:
player.rect.left = 0
|
python
|
# Import the flask dependencies
from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for
# Import password/encryption helpers
from werkzeug import check_password_hash, generate_password_hash
# Import FireBase
from firebase import firebase
# Import the database object from the main application module
from app import db
# Import the form module
from app.mod_auth.forms import LoginForm
# Import the user module (i.e. User)
from app.mod_auth.models import User
# Define the connection to the FireBase nodes
firebase = firebase.FirebaseApplication('https://ecclesiapp-fe5b2.firebaseio.com', None)
# Define the blueprint: 'auth', and set its url prefix: app.url/auth
mod_auth = Blueprint('auth', __name__, url_prefix='/auth')
# Define the routes and accepted methods
@mod_auth.route('/signin/', methods=['GET', 'POST'])
def signin():
    # If the sign-in form was submitted
form = LoginForm(request.form)
    # Validate the sign-in form
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and check_password_hash(user.password, form.password.data):
session['user_id'] = user.id
flash('Welcome %s' % user.name)
return redirect(url_for('auth.home'))
flash('Wrong email or password', 'error-message')
return render_template("auth/signin.html", form=form)
@mod_auth.route('/nada/', methods=['GET', 'POST'])
def nada():
passwd = "1234"
pw_hash = generate_password_hash(passwd)
return pw_hash
@mod_auth.route('/fbase/', methods=['GET', 'POST'])
def fbase():
    # Get the content of the reference
departamentos = firebase.get('/departamentos/MANAGUA/2/nombre', None)
return str(departamentos)
@mod_auth.route('/mysql/', methods=['GET', 'POST'])
def mysql():
mysql = Nube_Actividad.query.all()
return str(mysql)
|
python
|
def ErrorHandler(function):
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception as e: # pragma: no cover
pass
return wrapper
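# Hedged usage sketch (illustrative names, not from the original file): the decorator
# swallows any exception raised by the wrapped call and returns None instead.
@ErrorHandler
def _parse_int(value):
    return int(value)

# _parse_int("42") -> 42, _parse_int("oops") -> None (the ValueError is suppressed)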
|
python
|
import pandas as pd
import numpy as np
import tensorflow as tf
from math import floor
from dataset.date_format import START_CODE, INPUT_FNS, OUTPUT_VOCAB, encodeInputDateStrings, encodeOutputDateStrings, dateTupleToYYYYDashMMDashDD
def generateOrderedDates(minYear: str, maxYear: str) -> list:
daterange = pd.date_range(minYear, maxYear)
dates = []
for single_date in daterange:
date: list = single_date.strftime("%Y-%m-%d").split('-')
for index, value in enumerate(date):
date[index] = int(date[index])
dates.append(date)
return dates
def dateTuplesToTensor(dateTuples, dec_output_one_hot = True):
# Encoder Input
inputs = []
for _, fn in enumerate(INPUT_FNS):
for _, dateTuple in enumerate(dateTuples):
formatedDate = fn(dateTuple)
inputs.append(formatedDate)
encoderInput = encodeInputDateStrings(inputs)
# Decoder Input
isoDates = []
for _, dateTuple in enumerate(dateTuples):
isoDates.append(dateTupleToYYYYDashMMDashDD(dateTuple))
decoderInput = encodeOutputDateStrings(isoDates).astype("float32")
if not dec_output_one_hot:
decoderOutput = decoderInput
decoderOutput = np.tile(decoderOutput, (len(INPUT_FNS), 1)).astype("int32")
# Remove Last column
decoderInput = decoderInput[..., :-1]
# Create a single column with start code
shift = np.full((decoderInput.shape[0], 1), START_CODE, dtype='float32')
# Concat the tensors
decoderInput = np.concatenate((shift, decoderInput), axis=1)
# Tile to match the encoderInput
decoderInput = np.tile(decoderInput, (len(INPUT_FNS), 1))
if dec_output_one_hot:
# Decoder Output
decoderOutput = tf.one_hot(
encodeOutputDateStrings(isoDates),
len(OUTPUT_VOCAB)
)
# Tile to match the encoderInput
decoderOutput = np.tile(decoderOutput, (len(INPUT_FNS), 1, 1)).astype("int32")
return encoderInput, decoderInput, decoderOutput
def generateDataSet(minYear="1950-01-01", maxYear="2050-01-01", trainSplit=0.25, validationSplit=0.15, dec_output_one_hot = True):
dateTuples = generateOrderedDates(minYear, maxYear)
np.random.shuffle(dateTuples)
numTrain = floor(len(dateTuples)*trainSplit)
numValidation = floor(len(dateTuples)*validationSplit)
trainEncoderInput, trainDecoderInput, trainDecoderOutput = dateTuplesToTensor(
dateTuples[0:numTrain], dec_output_one_hot)
valEncoderInput, valDecoderInput, valDecoderOutput = dateTuplesToTensor(
dateTuples[numTrain:numTrain+numValidation], dec_output_one_hot)
testDateTuples = dateTuples[numTrain+numValidation: len(dateTuples)]
return trainEncoderInput, trainDecoderInput, trainDecoderOutput, valEncoderInput, valDecoderInput, valDecoderOutput, testDateTuples
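# Hedged usage sketch (added for illustration; the narrow year range is an arbitrary assumption):
# generateDataSet shuffles the date tuples, renders each one with every format in INPUT_FNS,
# and returns encoder/decoder tensors plus the held-out test tuples.
def _example_generate_dataset():
    (trainEncIn, trainDecIn, trainDecOut,
     valEncIn, valDecIn, valDecOut, testTuples) = generateDataSet("2000-01-01", "2002-01-01")
    # decoder input is the ISO target shifted right by one position and prefixed with
    # START_CODE (teacher forcing); there is one encoder row per (date, input format) pair
    print(trainEncIn.shape, trainDecIn.shape, trainDecOut.shape, len(testTuples))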
|
python
|
from __future__ import division, print_function, absolute_import
# LIBTBX_SET_DISPATCHER_NAME iota.single_image
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export BOOST_ADAPTBX_FPE_DEFAULT=1
'''
Author : Lyubimov, A.Y.
Created : 05/31/2018
Last Changed: 01/30/2019
Description : IOTA Single Image: can process a single image using DIALS,
with an array of options (i.e. anything from only spotfinding, to indexing,
space group determination, refinement, and integration)
'''
import argparse
import os
import time
from iota.components.iota_init import initialize_single_image
from iota.components.iota_base import ProcessingBase
def parse_command_args():
""" Parses command line arguments (only options for now) """
parser = argparse.ArgumentParser(prog='iota.single_image')
parser.add_argument('path', type=str, nargs = '?', default = None,
help = 'Path to data file')
# parser.add_argument('--backend', type=str, default='dials',
# help='Backend for processing')
parser.add_argument('--paramfile', type=str, default=None,
help='Parameter file for processing')
parser.add_argument('--output_file', type=str, default=None,
help='Output filename')
parser.add_argument('--output_dir', type=str, default=None,
help='Output directory (for BluIce)')
parser.add_argument('--termfile', type=str, default='.stop',
help='Termination signal filename')
parser.add_argument('--index', type=int, default=1,
help='Numerical index of the image')
parser.add_argument('--min_bragg', type=int, default=10,
help='Minimum spots for successful spotfinding result')
parser.add_argument('--nproc', type=int, default=1,
help='Number of processors')
parser.add_argument('--action', type=str, default='spotfinding',
help='Code for how far to go; available codes: '
'spotfind, index, integrate')
parser.add_argument('--verbose', action = 'store_true',
help='Print information to stdout')
return parser
class SingleImageProcessor(ProcessingBase):
def __init__(self, *args, **kwargs):
ProcessingBase.__init__(self, *args, **kwargs)
def process(self):
file_wait_start = time.time()
errors = []
n_spots = 0
n_overloads = 0
res = (99, 99)
n_rings = 0
avg_I = 0
score = 0
sg = None
uc = None
lres = 999
hres = 999
img = self.params.input[0]
img_object = None
while True:
elapsed = time.time() - file_wait_start
if elapsed > 30:
errors.append('{} does not exist'.format(img))
print('DEBUG: ELAPSED = ', time.time() - file_wait_start)
break
if os.path.isfile(img):
input_entry = (1, img)
img_object = self.import_and_process(input_entry)
n_spots = img_object.final['spots']
score = img_object.final['indexed']
hres = img_object.final['res']
lres = img_object.final['lres']
sg = img_object.final['sg']
uc = ' '.join([
'{:.2f}'.format(img_object.final['a']),
'{:.2f}'.format(img_object.final['b']),
'{:.2f}'.format(img_object.final['c']),
'{:.2f}'.format(img_object.final['alpha']),
'{:.2f}'.format(img_object.final['beta']),
'{:.2f}'.format(img_object.final['gamma'])
])
errors.extend(img_object.errors)
break
if img_object:
if self.verbose:
print ('SPOTS FOUND: {}'.format(n_spots))
print ('INDEXING: {} INDEXED SPOTS'.format(score))
if res[0] != 999:
print ('RESOLUTION: {:.2f} - {:.2f}'.format(lres, hres))
if sg and uc:
print ('BRAVAIS LATTICE: {}'.format(sg))
print ('UNIT CELL: {}'.format(uc))
print ('TOTAL PROCESSING TIME: {:.2f} SEC'
''.format(time.time() - file_wait_start))
if errors:
for e in errors:
print (e)
# info = [self.index, len(observed), self.img, sg, uc]
if self.info.obj_list_file:
with open(self.info.obj_list_file, 'a') as outf:
info_line = '{} {} {} {} {}'.format(0, n_spots, img, sg, uc)
outf.write('{}\n'.format(info_line))
if self.verbose:
if errors:
err = errors[0]
print_errors = True
else:
err = ''
print_errors = False
print ('\n__RESULTS__')
print ('{} {} {} {:.2f} {} {} {} {} {{{}}}' .format(n_spots, n_overloads,
score, res[1], n_rings, 0, avg_I, 0, err))
if print_errors:
print ("__ERRORS__")
for e in errors:
print (e)
# ============================================================================ #
if __name__ == "__main__":
import argparse
args, unk_args = parse_command_args().parse_known_args()
info, iparams = initialize_single_image(img=os.path.abspath(args.path),
paramfile=args.paramfile,
output_file=args.output_file,
output_dir=args.output_dir,
min_bragg=args.min_bragg)
interceptor = SingleImageProcessor.for_single_image(info, iparams,
action_code=args.action,
verbose=args.verbose)
if args.output_dir is not None:
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
interceptor.start()
|
python
|
import requests
from bs4 import BeautifulSoup
import urllib.request
import pytesseract
from PIL import Image
from PIL import ImageEnhance
def shibie(filepath):
    # Open the image
img = Image.open(filepath)
img = img.convert('RGB')
enhancer = ImageEnhance.Color(img)
enhancer = enhancer.enhance(0)
enhancer = ImageEnhance.Brightness(enhancer)
enhancer = enhancer.enhance(2)
enhancer = ImageEnhance.Contrast(enhancer)
enhancer = enhancer.enhance(8)
enhancer = ImageEnhance.Sharpness(enhancer)
img = enhancer.enhance(20)
    # Process the image to improve recognition accuracy
    # Convert to a grayscale image
img = img.convert('L')
# img.show()
    # Binarize the image
threshold = 140
table = []
for i in range(256):
if i < threshold:
table.append(0)
else:
table.append(1)
out = img.point(table, '1')
# out.show()
# exit()
    # Convert the image back to RGB mode
img = img.convert('RGB')
print(pytesseract.image_to_string(img))
return pytesseract.image_to_string(img)
i = 0
while 1:
    # Create a session
s = requests.Session()
    # Send a GET request
deng_url = 'https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36',
}
r = s.get(deng_url, headers=headers)
    # Create the soup object
soup = BeautifulSoup(r.text, 'lxml')
    # Get the URL of the captcha image
image_src = 'https://so.gushiwen.org' + soup.find('img', id='imgCode')['src']
    # Download the captcha image locally
r = s.get(image_src, headers=headers)
with open('code1.png', 'wb') as fp:
fp.write(r.content)
    # Get the two hidden form fields from the page
view_state = soup.find('input', id='__VIEWSTATE')['value']
view_generator = soup.find('input', id='__VIEWSTATEGENERATOR')['value']
code = shibie('code1.png')
    # The POST request was captured with a packet-capture tool; simulate it here in code
post_url = 'https://so.gushiwen.org/user/login.aspx?from=http%3a%2f%2fso.gushiwen.org%2fuser%2fcollect.aspx'
data = {
'__VIEWSTATE': view_state,
'__VIEWSTATEGENERATOR': view_generator,
'from': 'http://so.gushiwen.org/user/collect.aspx',
        'email': '',  # value removed in the original source
        'pwd': '',  # value removed in the original source
'code': code,
'denglu': '登录',
}
r = s.post(url=post_url, headers=headers, data=data)
i += 1
    print('This is login attempt number %s' % i)
# print(r.text)
    if '退出登录' in r.text:  # '退出登录' ("log out") appears only after a successful login
break
print('Login successful')
|
python
|
import random
import uuid
from datetime import timedelta
import re
from discord import AllowedMentions, ButtonStyle, Embed
from squid.bot import CommandContext, SquidPlugin, command
from squid.bot.errors import CommandFailed
from squid.utils import now, parse_time, s
from .views import GiveawayView
class Giveaways(SquidPlugin):
def __init__(self, bot):
self.bot = bot
self.link_re = re.compile(
r"https:\/\/discord.com\/channels\/(\d*)\/(\d*)\/(\d*)"
)
@command()
def giveaway(self, ctx: CommandContext):
"""Create, Manage, and End Giveaways"""
...
@giveaway.subcommand(name="start")
def start(
self,
ctx: CommandContext,
# Required
time: str,
winners: int,
prize: str,
# Optional
amari: int = 0,
mee6: int = 0,
required_roles: str = "",
bypass_roles: str = "",
blacklist_roles: str = "",
booster: bool = None,
dank_lottery: int = None,
):
"""Starts a giveaway"""
if len(time) <= 1:
raise CommandFailed("Invalid time format")
if time.isdigit():
time += "s"
delta = parse_time(time)
if delta > timedelta(weeks=8):
raise CommandFailed(
"Time too long!\nYou cannot set a giveaway for more than 8 weeks"
)
store_key = uuid.uuid4().hex
stamp = int((now() + delta).timestamp())
requirements = {
v: self.bot.requirements[v].convert(ctx, k)
for v, k in {
"required_roles": required_roles,
"bypass_roles": bypass_roles,
"blacklist_roles": blacklist_roles,
"amari": amari,
"mee6": mee6,
"booster": booster,
"danklottery": dank_lottery,
}.items()
if k
}
description = ctx.setting(
"description",
time=f"<t:{stamp}:R>",
stamp=str(stamp),
requirements="\n".join(
[self.bot.requirements[k].display(v) for k, v in requirements.items()]
),
**requirements,
prize=prize,
winners=winners,
host=f"<@{ctx.author.id}>",
donor=f"<@{ctx.author.id}>",
channel_id=ctx.channel_id,
)
message = ctx.send(
embed=Embed(
title=prize,
description=description,
timestamp=now() + delta,
color=self.bot.colors["primary"],
).set_footer(text=f"{int(winners)} winner{s(winners)} | Ends at "),
view=GiveawayView(
key=store_key,
style=ButtonStyle.secondary,
label="Join",
),
)
with ctx.bot.db as db:
db.giveaways.insert_one(
{
"host_id": str(ctx.author.id),
"message_id": str(message["id"]),
"channel_id": str(ctx.channel_id),
"guild_id": str(ctx.guild_id),
"store_key": store_key,
"end": now() + delta,
"winners": int(winners),
"start": now(),
"active": True,
"prize": prize,
"requirements": requirements,
"data": {},
}
)
return ctx.respond(
embed=Embed(
description=f"Started a giveaway for `{delta}`",
color=self.bot.colors["primary"],
),
ephemeral=True,
)
@giveaway.subcommand(name="end")
def end(self, ctx: CommandContext, link: str):
"""
Stop a giveaway
"""
match = self.link_re.match(link)
if not match:
raise CommandFailed("Invalid link")
_, channel_id, message_id = match.groups()
with ctx.bot.db as db:
x = db.giveaways.find_one_and_update(
{
"guild_id": str(ctx.guild_id),
"channel_id": str(channel_id),
"message_id": str(message_id),
},
{"$set": {"end": now()}},
)
if x and x["active"]:
return ctx.respond(
embed=Embed(
description=f"Stopped giveaway for `{x['prize']}`",
color=self.bot.colors["primary"],
),
ephemeral=True,
)
elif x and x["active"]:
return ctx.respond(
embed=Embed(
description=f"giveaway for `{x['prize']}` already ended",
color=self.bot.colors["secondary"],
),
ephemeral=True,
)
else:
raise CommandFailed("I cannot find that giveaway")
@giveaway.subcommand(name="reroll")
def reroll(self, ctx: CommandContext, giveaway_id: str, amount: int = 1) -> None:
"""Rerolls a giveaway"""
if not giveaway_id.isdigit():
raise CommandFailed("Invalid giveaway ID")
with ctx.bot.db as db:
doc = db.giveaways.find_one({"message_id": giveaway_id})
if doc["active"]:
raise CommandFailed("Giveaway is still active")
elif "users" not in doc:
raise CommandFailed("Giveaway has no entrants or is still ending")
users = doc.get("users", [])
random.seed(f'{doc["message_id"]}{self.bot.http.token}')
random.shuffle(users)
next_val = doc.get("next_user_seed_input", 0)
winners = []
print(users)
while users and len(winners) < amount:
user = users[int(next_val % len(users))]
next_val += 1
winners.append(user)
if next_val != 0:
db.giveaways.find_one_and_update(
{"_id": doc["_id"]},
{"$set": {"next_user_seed_input": next_val}},
)
if winners:
winner_str = ", ".join([f"<@{i}>" for i in winners])
else:
winner_str = None
reroll_message = ctx.setting(
"reroll_message",
host=f"<@{ctx.author.id}>",
reroller=ctx.author,
reroll_channel=ctx.channel,
server=ctx.guild,
channel=ctx.channel,
link=f"https://discordapp.com/channels/{ctx.guild_id}/{doc['channel_id']}/{doc['message_id']}",
winners=(winner_str or "Nobody"),
prize=doc["prize"],
)
return ctx.respond(
content=reroll_message, allowed_mentions=AllowedMentions.none()
)
def setup(bot):
bot.add_plugin(Giveaways(bot))
bot.add_handler(GiveawayView)
|
python
|