max_stars_repo_path (stringlengths 4–245) | max_stars_repo_name (stringlengths 7–115) | max_stars_count (int64 101–368k) | id (stringlengths 2–8) | content (stringlengths 6–1.03M) |
---|---|---|---|---|
hw/vendor/lowrisc_ibex/vendor/google_riscv-dv/pygen/experimental/riscv_data_page_gen.py | GregAC/opentitan | 1,375 | 119120 |
"""Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import utils
import random
from bitstring import BitArray, BitStream
# -----------------------------------------------------------------------------------------
# RISC-V assembly program data section generator
# There can be user mode and supervisor(kernel) mode data pages
# -----------------------------------------------------------------------------------------
class riscv_data_page_gen:
def __init__(self):
self.data_page_str = []
# TO DO: cfg
# cfg...
# The data section can be initialized with different data pattern:
# Random value, incremental value, all zeros
def gen_data(self, idx, pattern, num_of_bytes):
data = [None] * num_of_bytes
for i in range(len(data)):
if pattern == "RAND_DATA":
temp_data = random.randint(0, 255)
# data[i] = temp_data
data[i] = BitArray(uint=temp_data, length=8)
elif pattern == "INCR_VAL":
# data[i] = (idx+i) % 256
data[i] = BitArray(uint=(idx + i) % 256, length=8)
return data
# Generate the assembly code for the data section
def gen_data_page(self, pattern, is_kernel=0):
self.data_page_str.clear()
# TO DO: need to embed num_of_kernel_data_pages, num_of_data_pages, etc. in the riscv_core_setting
page_cnt = 1 if is_kernel else 2
page_size = 4096
for section_idx in range(page_cnt):
if is_kernel:
self.data_page_str.append("kernel_data_page_{}:".format(section_idx))
else:
self.data_page_str.append("data_page_{}:".format(section_idx))
# TO DO: need to embed data_page_alignment in the core_setting
self.data_page_str.append(".align 12")
for i in range(0, page_size, 32):
tmp_data = self.gen_data(i, pattern, 32)
tmp_str = ".word {:{}}".format(
utils.format_data(tmp_data), utils.length)
self.data_page_str.append(tmp_str)
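# Minimal usage sketch: gen_data() depends only on random/bitstring, so its output can be
# inspected directly; gen_data_page() additionally needs the sibling utils module.
if __name__ == '__main__':
    gen = riscv_data_page_gen()
    demo = gen.gen_data(idx=0, pattern="INCR_VAL", num_of_bytes=8)
    print([b.uint for b in demo])  # -> [0, 1, 2, 3, 4, 5, 6, 7]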
|
features/feature_ebert_qa.py | Woffee/deformer | 114 | 119131 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from .feature_bert_qa import BertQaDataBuilder
class EbertQaDataBuilder(BertQaDataBuilder):
TASK_FEATURES = ('feature_id', 'question_ids', 'context_ids')
def __init__(self, config):
super().__init__(config)
self.max_first_length = config.max_first_length + 2 # for [CLS], [SEP]
self.max_second_length = self.max_seq_length - self.max_first_length
def get_max_ctx_tokens(self, q_len=0):
return self.max_second_length - 1 # for [SEP]
def get_ctx_offset(self, q_len=0):
return self.max_first_length # length of q is fixed
def build_ids(self, feature_dict, q_ids, win_ctx_ids):
# for EBERT, first put cls, then put q, sep and ctx, sep
q_ids = q_ids[:self.max_first_length - 2]
first_part_ids = [self.cls_id] + q_ids + [self.sep_id]
first_len = len(first_part_ids)
first_ids = first_part_ids + [0] * (self.max_first_length - first_len)
second_part_ids = win_ctx_ids + [self.sep_id]
sec_len = len(second_part_ids)
second_ids = second_part_ids + [0] * (self.max_second_length - sec_len)
feature_dict['question_ids'] = first_ids
feature_dict['context_ids'] = second_ids
return feature_dict
@staticmethod
def record_parser(record, config):
max_q_length = config.max_first_length + 2
max_c_length = config.max_seq_length - max_q_length
from common import tf
name_to_features = {
"feature_id": tf.io.FixedLenFeature([], tf.int64),
"question_ids": tf.io.FixedLenFeature([max_q_length], tf.int64),
"context_ids": tf.io.FixedLenFeature([max_c_length], tf.int64),
}
if config.mode == 'train':
name_to_features["answer_start"] = tf.io.FixedLenFeature([],
tf.int64)
name_to_features["answer_end"] = tf.io.FixedLenFeature([], tf.int64)
name_to_features["cls"] = tf.io.FixedLenFeature([], tf.int64)
example = BertQaDataBuilder.record_to_example(record, name_to_features)
features = {
'feature_id': example['feature_id'],
'question_ids': example['question_ids'],
'context_ids': example['context_ids'],
}
if config.mode == 'train':
labels = {
'cls': example['cls'],
'answer_start': example['answer_start'],
'answer_end': example['answer_end'],
}
else:
labels = {}
return features, labels
@staticmethod
def two_seq_str_fn(feat):
q_str = ['|{:>5}|{:>15}|{:>10}'.format(
'q_idx', 'token', 'q_id')]
q_str.extend(['|{:>5}|{:>15}|{:>10}'.format(
q_idx, q_token, feat.question_ids[q_idx])
for q_idx, q_token in enumerate(feat.question_tokens)])
ctx_str = ['|{:>5}|{:>15}|{:>15}|{:>10}'.format(
'c_idx', 'token', 'span', 'c_id')]
ctx_str.extend(['|{:>5}|{:>15}|{:>15}|{:>10}'.format(
c_idx, c_token, str(feat.context_spans[c_idx]),
feat.context_ids[c_idx])
for c_idx, c_token in enumerate(feat.context_tokens)])
return q_str, ctx_str
@staticmethod
def inputs_str_fn(feat):
feature_strings = ['\tquestion_ids={}'.format(feat.question_ids),
'\tcontext_ids={}'.format(feat.context_ids)]
return feature_strings
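# A standalone toy re-statement of the fixed-layout packing that build_ids() performs
# (101/102 are placeholder ids, not the real [CLS]/[SEP] vocabulary ids):
def _pack_demo(q_ids, ctx_ids, max_first_length=6, max_second_length=8, cls_id=101, sep_id=102):
    q_ids = q_ids[:max_first_length - 2]
    first = [cls_id] + q_ids + [sep_id]
    first = first + [0] * (max_first_length - len(first))
    second = ctx_ids + [sep_id]
    second = second + [0] * (max_second_length - len(second))
    return first, second
# _pack_demo([7, 8], [21, 22, 23]) == ([101, 7, 8, 102, 0, 0], [21, 22, 23, 102, 0, 0, 0, 0])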
|
language/bert_extraction/steal_bert_classifier/utils/merge_datasets_simple.py | Xtuden-com/language | 1,199 | 119146 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Concatenate a list of datasets."""
import tensorflow.compat.v1 as tf
app = tf.app
flags = tf.flags
gfile = tf.gfile
logging = tf.logging
flags.DEFINE_string("dataset_paths", None, "CSV list of datasets to combine")
flags.DEFINE_string("output_path", None,
"New output directory where output corpus will be dumped")
flags.DEFINE_string("task_name", "mnli", "Task in consideration")
FLAGS = flags.FLAGS
num_labels = {"sst2": 2, "mnli": 3}
relevant_headers = {"sst2": ["sentence"], "mnli": ["sentence1", "sentence2"]}
def main(_):
output_data = []
dataset_paths = FLAGS.dataset_paths.split(",")
for dp in dataset_paths:
with gfile.Open(dp, "r") as f:
base_dataset = f.read().strip().split("\n")
base_dataset_header = base_dataset[0]
base_dataset = base_dataset[1:]
indices_base_dataset = [
base_dataset_header.split("\t").index(x)
for x in relevant_headers[FLAGS.task_name]
]
for point in base_dataset:
input_shards = [
point.split("\t")[index] for index in indices_base_dataset
]
output_data.append(("%d\t" % len(output_data)) + "\t".join(input_shards))
logging.info("Final dataset size = %d", len(output_data))
final_header = "index\t" + "\t".join(relevant_headers[FLAGS.task_name])
output_data = [final_header] + output_data
with gfile.Open(FLAGS.output_path, "w") as f:
f.write("\n".join(output_data) + "\n")
if __name__ == "__main__":
app.run(main)
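# Example invocation (paths are hypothetical):
#   python merge_datasets_simple.py \
#     --dataset_paths=/tmp/shard_a.tsv,/tmp/shard_b.tsv \
#     --output_path=/tmp/merged.tsv --task_name=sst2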
|
apps/oauth/__init__.py | sbybfai/izone | 1,009 | 119164 |
default_app_config = 'oauth.apps.OauthConfig'
|
docs/feature/customop_example.py | yuhonghong66/minpy | 1,271 | 119209 |
# Imports needed to run this documentation snippet (assumed: customop comes from minpy.core).
import numpy
from minpy.core import customop
@customop('numpy')
def my_softmax(x, y):
probs = numpy.exp(x - numpy.max(x, axis=1, keepdims=True))
probs /= numpy.sum(probs, axis=1, keepdims=True)
N = x.shape[0]
loss = -numpy.sum(numpy.log(probs[numpy.arange(N), y])) / N
return loss
def my_softmax_grad(ans, x, y):
def grad(g):
N = x.shape[0]
probs = numpy.exp(x - numpy.max(x, axis=1, keepdims=True))
probs /= numpy.sum(probs, axis=1, keepdims=True)
probs[numpy.arange(N), y] -= 1
probs /= N
return probs
return grad
my_softmax.def_grad(my_softmax_grad)
|
elpis/engines/common/input/clean_json.py | guillaume-wisniewski/elpis | 118 | 119213 |
#!/usr/bin/python3
"""
Given a json file with transcript information, these tools can perform
manipulations including generating word lists or filtering output to
exclude English words and punctuation.
Optionally provide the output json file name with -o.
Usage: python3 clean_json.py [-h] [-i INFILE] [-o OUTFILE] [-r] [-u]
Copyright: University of Queensland, 2019
Contributors:
<NAME> - (The University of Queensland, 2017)
<NAME> - (The University of Queensland, 2018)
<NAME> - (The University of Queensland, 2019)
"""
import os
import re
import sys
import nltk
from argparse import ArgumentParser
from langid.langid import LanguageIdentifier, model
from nltk.corpus import words
from typing import Dict, List, Set, Tuple
from ..utilities import load_json_file, write_data_to_json_file
def clean_utterance(utterance: Dict[str, str],
punctuation_to_collapse_by: str = '',
punctuation_to_explode_by: str = '',
special_cases: set = {None},
translation_tags: set = {None},
remove_english: bool = False,
english_words: Set[str] = None) -> Tuple[List[str], int]:
"""
Takes an utterance and cleans it based on the rules established by the provided parameters.
:param utterance: a dictionary with a "transcript" key-value pair.
:param punctuation_to_collapse_by: punctuation marks to strip.
:param punctuation_to_explode_by: punctuation marks to replace with spaces.
:param special_cases: a list of words to always remove from the output.
:param translation_tags: a list of tags to always remove from the output.
:param remove_english: whether or not to remove English words.
:param english_words: a list of english words to remove from the transcript (we suggest the nltk corpus).
:return: a tuple with a list of 'cleaned' words and the number of English words removed.
"""
# TODO add interface setting to include user specific tags
# translation_tags = {"@eng@", "<ind:", "<eng:"}
# TODO add interface setting to skip this as caps are significant in some languages
utterance_string = utterance.get("transcript").lower()
dirty_words = utterance_string.split()
clean_words = []
english_word_count = 0
for word in dirty_words:
if word in special_cases:
continue
if remove_english and len(word) > 3 and word in english_words:
english_word_count += 1
continue
if word in translation_tags:
return [], 0
# Word is ok to use, now clean it
word = deal_with_punctuation(text=word,
punctuation_to_collapse_by=punctuation_to_collapse_by,
punctuation_to_explode_by=punctuation_to_explode_by)
clean_words.append(word)
return clean_words, english_word_count
def get_english_words() -> Set[str]:
"""
Gets a list of English words from the nltk corpora (~235k words).
N.B: will download the word list if not already available (~740kB), requires internet.
:return: a set containing the English words
"""
nltk.download("words") # Will only download if not locally available.
return set(words.words())
def are_words_valid(clean_words: List[str],
english_word_count: int,
remove_english: bool,
use_langid: bool) -> bool:
"""
Determines whether a list of words is valid based on the provided parameters.
:param clean_words: a list of clean word strings.
:param english_word_count: the number of english words removed from the string during cleaning.
:param remove_english: whether or not to remove english words.
:param use_langid: whether or not to use the langid library to determine if a word is English.
:return: True if utterance is valid, False otherwise.
"""
# Exclude utterance if empty after cleaning
cleaned_transcription = " ".join(clean_words).strip()
if cleaned_transcription == "":
return False
# Exclude utterance if > 10% english
if remove_english and len(clean_words) > 0 and english_word_count / len(clean_words) > 0.1:
# print(round(english_word_count / len(clean_words)), trans, file=sys.stderr)
return False
# Exclude utterance if langid thinks it is English
if remove_english and use_langid:
langid_identifier = LanguageIdentifier.from_modelstring(model, norm_probs=True)
lang, prob = langid_identifier.classify(cleaned_transcription)
if lang == "en" and prob > 0.5:
return False
return True
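# Worked example of the 10% rule above (assuming remove_english=True): one English word
# removed against 9 remaining clean words gives 1/9 ≈ 0.11 > 0.1, so the utterance is rejected.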
def clean_json_utterance(utterance: Dict[str, str],
punctuation_to_collapse_by: str = '',
punctuation_to_explode_by: str = '',
special_cases: set = {None},
translation_tags: set = {None},
remove_english: bool = False,
use_langid: bool = False) -> Dict[str, str]:
"""
Clean an utterance (Python dictionary) based on the given parameters.
:param utterance: Python dictionary, must have a 'transcript' key-value.
:param punctuation_to_collapse_by: punctuation marks to strip.
:param punctuation_to_explode_by: punctuation marks to replace with spaces.
:param special_cases: a list of words to always remove from the output.
:param translation_tags: a list of tags to always remove from the output.
:param remove_english: whether or not to remove English from the utterances.
:param use_langid: whether or not to use the langid library to identify English to remove.
:return: cleaned utterance (dictionary).
"""
# TODO make this an interface setting
# special_cases = ["<silence>"] # Any words you want to ignore
if remove_english:
english_words = get_english_words() # pre-load English corpus
else:
english_words = set()
# Clean the text in the dict, returns a list of cleaned words
clean_words, english_word_count = \
clean_utterance(utterance=utterance,
punctuation_to_collapse_by=punctuation_to_collapse_by,
punctuation_to_explode_by=punctuation_to_explode_by,
special_cases=special_cases,
translation_tags=translation_tags,
remove_english=remove_english,
english_words=english_words
)
# Check that the cleaned words are valid (ie, not null, not English etc)
if are_words_valid(clean_words,
english_word_count,
remove_english,
use_langid):
cleaned_transcript = " ".join(clean_words).strip()
else:
# TODO is it best to return an empty str here or raise an error?
cleaned_transcript = ""
utterance['transcript'] = cleaned_transcript
return utterance
def clean_json_data(json_data: List[Dict[str, str]],
punctuation_to_collapse_by: str = '',
punctuation_to_explode_by: str = '',
special_cases: set = {None},
translation_tags: set = {None},
remove_english: bool = False,
use_langid: bool = False,
) -> List[Dict[str, str]]:
"""
Clean a list of utterances (Python dictionaries) based on the given parameters.
:param json_data: List of Python dictionaries, each must have a 'transcript' key-value.
:param punctuation_to_collapse_by: punctuation marks to strip.
:param punctuation_to_explode_by: punctuation marks to replace with spaces.
:param special_cases: a list of words to always remove from the output.
:param translation_tags: a list of tags to always remove from the output.
:param remove_english: whether or not to remove English from the utterances.
:param use_langid: whether or not to use the langid library to identify English to remove.
:return: list of cleaned utterances (dictionaries).
"""
json_data_cleaned = []
for utterance in json_data:
utterance_cleaned = clean_json_utterance(utterance,
punctuation_to_collapse_by=punctuation_to_collapse_by,
punctuation_to_explode_by=punctuation_to_explode_by,
special_cases=special_cases,
translation_tags=translation_tags,
remove_english=remove_english,
use_langid=use_langid)
json_data_cleaned.append(utterance_cleaned)
return json_data_cleaned
def extract_additional_corpora(additional_corpus: str = '',
corpus_txt: str = '',
punctuation_to_collapse_by: str = '',
punctuation_to_explode_by: str = '') -> None:
"""
Takes a text file, extracts all sentences and writes them to the main corpus file.
:param additional_corpus: the path to a plaintext file to extract additional sentences/lines from
:param corpus_txt: the path to the compiled corpus.txt file
:param punctuation_to_collapse_by: punctuation marks to strip
:param punctuation_to_explode_by: punctuation marks to replace with spaces
"""
print("corpus_txt", corpus_txt)
if os.path.exists(corpus_txt):
write_mode = 'a' # append if already exists
else:
write_mode = 'w' # make a new file if not
with open(corpus_txt, write_mode) as corpus_txt_file:
if os.path.exists(additional_corpus):
print(f"Extracting corpus examples from: {additional_corpus}")
with open(additional_corpus, "r", encoding="utf-8", ) as file_:
for line in file_.readlines():
# clean the text along the way
line = \
deal_with_punctuation(text=line,
punctuation_to_collapse_by=punctuation_to_collapse_by,
punctuation_to_explode_by=punctuation_to_explode_by)
if not line.endswith('\n'):
line = line + '\n'
corpus_txt_file.writelines(line)
else:
print(f"Provided additional text additional_corpus file path invalid: "
f"{additional_corpus}")
def deal_with_punctuation(text: str = '',
punctuation_to_collapse_by: str = '',
punctuation_to_explode_by: str = '') -> str:
"""
Removes punctuation from a string
:param text: original text
:param punctuation_to_collapse_by: punctuation marks to strip
:param punctuation_to_explode_by: punctuation marks to replace with spaces
:return: cleaned text
"""
new_text: str = text
# Prioritise exploding first, these are punctuation marks that the user sets
if punctuation_to_explode_by:
pattern_to_explode_by = re.escape(punctuation_to_explode_by)
new_text = re.sub(rf"[{pattern_to_explode_by}]", " ", new_text)
# Then strip the rest
if punctuation_to_collapse_by:
pattern_to_collapse_by = re.escape(punctuation_to_collapse_by)
new_text = re.sub(rf"[{pattern_to_collapse_by}]", "", new_text)
return new_text
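# Illustrative behaviour (made-up input):
#   deal_with_punctuation("ngayu-rlu, karnta",
#                         punctuation_to_collapse_by=",",
#                         punctuation_to_explode_by="-")
#   -> "ngayu rlu karnta"   ('-' exploded to a space first, then ',' stripped)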
def main() -> None:
"""
Run the entire clean_json process as a command line utility.
Usage: python3 clean_json.py [-i INFILE] [-o OUTFILE] [-r] [-u]
"""
parser: ArgumentParser = ArgumentParser()
parser.add_argument("-i", "--infile",
type=str,
help="The path to the dirty json file to clean.",
required=True)
parser.add_argument("-o", "--outfile",
type=str,
help="The path to the clean json file to write to",
required=True)
parser.add_argument("-r", "--remove_english",
help="Remove english-like utterances",
action="store_true")
parser.add_argument("-u", "--use_langid",
help="Use langid library to detect English",
action="store_true")
# TODO add defaults
parser.add_argument("-c", "--punctuation_to_collapse_by",
type=str,
help="Chars to strip")
parser.add_argument("-e", "--punctuation_to_explode_by",
type=str,
help="Chars to strip and replace with spaces")
arguments = parser.parse_args()
dirty_json_data: List[Dict[str, str]] = load_json_file(arguments.infile)
outfile = arguments.outfile if arguments.outfile else sys.stdout
print(f"Filtering dirty json data {arguments.infile}...")
filtered_data = clean_json_data(json_data=dirty_json_data,
remove_english=arguments.remove_english,
use_langid=arguments.use_langid,
punctuation_to_collapse_by=arguments.punctuation_to_collapse_by,
punctuation_to_explode_by=arguments.punctuation_to_explode_by)
write_data_to_json_file(data=list(filtered_data),
file_name=outfile)
print(f"Finished! Wrote {str(len(filtered_data))} transcriptions.")
if __name__ == "__main__":
main()
|
speculator/tests/unit/test_rsi.py | NathanBMcNamara/Speculator | 106 | 119221 |
from speculator.features.RSI import RSI
import unittest
class RSITest(unittest.TestCase):
def test_eval_rs(self):
gains = [0.07, 0.73, 0.51, 0.28, 0.34, 0.43, 0.25, 0.15, 0.68, 0.24]
losses = [0.23, 0.53, 0.18, 0.40]
self.assertAlmostEqual(RSI.eval_rs(gains, losses), 2.746, places=3)
def test_eval_algorithm(self):
gains = [0.07, 0.73, 0.51, 0.28, 0.34, 0.43, 0.25, 0.15, 0.68, 0.24]
losses = [0.23, 0.53, 0.18, 0.40]
self.assertAlmostEqual(RSI.eval_algorithm(gains, losses),
73.307, places=3)
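# Where the expected constants come from, assuming the conventional RSI definition:
#   RS  = sum(gains) / sum(losses) = 3.68 / 1.34 ≈ 2.746
#   RSI = 100 - 100 / (1 + RS)     ≈ 73.307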
|
functions/image/image_processing.py | hsokooti/RegNet | 187 | 119229 |
import math
import numpy as np
import os
import SimpleITK as sitk
import time
import functions.kernel.conv_kernel as conv_kernel
import functions.tf_utils as tfu
def ReadImage(im_address, waiting_time=1):
"""
SimpleITK creates a blank file when writing and then fills it within a few seconds. This wait prevents reading a blank file.
:param im_address: path of the image file to read
:param waiting_time: minimum age (in seconds) the file must have before it is read
:return: the image as a SimpleITK image
"""
while (time.time() - os.path.getmtime(im_address)) < waiting_time:
time.sleep(1)
im_sitk = sitk.ReadImage(im_address)
return im_sitk
def calculate_jac(dvf, voxel_size=None):
"""
:param dvf: a numpy array with shape of (sizeY, sizeX, 2) or (sizeZ, sizeY, sizeX, 3). You might use np.transpose before this function to correct the order of DVF shape.
:param voxel_size: physical voxel spacing in mm
:return: Jac
"""
if voxel_size is None:
voxel_size = [1, 1, 1]
if (len(np.shape(dvf)) - 1) != len(voxel_size):
raise ValueError('dimension of DVF is {} but dimension of voxelSize is {}'.format(
len(np.shape(dvf)) - 1, len(voxel_size)))
T = np.zeros(np.shape(dvf), dtype=np.float32) # derivative should be calculated on T which is DVF + indices (world coordinate)
indices = [None] * (len(np.shape(dvf)) - 1)
dvf_grad = []
if len(voxel_size) == 2:
indices[0], indices[1] = np.meshgrid(np.arange(0, np.shape(dvf)[0]),
np.arange(0, np.shape(dvf)[1]),
indexing='ij')
if len(voxel_size) == 3:
indices[0], indices[1], indices[2] = np.meshgrid(np.arange(0, np.shape(dvf)[0]),
np.arange(0, np.shape(dvf)[1]),
np.arange(0, np.shape(dvf)[2]),
indexing='ij')
for d in range(len(voxel_size)):
indices[d] = indices[d] * voxel_size[d]
T[..., d] = dvf[..., d] + indices[d]  # ellipsis indexing so the 2D branch works as well
dvf_grad.append([grad_mat / voxel_size[d] for grad_mat in np.gradient(T[..., d])])  # DVF.grad can be calculated in one shot without for loop.
if len(voxel_size) == 2:
jac = dvf_grad[0][0] * dvf_grad[1][1] - dvf_grad[0][1] * dvf_grad[1][0]
# f0/dir0 * f1/dir1 - f0/dir1 * f1/dir0
elif len(voxel_size) == 3:
jac = (dvf_grad[0][0] * dvf_grad[1][1] * dvf_grad[2][2] + # f0/dir0 + f1/dir1 + f2/dir2
dvf_grad[0][1] * dvf_grad[1][2] * dvf_grad[2][0] + # f0/dir1 + f1/dir2 + f2/dir0
dvf_grad[0][2] * dvf_grad[1][0] * dvf_grad[2][1] -
dvf_grad[0][2] * dvf_grad[1][1] * dvf_grad[2][0] -
dvf_grad[0][1] * dvf_grad[1][0] * dvf_grad[2][2] -
dvf_grad[0][0] * dvf_grad[1][2] * dvf_grad[2][1]
)
else:
raise ValueError('Length of voxel size should be 2 or 3')
return jac
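# Sanity-check sketch: an all-zero DVF is the identity transform, so the Jacobian
# determinant should be 1 everywhere (assuming unit voxel size):
#   jac = calculate_jac(np.zeros((4, 5, 6, 3)))
#   np.allclose(jac, 1.0)  -> True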
def resampler_by_transform(im_sitk, dvf_t, im_ref=None, default_pixel_value=0, interpolator=sitk.sitkBSpline):
if im_ref is None:
im_ref = sitk.Image(dvf_t.GetDisplacementField().GetSize(), sitk.sitkInt8)
im_ref.SetOrigin(dvf_t.GetDisplacementField().GetOrigin())
im_ref.SetSpacing(dvf_t.GetDisplacementField().GetSpacing())
im_ref.SetDirection(dvf_t.GetDisplacementField().GetDirection())
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(im_ref)
resampler.SetInterpolator(interpolator)
resampler.SetDefaultPixelValue(default_pixel_value)
resampler.SetTransform(dvf_t)
out_im = resampler.Execute(im_sitk)
return out_im
def array_to_sitk(array_input, origin=None, spacing=None, direction=None, is_vector=False, im_ref=None):
if origin is None:
origin = [0, 0, 0]
if spacing is None:
spacing = [1, 1, 1]
if direction is None:
direction = [1, 0, 0, 0, 1, 0, 0, 0, 1]
sitk_output = sitk.GetImageFromArray(array_input, isVector=is_vector)
if im_ref is None:
sitk_output.SetOrigin(origin)
sitk_output.SetSpacing(spacing)
sitk_output.SetDirection(direction)
else:
sitk_output.SetOrigin(im_ref.GetOrigin())
sitk_output.SetSpacing(im_ref.GetSpacing())
sitk_output.SetDirection(im_ref.GetDirection())
return sitk_output
def upsampler_gpu(input, up_scale, output_shape_3d=None):
"""
Upsampling with GPU by an integer scale
:param input: can be a 3D numpy array or sitk image
:param up_scale: an integer value!
:param output_shape_3d:
:return: output: can be a numpy array or sitk image based on the input
"""
import tensorflow as tf
if isinstance(input, sitk.Image):
input_numpy = sitk.GetArrayFromImage(input)
mode = 'sitk'
else:
input_numpy = input
mode = 'numpy'
if not isinstance(up_scale, int):
raise ValueError('up_scale should be an integer. It is {} with type {}'.format(up_scale, type(up_scale)))
orginal_input_shape = np.shape(input_numpy)
if len(orginal_input_shape) < 3:
input_numpy = np.expand_dims(input_numpy, -1)
tf.reset_default_graph()
sess = tf.Session()
input_tf = tf.placeholder(tf.float32, shape=[1, None, None, None, np.shape(input_numpy)[3]], name="Input")
upsampled_tf = tfu.layers.upsampling3d(input_tf, 'UpSampling', scale=up_scale, interpolator='trilinear', padding_mode='SYMMETRIC',
padding='same', output_shape_3d=output_shape_3d)
# upsampled_tf = tf.squeeze(upsampled_batch_tf, axis=0)
sess.run(tf.global_variables_initializer())
[upsampled_numpy] = sess.run([upsampled_tf], feed_dict={input_tf: np.expand_dims(input_numpy, axis=0)})
upsampled_numpy = np.squeeze(upsampled_numpy, 0)
if len(orginal_input_shape) < 3:
upsampled_numpy = np.squeeze(upsampled_numpy, -1)
if mode == 'numpy':
output = upsampled_numpy
elif mode == 'sitk':
output = array_to_sitk(upsampled_numpy.astype(np.float64),
origin=input.GetOrigin(),
spacing=tuple(i / up_scale for i in input.GetSpacing()),
direction=input.GetDirection(),
is_vector=True)
else:
output = None
return output
def upsampler_gpu_old(input, up_scale, default_pixel_value=0, dvf_output_size=None):
"""
Upsampling with GPU by an integer scale
:param input: can be a 3D numpy array or sitk image
:param up_scale: an integer value!
:param default_pixel_value:
:return: output: can be a numpy array or sitk image based on the input
"""
import tensorflow as tf
if isinstance(input, sitk.Image):
input_numpy = sitk.GetArrayFromImage(input)
mode = 'sitk'
else:
input_numpy = input
mode = 'numpy'
if not isinstance(up_scale, int):
raise ValueError('up_scale should be an integer. It is {} with type {}'.format(up_scale, type(up_scale)))
tf.reset_default_graph()
sess = tf.Session()
dvf_tf = tf.placeholder(tf.float32, shape=[1, None, None, None, 3], name="DVF_Input")
DVF_outSize = tf.placeholder(tf.int32, shape=[3], name='DVF_outSize')
convKernelBiLinear = conv_kernel.bilinear_up_kernel(dim=3)
convKernelBiLinear = np.expand_dims(convKernelBiLinear, -1)
convKernelBiLinear = np.expand_dims(convKernelBiLinear, -1)
convKernelBiLinear = tf.constant(convKernelBiLinear)
myDVF0 = tf.expand_dims(dvf_tf[:, :, :, :, 0], -1)
myDVF1 = tf.expand_dims(dvf_tf[:, :, :, :, 1], -1)
myDVF2 = tf.expand_dims(dvf_tf[:, :, :, :, 2], -1)
upSampledDVF0 = tf.nn.conv3d_transpose(myDVF0, convKernelBiLinear, output_shape=(1, DVF_outSize[0], DVF_outSize[1], DVF_outSize[2], 1), strides=(1, up_scale, up_scale, up_scale, 1))
upSampledDVF1 = tf.nn.conv3d_transpose(myDVF1, convKernelBiLinear, output_shape=(1, DVF_outSize[0], DVF_outSize[1], DVF_outSize[2], 1), strides=(1, up_scale, up_scale, up_scale, 1))
upSampledDVF2 = tf.nn.conv3d_transpose(myDVF2, convKernelBiLinear, output_shape=(1, DVF_outSize[0], DVF_outSize[1], DVF_outSize[2], 1), strides=(1, up_scale, up_scale, up_scale, 1))
upSampledDVF = tf.squeeze(tf.concat([upSampledDVF0, upSampledDVF1, upSampledDVF2], -1), axis=0)
sess.run(tf.global_variables_initializer())
[output_numpy] = sess.run([upSampledDVF], feed_dict={dvf_tf: np.expand_dims(input_numpy, axis=0),
DVF_outSize: dvf_output_size})
if mode == 'numpy':
output = output_numpy
elif mode == 'sitk':
output = array_to_sitk(output_numpy.astype(np.float64),
origin=input.GetOrigin(),
spacing=tuple(i / up_scale for i in input.GetSpacing()),
direction=input.GetDirection(),
is_vector=True)
return output
def downsampler_gpu(input, down_scale, kernel_name='bspline', normalize_kernel=True, a=-0.5, default_pixel_value=0):
"""
Downsampling with GPU by an integer scale
:param input: can be a 2D or 3D numpy array or sitk image
:param down_scale: an integer value!
:param kernel_name:
:param normalize_kernel:
:param a:
:param default_pixel_value:
:return: output: can be a numpy array or sitk image based on the input
"""
import tensorflow as tf
if isinstance(input, sitk.Image):
input_numpy = sitk.GetArrayFromImage(input)
mode = 'sitk'
else:
input_numpy = input
mode = 'numpy'
if not isinstance(down_scale, int):
    raise ValueError('down_scale should be an integer. It is {} with type {}'.format(down_scale, type(down_scale)))
kernelDimension = len(np.shape(input_numpy))
input_numpy = np.expand_dims(input_numpy[np.newaxis], axis=-1)
if down_scale == 2:
kernel_size = 7
elif down_scale == 4:
kernel_size = 15
else:
raise ValueError('kernel_size is not defined for down_scale={}'.format(str(down_scale)))
padSize = int(np.floor(kernel_size / 2))  # avoid the deprecated np.int alias
kenelStrides = tuple([down_scale] * kernelDimension)
tf.reset_default_graph()
sess = tf.Session()
x = tf.placeholder(tf.float32, shape=np.shape(input_numpy), name="InputImage")
x_pad = tf.pad(x, ([0, 0], [padSize, padSize], [padSize, padSize], [padSize, padSize], [0, 0]), constant_values=default_pixel_value)
convKernelGPU = conv_kernel.convDownsampleKernel(kernel_name, kernelDimension, kernel_size, normalizeKernel=normalize_kernel, a=a)
convKernelGPU = np.expand_dims(convKernelGPU, -1)
convKernelGPU = np.expand_dims(convKernelGPU, -1)
convKernelGPU = tf.constant(convKernelGPU)
y = tf.nn.convolution(x_pad, convKernelGPU, 'VALID', strides=kenelStrides)
sess.run(tf.global_variables_initializer())
[output_numpy] = sess.run([y], feed_dict={x: input_numpy})
if kernelDimension == 2:
output_numpy = output_numpy[0, :, :, 0]
if kernelDimension == 3:
output_numpy = output_numpy[0, :, :, :, 0]
if mode == 'numpy':
output = output_numpy
elif mode == 'sitk':
output = array_to_sitk(output_numpy, origin=input.GetOrigin(),
spacing=tuple(i * down_scale for i in input.GetSpacing()), direction=input.GetDirection())
return output
# def downsampler_sitk(image_sitk, down_scale, im_ref=None, default_pixel_value=0, interpolator=sitk.sitkBSpline, dimension=3):
# if im_ref is None:
# im_ref = sitk.Image(tuple(round(i / down_scale) for i in image_sitk.GetSize()), sitk.sitkInt8)
# im_ref.SetOrigin(image_sitk.GetOrigin())
# im_ref.SetDirection(image_sitk.GetDirection())
# im_ref.SetSpacing(tuple(i * down_scale for i in image_sitk.GetSpacing()))
# identity = sitk.Transform(dimension, sitk.sitkIdentity)
# downsampled_sitk = resampler_by_transform(image_sitk, identity, im_ref=im_ref, default_pixel_value=default_pixel_value, interpolator=interpolator)
# return downsampled_sitk
def resampler_sitk(image_sitk, spacing=None, scale=None, im_ref=None, im_ref_size=None, default_pixel_value=0, interpolator=sitk.sitkBSpline, dimension=3):
"""
:param image_sitk: input image
:param spacing: desired spacing to set
:param scale: if greater than 1 means downsampling, less than 1 means upsampling
:param im_ref: if im_ref available, the spacing will be overwritten by the im_ref.GetSpacing()
:param im_ref_size: in sikt order: x, y, z
:param default_pixel_value:
:param interpolator:
:param dimension:
:return:
"""
if spacing is None and scale is None:
raise ValueError('spacing and scale cannot be both None')
if spacing is None:
spacing = tuple(i * scale for i in image_sitk.GetSpacing())
if im_ref_size is None:
im_ref_size = tuple(round(i / scale) for i in image_sitk.GetSize())
elif scale is None:
ratio = [spacing_dim / spacing[i] for i, spacing_dim in enumerate(image_sitk.GetSpacing())]
if im_ref_size is None:
im_ref_size = tuple(math.ceil(size_dim * ratio[i]) - 1 for i, size_dim in enumerate(image_sitk.GetSize()))
else:
raise ValueError('spacing and scale cannot both have values')
if im_ref is None:
im_ref = sitk.Image(im_ref_size, sitk.sitkInt8)
im_ref.SetOrigin(image_sitk.GetOrigin())
im_ref.SetDirection(image_sitk.GetDirection())
im_ref.SetSpacing(spacing)
identity = sitk.Transform(dimension, sitk.sitkIdentity)
resampled_sitk = resampler_by_transform(image_sitk, identity, im_ref=im_ref,
default_pixel_value=default_pixel_value,
interpolator=interpolator)
return resampled_sitk
def SITKshow(img, title=None, margin=0.05, dpi=80):
import matplotlib.pyplot as plt
nda = sitk.GetArrayViewFromImage(img)
spacing = img.GetSpacing()
ysize = nda.shape[0]
xsize = nda.shape[1]
figsize = (1 + margin) * ysize / dpi, (1 + margin) * xsize / dpi
fig = plt.figure(title, figsize=figsize, dpi=dpi)
ax = fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin])
extent = (0, xsize * spacing[1], 0, ysize * spacing[0])
t = ax.imshow(nda,
extent=extent,
interpolation='hamming',
cmap='gray',
origin='lower')
if (title):
plt.title(title)
def index_to_world(landmark_index, spacing=None, origin=None, direction=None, im_ref=None):
if im_ref is None:
if spacing is None:
spacing = [1, 1, 1]
if origin is None:
origin = [0, 0, 0]
if direction is None:
direction = [1, 0, 0, 0, 1, 0, 0, 0, 1]
else:
spacing = list(im_ref.GetSpacing())
origin = list(im_ref.GetOrigin())
direction = list(im_ref.GetDirection())
landmarks_point = [None] * len(landmark_index)
for p in range(len(landmark_index)):
landmarks_point[p] = [index * spacing[i] + origin[i] for i, index in enumerate(landmark_index[p])]
return landmarks_point
def world_to_index(landmark_point, spacing=None, origin=None, direction=None, im_ref=None):
if im_ref is None:
if spacing is None:
spacing = [1, 1, 1]
if origin is None:
origin = [0, 0, 0]
if direction is None:
direction = [1, 0, 0, 0, 1, 0, 0, 0, 1]
else:
spacing = list(im_ref.GetSpacing())
origin = list(im_ref.GetOrigin())
direction = list(im_ref.GetDirection())
landmarks_index = [None] * len(landmark_point)
for p in range(len(landmark_point)):
landmarks_index[p] = [round((point - origin[i]) / spacing[i]) for i, point in enumerate(landmark_point[p])]  # parenthesise so the offset is removed before dividing by spacing
return landmarks_index
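# Worked example of the round trip (spacing/origin values are arbitrary):
#   index_to_world([[10, 20, 30]], spacing=[2, 2, 2], origin=[5, 5, 5]) -> [[25, 45, 65]]
#   world_to_index([[25, 45, 65]], spacing=[2, 2, 2], origin=[5, 5, 5]) -> [[10, 20, 30]]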
if __name__ == '__main__':
input = np.ones((50, 50, 50, 3))
input_sitk = sitk.GetImageFromArray(input, isVector=1)
output_sitk = upsampler_gpu(input_sitk, 2, output_shape_3d=[100, 100, 100])  # upsampler_gpu takes no default_pixel_value argument
|
modelchimp/migrations/0044_auto_20190130_0754.py | akarsh3007/modelchimp | 134 | 119230 |
# Generated by Django 2.1.3 on 2019-01-30 07:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('modelchimp', '0043_machinelearningmodel_grid_search'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='first_name',
field=models.CharField(blank=True, default='Unknown', max_length=200),
),
migrations.AlterField(
model_name='profile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
),
]
|
observations/r/sparrows.py | hajime9652/observations | 199 | 119246 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def sparrows(path):
"""Sparrows
Weight and wing length for a sample of Savannah sparrows
A dataset with 116 observations on the following 3 variables.
`Treatment`
Nest adjustment: `control`, `enlarged`, or `reduced`
`Weight`
Weight (in grams)
`WingLength`
Wing length (in mm)
We thank <NAME> and Professor <NAME> from the
Department of Biology at Kenyon College for allowing us to use these data.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `sparrows.csv`.
Returns:
Tuple of np.ndarray `x_train` with 116 rows and 3 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'sparrows.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Stat2Data/Sparrows.csv'
maybe_download_and_extract(path, url,
save_file_name='sparrows.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
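# Illustrative usage (path is hypothetical; column names are those listed in the docstring):
#   x_train, metadata = sparrows('~/data')
#   x_train.shape              -> (116, 3)
#   list(metadata['columns'])  -> ['Treatment', 'Weight', 'WingLength']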
|
torcms/handlers/collect_handler.py | bukun/TorCMS | 243 | 119247 |
# -*- coding:utf-8 -*-
'''
For User collection
'''
import json
import tornado.web
from config import CMS_CFG
from torcms.core import tools
from torcms.core.base_handler import BaseHandler
from torcms.core.tools import logger
from torcms.model.collect_model import MCollect
class CollectHandler(BaseHandler):
'''
For User collection
'''
def initialize(self, **kwargs):
super().initialize()
def get(self, *args, **kwargs):
url_str = args[0]
if url_str:
url_arr = self.parse_url(url_str)
else:
return False
if len(url_arr) == 1:
if url_str == 'list':
self.show_list(url_str)
else:
if self.get_current_user():
self.add_or_update(url_str)
else:
self.set_status(403)
return False
elif len(url_arr) == 2:
if url_arr[0] == 'remove':
self.remove_collect(url_arr[1])
else:
self.show_list(url_arr[0], url_arr[1])
@tornado.web.authenticated
def add_or_update(self, app_id):
'''
Add or update the user's collection record for the given app.
'''
logger.info('Collect info: user-{0}, uid-{1}'.format(
self.userinfo.uid, app_id))
MCollect.add_or_update(self.userinfo.uid, app_id)
out_dic = {'success': True}
return json.dump(out_dic, self)
@tornado.web.authenticated
def remove_collect(self, post_id):
'''
Remove an item from the user's collection.
'''
logger.info('Collect info: user-{0}, uid-{1}'.format(
self.userinfo.uid, post_id))
MCollect.remove_collect(self.userinfo.uid, post_id)
out_dic = {'success': True}
return json.dump(out_dic, self)
@tornado.web.authenticated
def show_list(self, the_list, cur_p=''):
'''
List of the user collections.
'''
current_page_num = int(cur_p) if cur_p else 1
current_page_num = 1 if current_page_num < 1 else current_page_num
num_of_cat = MCollect.count_of_user(self.userinfo.uid)
page_num = int(num_of_cat / CMS_CFG['list_num']) + 1
kwd = {'current_page': current_page_num}
self.render('misc/collect/list.html',
recs_collect=MCollect.query_pager_by_all(
self.userinfo.uid, current_page_num).objects(),
pager=tools.gen_pager_purecss(
'/collect/{0}'.format(the_list), page_num,
current_page_num),
userinfo=self.userinfo,
cfg=CMS_CFG,
kwd=kwd)
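# Routing summary for get() above (the /collect prefix is an assumption; the actual
# mount point comes from the application's route table):
#   /collect/list               -> show_list('list')
#   /collect/<app_id>           -> add_or_update(app_id)      (login required)
#   /collect/remove/<post_id>   -> remove_collect(post_id)
#   /collect/<list>/<page>      -> show_list(list, page)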
|
test/python/testpipeline/testquantization.py | malywonsz/txtai | 1,893 | 119294 |
"""
Quantization module tests
"""
import platform
import unittest
from transformers import AutoModel
from txtai.pipeline import HFModel, HFPipeline
class TestQuantization(unittest.TestCase):
"""
Quantization tests.
"""
@unittest.skipIf(platform.system() == "Darwin", "Quantized models not supported on macOS")
def testModel(self):
"""
Tests quantizing a model through HFModel.
"""
model = HFModel(quantize=True, gpu=False)
model = model.prepare(AutoModel.from_pretrained("google/bert_uncased_L-2_H-128_A-2"))
self.assertIsNotNone(model)
@unittest.skipIf(platform.system() == "Darwin", "Quantized models not supported on macOS")
def testPipeline(self):
"""
Tests quantizing a model through HFPipeline.
"""
pipeline = HFPipeline("text-classification", "google/bert_uncased_L-2_H-128_A-2", True, False)
self.assertIsNotNone(pipeline)
|
blesuite/event_handler.py | jreynders/BLESuite-1 | 198 | 119295 |
from blesuite.pybt.gap import GAP
import blesuite.pybt.att as att
import logging
log = logging.getLogger(__name__)
# log.addHandler(logging.NullHandler())
class BTEventHandler(object):
"""
BTEventHandler is an event handling class passed to the BLEConnectionManager in order to
have user-controlled callbacks that are called when BLE events occur (ATT, SMP, L2CAP, connection, scan, metadata,
and disconnect events). This class provides the skeleton for functions called by the stack when an event
is received. For instance, when an ATT packet is received, the stack processes the packet and other ATT hooks,
then triggers the supplied BTEventHandler instance's on_att_event(connection_handle, data).
:param connection_manager: BLEConnectionManager instance that allows the user to send packets whilst
processing an event hook trigger.
:type connection_manager: BLEConnectionManager
"""
def __init__(self, connection_manager):
self.connection_manager = connection_manager
def __del__(self):
self.connection_manager = None
def on_scan_event(self, address, address_type, data):
"""
Called when a scan event is received by the stack.
:param address: Address of the seen peer device
:type address: str
:param address_type: Address type of the seen peer device
:type address_type: int
:param data: GAP data from the peer device advertisement packet
:type data: list of strings or a single string
:return:
:rtype:
"""
log.debug("Saw %s (%s)" % (address, "public" if address_type == 0 else "random"))
if len(data) > 0:
try:
gap = GAP()
if isinstance(data, list):
log.debug("data was list!")
for i, j in enumerate(data):
gap.decode(str(data[i]))
else:
gap.decode(data)
log.debug("GAP: %s" % gap)
except Exception as e:
log.debug("Exception when reading GAP: %s" % e)
return
def on_metadata_event(self, status, connection_handle, meta, address, event):
"""
Called when a metadata event is triggered by the HCI device. This represents a metadata event not
associated with a scan or connection event.
:param status: Status of the LE Meta Event - Sub Event
:type status: int
:param connection_handle: The connection handle the event was received
:type connection_handle: int
:param meta: The metadata
:type meta: str
:param address: Peer address that caused the metadata event
:type address: str
:param event: The sub event code
:type event: int
:return:
:rtype:
"""
log.debug("Received LE Meta packet from %s Event: %s!" % (address, event))
def on_connect_event(self, status, connection_handle, meta, address, address_type):
"""
Called when a metadata event is triggered by the HCI device with a Connection Compete LE sub event.
:param status: Status of the connection
:type status: int
:param connection_handle: The connection handle the event was received
:type connection_handle: int
:param meta: The metadata
:type meta: str
:param address: Peer address that caused the metadata event
:type address: str
:param address_type: Peer address type
:type address_type: int
:return:
:rtype:
"""
log.debug("Connected to %s!" % address)
return
def on_disconnect_event(self, connection_handle, reason):
"""
Called when a disconnect event is received.
:param connection_handle: The connection handle the disconnect occurred on.
:type connection_handle: int
:param reason: The reason for the disconnect
:type reason: int
:return:
:rtype:
"""
log.debug("Disconnected! ConnectionHandle: %s reason: %s" % (connection_handle, reason))
return
def on_att_event(self, connection_handle, data):
"""
Called when an ATT event is received (after other ATT processing and handlers have been invoked).
:param connection_handle: Connection handle the event was received on
:type connection_handle: int
:param data: Packet data
:type data: Scapy ATT packet -- scapy.layers.bluetooth -- Contains an ATT Header and an ATT body
:return:
:rtype:
"""
log.debug("ATT Event Connection Handle: %s Data: %s" % (connection_handle, data))
return
def on_unknown_event(self, packet):
"""
Called when an unknown event is received. Note: These are usually packet types not supported currently
by the routing core of the stack.
:param packet: Scapy Bluetooth packet.
:type packet: Packet
:return:
:rtype:
"""
log.debug("Unknown Event Packet: %s" % packet)
return
class ATTSecurityHook(object):
"""
ATTSecurityHook is used by blesuite.pybt.gatt to hook, modify, or overwrite security decisions
made by the ATT database based on the current BLE connection security, the attribute properties, and
the attribute permissions. These hooks are called after each security evaluation step has completed and allow the
hook to view and modify the final result of the check. The hooks receive identifying information about the target
attribute and the associated permissions and properties.
"""
def __init__(self):
pass
def att_authorization_check_hook(self, att_opcode, uuid, att_property, att_read_permission, att_write_permission,
connection_permission, authorization_required):
"""
Called when an authorization check is made. This check is part of the security check workflow
and validates that if the attribute requires authorization in order to access it, then the
authorization procedure must succeed (implementation dependent procedure). In BLESuite, this function
acts as the authorization procedure.
:param att_opcode: ATT opcode of the request attempting to access the attribute
:type att_opcode: int
:param uuid: UUID (16-bit or 128-bit) of the target attribute
:type uuid: blesuite.pybt.gatt.UUID object instance
:param att_property: Attribute properties assigned to the attribute (blesuite.utils.att_utils.ATT_PROP_READ | blesuite.utils.att_utils.ATT_PROP_WRITE)
:type att_property: int
:param att_read_permission: Security requirements of attribute in order to read the value
:type att_read_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param att_write_permission: Security requirements of attribute in order to write to the value
:type att_write_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param connection_permission: Security Manager associated with the current BLE connection where the attribute is being accessed.
:type connection_permission: blesuite.pybt.sm.SecurityManager
:param authorization_required: Flag to indicate whether the attribute requires authorization
:type authorization_required: bool
:return: Result that indicates if the check passed or not (True = passed)
:rtype: bool
"""
check_passed = True
log.debug("ATT Authorization check invoked. Operation: %d Target Attribute: %s ATT Property: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"Connection Security Mode: %d Connection Security Level: %d "
"Attribute requires authorization: %d" %
(att_opcode, uuid, att_property, att_read_permission.security_mode, att_read_permission.security_level,
att_write_permission.security_mode, att_write_permission.security_level,
connection_permission.get_security_mode_mode(), connection_permission.get_security_mode_level(),
authorization_required))
return check_passed
def att_authentication_check_hook(self, att_authentication_check_result,
att_opcode, uuid, att_property, att_read_permission,
att_write_permission, connection_permission):
"""
Called when an authentication check is made. This check is part of the security check workflow
and validates that the connection, on which the attribute access request is being made, has been
authenticated. (This means that the pairing method used to establish the encrypted connection must
be authenticated if authentication is required)
:param att_authentication_check_result: Result of the ATT server's authentication check
:type att_authentication_check_result: bool
:param att_opcode: ATT opcode of the request attempting to access the attribute
:type att_opcode: int
:param uuid: UUID (16-bit or 128-bit) of the target attribute
:type uuid: blesuite.pybt.gatt.UUID object instance
:param att_property: Attribute properties assigned to the attribute (blesuite.utils.att_utils.ATT_PROP_READ | blesuite.utils.att_utils.ATT_PROP_WRITE)
:type att_property: int
:param att_read_permission: Security requirements of attribute in order to read the value
:type att_read_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param att_write_permission: Security requirements of attribute in order to write to the value
:type att_write_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param connection_permission: Security Manager associated with the current BLE connection where the attribute is being accessed.
:type connection_permission: blesuite.pybt.sm.SecurityManager
:return: Result that indicates if the check passed or not (True = passed)
:rtype: bool
"""
check_passed = att_authentication_check_result
log.debug("ATT Authentication check invoked. Result: %d"
"Operation: %d Target Attribute: %s ATT Property: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"Connection Security Mode: %d Connection Security Level: %d" %
(att_authentication_check_result,
att_opcode, uuid, att_property, att_read_permission.security_mode, att_read_permission.security_level,
att_write_permission.security_mode, att_write_permission.security_level,
connection_permission.get_security_mode_mode(), connection_permission.get_security_mode_level()))
return check_passed
def att_encryption_check_hook(self, att_encryption_check_result,
att_opcode, uuid, att_property, att_read_permission,
att_write_permission, connection_permission, is_connection_encrypted):
"""
Called when an encryption check is made. This check is part of the security check workflow
and validates that the connection, on which the attribute access request is being made, is
encrypted.
:param att_encryption_check_result: Result of the ATT server's encryption check
:type att_encryption_check_result: bool
:param att_opcode: ATT opcode of the request attempting to access the attribute
:type att_opcode: int
:param uuid: UUID (16-bit or 128-bit) of the target attribute
:type uuid: blesuite.pybt.gatt.UUID object instance
:param att_property: Attribute properties assigned to the attribute (blesuite.utils.att_utils.ATT_PROP_READ | blesuite.utils.att_utils.ATT_PROP_WRITE)
:type att_property: int
:param att_read_permission: Security requirements of attribute in order to read the value
:type att_read_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param att_write_permission: Security requirements of attribute in order to write to the value
:type att_write_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param connection_permission: Security Manager associated with the current BLE connection where the attribute is being accessed.
:type connection_permission: blesuite.pybt.sm.SecurityManager
:param is_connection_encrypted: Flag to indicate whether the connection requesting access to the attribute is encrypted
:type is_connection_encrypted: bool
:return: Result that indicates if the check passed or not (True = passed)
:rtype: bool
"""
check_passed = att_encryption_check_result
log.debug("ATT Encryption check invoked. Result: %d"
"Operation: %d Target Attribute: %s ATT Property: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"Connection Security Mode: %d Connection Security Level: %d Is Connection Encrypted?: %s",
(att_encryption_check_result,
att_opcode, uuid, att_property, att_read_permission.security_mode, att_read_permission.security_level,
att_write_permission.security_mode, att_write_permission.security_level,
connection_permission.get_security_mode_mode(), connection_permission.get_security_mode_level(),
is_connection_encrypted))
return check_passed
def att_operation_supported_check_hook(self, att_operation_supported_check_result,
att_opcode, uuid, att_property):
"""
Called when an ATT operation check is made. This check is part of the security check workflow
and validates that the requested ATT operation (read, write) is supported by the target attribute.
:param att_operation_supported_check_result: Result of the ATT server's ATT operation check
:type att_operation_supported_check_result: bool
:param att_opcode: ATT opcode of the request attempting to access the attribute
:type att_opcode: int
:param uuid: UUID (16-bit or 128-bit) of the target attribute
:type uuid: blesuite.pybt.gatt.UUID object instance
:param att_property: Attribute properties assigned to the attribute (blesuite.utils.att_utils.ATT_PROP_READ | blesuite.utils.att_utils.ATT_PROP_WRITE)
:type att_property: int
:return: Result that indicates if the check passed or not (True = passed)
:rtype: bool
"""
check_passed = att_operation_supported_check_result
log.debug("ATT Operation supported check invoked. Result: %d"
"att_opcode: %d uuid: %s att_property: %d" % (
att_operation_supported_check_result, att_opcode,
uuid, att_property
))
return check_passed
def att_security_check_hook(self, att_operation_supported_check_result,
att_authorization_check_result,
att_encryption_check_result,
att_authentication_check_result,
att_opcode, uuid, att_property, att_read_permission, att_write_permission,
connection_permission, authorization_required, is_connection_encrypted):
"""
Called when a request to access an attribute has been made by a peer before the operation
is executed. This hook occurs at the end of the security check function that processes
the ATT operation, authorization requirements, encryption requirements,
and authentication requirements security checks. This hook receives all results of the security checks
and the returned result will notify the ATT server if the operation should continue or be discarded
with a particular error. (Errors will trigger based on the check that fails. The order of checks is
operation, authorization, encryption, and authentication)
:param att_operation_supported_check_result: Result of the ATT server's ATT operation check
:type att_operation_supported_check_result: bool
:param att_authorization_check_result: Result of the ATT server's authorization check
:type att_authorization_check_result: bool
:param att_encryption_check_result: Result of the ATT server's encryption check
:type att_encryption_check_result: bool
:param att_authentication_check_result: Result of the ATT server's authentication check
:type att_authentication_check_result: bool
:param att_opcode: ATT opcode of the request attempting to access the attribute
:type att_opcode: int
:param uuid: UUID (16-bit or 128-bit) of the target attribute
:type uuid: blesuite.pybt.gatt.UUID object instance
:param att_property: Attribute properties assigned to the attribute (blesuite.utils.att_utils.ATT_PROP_READ | blesuite.utils.att_utils.ATT_PROP_WRITE)
:type att_property: int
:param att_read_permission: Security requirements of attribute in order to read the value
:type att_read_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param att_write_permission: Security requirements of attribute in order to write to the value
:type att_write_permission: blesuite.pybt.sm.SecurityMode (has attributes security_level and security_mode)
:param connection_permission: Security Manager associated with the current BLE connection where the attribute is being accessed.
:param authorization_required: Flag to indicate whether the attribute requires authorization
:type authorization_required: bool
:type connection_permission: blesuite.pybt.sm.SecurityManager
:param is_connection_encrypted: Flag to indicate whether the connection requesting access to the attribute is encrypted
:type is_connection_encrypted: bool
:return: Result that indicates each check has passed (order - operation, authorization, encryption, authentication)
:rtype: tuple of bool (4 element)
"""
log.debug("ATT Security check hook invoked. "
"ATT Operation supported check result: %d "
"ATT Authorization security check result: %d "
"ATT encryption security check result: %d "
"ATT Authentication security check result: %d "
"Operation: %d Target Attribute: %s ATT Property: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"ATT Read Security Mode: %d ATT Read Security Level: %d "
"Connection Security Mode: %d Connection Security Level: %d "
"Authorization required: %d "
"Is connection encrypted?: %s" %
(att_operation_supported_check_result,
att_authorization_check_result,
att_encryption_check_result,
att_authentication_check_result,
att_opcode, uuid, att_property, att_read_permission.security_mode,
att_read_permission.security_level,
att_write_permission.security_mode, att_write_permission.security_level,
connection_permission.get_security_mode_mode(), connection_permission.get_security_mode_level(),
authorization_required,
is_connection_encrypted))
return (att_operation_supported_check_result,
att_authorization_check_result,
att_encryption_check_result,
att_authentication_check_result)
class ATTEventHook(object):
"""
ATTEventHook is used by blesuite.pybt.att to allow the user to hook ATT operations triggered by a peer
ATT request. These hooks allow the user to view and/or modify outgoing ATT responses, incoming write requests,
and incoming long write requests (prepared write and execute write).
"""
def __init__(self):
pass
def att_response_hook(self, received_packet, our_response_packet):
"""
Called before an ATT response packet is sent to a peer device. This enables the response packet to be
viewed in order to modify read response data, send notifications/indications based on a read
or error operation, modify error messages, or send packets to a peer device based upon
the received packet and/or our response.
:param received_packet: ATT request packet received from peer
:type received_packet: scapy.layers.bluetooth ATT packet with ATT header
:param our_response_packet: ATT response packet to be sent to our peer
:type our_response_packet: scapy.layers.bluetooth ATT packet with ATT header
:return: A flag to indicate whether we should send the response packet and the packet to send.
:rtype: bool, ATT packet body (header is appended automatically)
"""
send_packet = True
log.debug("ATT response hook triggered. Received packet: %s Send packet: %s packet: %s" % (received_packet, send_packet, our_response_packet))
return (send_packet, our_response_packet)
def att_prepare_queued_write_hook(self, gatt_handle, offset, data):
"""
Called when the peer device sends a Prepare Write request. This enables the attribute handle, offset,
and data from the request to be viewed and/or modified. Additionally, this allows the user to
deny the write from being performed.
:param gatt_handle: ATT handle of the target attribute
:type gatt_handle: int
:param offset: Offset to begin the write operation to the prepared write queue
:type offset: int
:param data: Data to write to the prepared write queue
:type data: str
        :return: A flag to indicate if the value should be written to the prepared write queue, the target attribute handle, the offset to begin the write, and the data to write
:rtype: bool, int, int, str
"""
write_value_to_queue = True
log.debug("ATT queued write hook triggered. Write value to attribute pepared write queue"
"for attribute: %s on offset: %d with value: %s" % (hex(gatt_handle), offset, data))
return (write_value_to_queue, gatt_handle, offset, data)
def att_execute_queued_write_hook(self, flags):
"""
Called when the peer device sends an Execute Write request. This enables the flag
from the request to be viewed and/or modified. Additionally, this allows the user to
deny the write from being performed.
:param flags: Execute write flags
:type flags: int
:return: Flag to indicate that the execute write should continue and the execute write flags to pass along
:rtype: bool, int
"""
execute = True
log.debug("ATT execute write hook triggered. Action: %d" % flags)
return (execute, flags)
def att_write_hook(self, gatt_handle, data):
"""
Called when the peer device sends a write request. This enables the attribute handle and data
from the request to be viewed and/or modified. Additionally, this allows the user to
deny the write from being performed.
:param gatt_handle: ATT handle of the target attribute
:type gatt_handle: int
:param data: Data to write to the attribute
:type data: str
:return: Flag to indicate that the write should continue, the target attribute handle, and the data to write
:rtype: bool, int, str
"""
write_value_to_attribute = True
log.debug("ATT write hook triggered. Write value to attribute: %s value: %s" % (hex(gatt_handle), data))
return (write_value_to_attribute, gatt_handle, data)
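# A minimal usage sketch (not part of the original module): subclassing ATTEventHook to deny
# writes to one attribute handle. The class name and handle value below are illustrative
# assumptions, not part of BLESuite's shipped API.
class LockedHandleEventHook(ATTEventHook):
    LOCKED_HANDLE = 0x000b  # hypothetical handle of an attribute we want to keep read-only
    def att_write_hook(self, gatt_handle, data):
        if gatt_handle == self.LOCKED_HANDLE:
            # Returning False as the first element tells the ATT server to skip the write.
            return (False, gatt_handle, data)
        return super(LockedHandleEventHook, self).att_write_hook(gatt_handle, data)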
|
tests/test_utils.py
|
drdarina/slack-machine
| 111 |
119313
|
from machine.utils.collections import CaseInsensitiveDict
from machine.utils import sizeof_fmt
from tests.singletons import FakeSingleton
def test_Singleton():
c = FakeSingleton()
c2 = FakeSingleton()
assert c == c2
def test_CaseInsensitiveDict():
d = CaseInsensitiveDict({'foo': 'bar'})
assert 'foo' in d
assert 'FoO' in d
def test_size_fmt():
byte_size = 500
assert sizeof_fmt(byte_size) == '500.0B'
kb_size = 1124
assert sizeof_fmt(kb_size) == '1.1KB'
gb_size = 168963795964
assert sizeof_fmt(gb_size) == '157.4GB'
|
dialogue_ope/airdialogue_model_transformer/generate_ope_data.py
|
deepneuralmachine/google-research
| 23,901 |
119342
|
<filename>dialogue_ope/airdialogue_model_transformer/generate_ope_data.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/facebookresearch/ParlAI/issues/2855
import json
import os
from copy import deepcopy
from tqdm import tqdm
from parlai.core.agents import create_agent, create_agent_from_model_file
from parlai.core.params import ParlaiParser
import parlai.utils.logging as logging
logging.disable()
SILENCE = '__SILENCE__'
PERSONA_PREFIX = 'your persona: '
ALL_SETTINGS = [
'5K',
'10K',
'20K',
'30K',
'40K',
'50K',
'75K',
'100K',
'150K',
'200K',
'250K',
'full',
'5K_w',
'10K_w',
'20K_w',
'30K_w',
'40K_w',
'50K_w',
'75K_w',
'100K_w',
'150K_w',
'200K_w',
'250K_w',
'full_w',
]
from flashtool import Logger
import sys
def setup_args():
parser = ParlaiParser(True, True)
parser.add_argument('--eval-dir', type=str, default='outputs/selfchat_eval/')
parser.add_argument('--log-file', type=str, default='log.jsonl')
parser.add_argument(
'--save-dir', type=str, default='outputs/selfchat_ope_data/')
parser.add_argument('--tgt-agent', type=str, default=None)
return parser
def get_context(kb):
# prepend kb
reservation = kb['reservation']
tickets = kb['kb']
keys = [
'price', 'num_connections', 'class', 'airline', 'departure_airport',
'departure_month', 'departure_day', 'departure_time_num',
'return_airport', 'return_month', 'return_day', 'return_time_num'
]
ticket_text = []
ticket_text.append('flight ' + 'None' + ' , ' + ' , '.join(
[k.replace('num_', '').replace('_num', '') + ' ' + 'None' for k in keys]))
for t in tickets:
assert set(t.keys()) == set(keys + ['flight_number']), f'{t}'
ticket_text.append('flight ' + str(t['flight_number']) + ' , ' +
' , '.join([
k.replace('num_', '').replace('_num', '') + ' ' +
str(t[k]).strip() for k in keys
]))
# replace for consistency
if reservation != 0:
t = tickets[reservation - 1000]
ticket_text.append('reservation ' + str(t['flight_number']) + ' , ' +
' , '.join([
k.replace('num_', '').replace('_num', '') + ' ' +
str(t[k]).strip() for k in keys
]))
else:
ticket_text.append('reservation ' + 'None' + ' , ' + ' , '.join([
k.replace('num_', '').replace('_num', '') + ' ' + 'None' for k in keys
]))
return ticket_text
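# Illustrative note (example values assumed, not taken from the dataset): each string built by
# get_context looks like
#   "flight 1000 , price 200 , connections 1 , class economy , airline UA , ..."
# with the "num_"/"_num" affixes stripped from the key names; the last entry describes the
# reservation, or "reservation None , ..." when the customer has no reservation.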
def _run_conversation(conversation_id, conversation, tgt_agent, ref_agent):
tgt_agent.reset()
ref_agent.reset()
# process context
kb = conversation['kb']
ticket_text = get_context(kb)
tgt_agent_encoder_state = None
new_dialog = []
for turn_id, turn in enumerate(conversation['conversation']):
speaker = turn['speaker']
reference_text = turn['text']
if speaker == 'customer':
assert turn_id % 2 == 0
act = {'id': 'customer', 'text': reference_text, 'episode_done': False}
act['tickets'] = ticket_text[:-1]
act['reservation'] = ticket_text[-1]
act['return_encoder_state'] = True
# the following is just padding
act['action_name'] = 'none'
act['action_flight'] = []
act['action_intent'] = 'book'
act['action_status'] = 'book'
observed = ref_agent.observe(act)
observed = tgt_agent.observe(act)
new_dialog.append({'speaker': 'human_evaluator', 'text': turn['text']})
if speaker == 'agent':
assert turn_id % 2 == 1
ref_response = ref_agent.batch_act([ref_agent.observation])[0]
ref_agent.self_observe(ref_response)
tgt_response = tgt_agent.batch_act([tgt_agent.observation])[0]
tgt_agent.self_observe(deepcopy(ref_response))
assert tgt_response['id'] == ref_response['id']
if ref_response['text'] != reference_text:
logging.error(
f'{conversation_id}:{turn_id}: ref {repr(reference_text)} '
f'!= resp {repr(ref_response["text"])}. Context:\n{repr(observed)}')
import ipdb
ipdb.set_trace()
return False
new_dialog.append({'speaker': 'model', 'text': turn['text']})
new_dialog.append({'speaker': 'tgt_model', 'text': tgt_response['text']})
tgt_agent_encoder_state = tgt_response['encoder_states']
#else:
# logging.info(f'{conversation_id}:{turn_id} OK')
conversation['dialog'] = new_dialog
reward = tgt_agent.get_air_score(
tgt_agent_encoder_state,
conversation['expected_action'],
kb,
)
conversation['reward'] = {
'reward': reward['reward'],
'name_score': reward['name_score'],
'flight_score': reward['flight_score'],
'status_score': reward['status_score']
}
conversation.pop('conversation')
return True
def main():
parser = setup_args()
opt = parser.parse_args()
if opt['tgt_agent'] is None:
tgt_agent_list = ALL_SETTINGS
else:
tgt_agent_list = [opt['tgt_agent']]
for tgt_agent in tgt_agent_list:
save_dir = os.path.join(opt['save_dir'], tgt_agent)
os.makedirs(save_dir, exist_ok=True)
for ref_agent in ALL_SETTINGS:
#if ref_agent == tgt_agent:
# continue
print('Evaluating {} <-> {}'.format(tgt_agent, ref_agent))
eval_single(opt, tgt_agent, ref_agent, save_dir)
def eval_single(opt, tgt_agent, ref_agent, save_dir):
eval_file_path = opt['eval_dir'] + ref_agent + '/' + opt['log_file']
save_file_path = os.path.join(save_dir, ref_agent + '.jsonl')
model_mf = 'outputs/agent_' + tgt_agent + '/model'
model_optf = 'outputs/agent_' + tgt_agent + '/model.opt'
with open(model_optf) as f:
model_opt = json.load(f)
model_opt['interactive_mode'] = True
tgt_agent = create_agent_from_model_file(model_mf, model_opt)
model_mf = 'outputs/agent_' + ref_agent + '/model'
model_optf = 'outputs/agent_' + ref_agent + '/model.opt'
with open(model_optf) as f:
model_opt = json.load(f)
model_opt['interactive_mode'] = True
ref_agent = create_agent_from_model_file(model_mf, model_opt)
with open(eval_file_path) as eval_file, open(save_file_path,
'w') as save_file:
num_match = 0
errorids = []
for i, line in tqdm(enumerate(eval_file)):
if not line.strip():
continue
conversation = json.loads(line)
if _run_conversation(i, conversation, tgt_agent, ref_agent):
num_match += 1
assert conversation['dialog'][-1]['speaker'] == 'tgt_model'
assert len(conversation['dialog']) % 3 == 0
conversation['reward_ref'] = conversation.pop('report')
save_file.write(json.dumps(conversation) + '\n')
else:
errorids.append(i)
print('Matched: {}/{}'.format(num_match, (num_match + len(errorids))))
print('Error IDs: ', errorids)
if __name__ == '__main__':
main()
|
packages/pyright-internal/src/tests/samples/self3.py
|
martindemello/pyright
| 4,391 |
119350
|
<gh_stars>1000+
# This sample tests the special-case handling of Self when comparing
# two functions whose signatures differ only in the Self scope.
class SomeClass:
def __str__(self) -> str:
...
__repr__ = __str__
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/models/m_plex/account.py
|
disrupted/Trakttv.bundle
| 1,346 |
119357
|
<gh_stars>1000+
from plugin.core.exceptions import AccountAuthenticationError
from plugin.models.core import db
from plugin.models.account import Account
from datetime import datetime, timedelta
from exception_wrappers.libraries.playhouse.apsw_ext import *
from plex import Plex
from urllib import urlencode
from urlparse import urlparse, parse_qsl
from xml.etree import ElementTree
import logging
import requests
REFRESH_INTERVAL = timedelta(days=1)
log = logging.getLogger(__name__)
class PlexAccount(Model):
class Meta:
database = db
db_table = 'plex.account'
account = ForeignKeyField(Account, 'plex_accounts', unique=True)
key = IntegerField(null=True, unique=True)
username = CharField(null=True, unique=True)
title = CharField(null=True)
thumb = TextField(null=True)
refreshed_at = DateTimeField(null=True)
def __init__(self, *args, **kwargs):
super(PlexAccount, self).__init__(*args, **kwargs)
self._basic_credential = None
@property
def account_id(self):
return self._data.get('account')
@property
def basic(self):
if self._basic_credential:
return self._basic_credential
return self.basic_credentials.first()
@basic.setter
def basic(self, value):
self._basic_credential = value
def authorization(self):
# Basic
basic = self.basic
if basic:
return self.basic_authorization(basic)
# No account authorization available
raise AccountAuthenticationError("Plex account hasn't been authenticated")
def basic_authorization(self, basic_credential=None):
if basic_credential is None:
basic_credential = self.basic
# Ensure token exists
if basic_credential.token_server is None:
raise AccountAuthenticationError("Plex account is missing the server token")
# Handle anonymous authentication
if basic_credential.token_server == 'anonymous':
log.debug('Using anonymous authorization for %r', self)
return Plex.configuration.authentication(token=None)
# Configure client
log.debug('Using basic authorization for %r', self)
return Plex.configuration.authentication(basic_credential.token_server)
def refresh(self, force=False, save=True):
# Retrieve credentials
basic = self.basic
if not basic:
return False
# Check if refresh is required
if self.refresh_required(basic):
force = True
# Only refresh account every `REFRESH_INTERVAL`
if not force and self.refreshed_at:
since_refresh = datetime.utcnow() - self.refreshed_at
if since_refresh < REFRESH_INTERVAL:
return False
# Refresh account details
if not self.refresh_details(basic):
return False
if not basic.refresh(force=True):
return False
# Store changes in database
self.refreshed_at = datetime.utcnow()
if save:
self.save()
return True
def refresh_details(self, basic):
if basic.token_plex == 'anonymous':
return self.refresh_anonymous()
log.info('Refreshing plex account: %r', self)
# Fetch account details
response = requests.get('https://plex.tv/users/account', headers={
'X-Plex-Token': basic.token_plex
})
if not (200 <= response.status_code < 300):
log.warn('Unable to retrieve account details from plex.tv (status_code: %s)', response.status_code)
return False
user = ElementTree.fromstring(response.content)
# Update details
self.username = user.attrib.get('username') or None
self.title = user.attrib.get('title')
self.thumb = user.attrib.get('thumb')
# Update `key`
if self.id == 1:
# Use administrator `key`
self.key = 1
else:
# Retrieve user id from plex.tv details
try:
user_id = int(user.attrib.get('id'))
except Exception as ex:
log.warn('Unable to cast user id to integer: %s', ex, exc_info=True)
user_id = None
# Update `key`
self.key = user_id
return True
def refresh_anonymous(self):
log.info('Refreshing plex account: %r (anonymous)', self)
self.username = 'administrator'
self.title = 'Administrator'
self.thumb = None
if self.id == 1:
self.key = 1
else:
self.key = None
return True
def refresh_required(self, basic):
if self.key is None:
return True
if self.title is None:
return True
if not basic.token_server:
return True
return False
def thumb_url(self, default=None, rating='pg', size=256):
if not self.thumb:
return None
thumb = urlparse(self.thumb)
if thumb.netloc.endswith('plex.tv'):
return self.thumb
if not thumb.netloc.endswith('gravatar.com'):
return None
result = 'https://secure.gravatar.com%s' % thumb.path
if default is None:
query = dict(parse_qsl(thumb.query))
default = query.get('d') or query.get('default')
return result + '?' + urlencode({
'd': default,
'r': rating,
's': size
})
def to_json(self, full=False):
result = {
'id': self.id,
'username': self.username,
'title': self.title,
'thumb_url': self.thumb_url()
}
if not full:
return result
# Merge authorization details
result['authorization'] = {
'basic': {'state': 'empty'}
}
# - Basic credentials
basic = self.basic
if basic is not None:
result['authorization']['basic'] = basic.to_json(self)
return result
def __repr__(self):
return '<PlexAccount username: %r>' % (
self.username,
)
|
examples/safe_cars_run.py
|
emsal1863/PILCO
| 277 |
119390
|
<filename>examples/safe_cars_run.py
import numpy as np
import tensorflow as tf
import gym
from pilco.models import PILCO
from pilco.controllers import RbfController, LinearController
from pilco.rewards import ExponentialReward, LinearReward
from linear_cars_env import LinearCars
from safe_pilco_extension.rewards_safe import RiskOfCollision, ObjectiveFunction
from safe_pilco_extension.safe_pilco import SafePILCO
from utils import rollout, policy
from gpflow import config
from gpflow import set_trainable
int_type = config.default_int()
float_type = config.default_float()
class Normalised_Env():
def __init__(self, m, std):
self.env = LinearCars()
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
self.m = m
self.std = std
def state_trans(self, x):
return np.divide(x-self.m, self.std)
def step(self, action):
ob, r, done, _ = self.env.step(action)
return self.state_trans(ob), r, done, {}
def reset(self):
ob = self.env.reset()
return self.state_trans(ob)
def render(self):
self.env.render()
def safe_cars(seed=0):
T = 25
th = 0.10
np.random.seed(seed)
J = 5
N = 5
eval_runs = 5
env = LinearCars()
# Initial random rollouts to generate a dataset
X1, Y1, _, _ = rollout(env, pilco=None, timesteps=T, verbose=True, random=True, render=False)
for i in range(1,5):
X1_, Y1_, _, _ = rollout(env, pilco=None, timesteps=T, verbose=True, random=True, render=False)
X1 = np.vstack((X1, X1_))
Y1 = np.vstack((Y1, Y1_))
env = Normalised_Env(np.mean(X1[:,:4],0), np.std(X1[:,:4], 0))
X, Y, _, _ = rollout(env, pilco=None, timesteps=T, verbose=True, random=True, render=False)
for i in range(1,J):
X_, Y_, _, _ = rollout(env, pilco=None, timesteps=T, verbose=True, random=True, render=False)
X = np.vstack((X, X_))
Y = np.vstack((Y, Y_))
state_dim = Y.shape[1]
control_dim = X.shape[1] - state_dim
m_init = np.transpose(X[0,:-1,None])
S_init = 0.1 * np.eye(state_dim)
controller = RbfController(state_dim=state_dim, control_dim=control_dim,
num_basis_functions=40, max_action=0.2)
#w1 = np.diag([1.5, 0.001, 0.001, 0.001])
#t1 = np.divide(np.array([3.0, 1.0, 3.0, 1.0]) - env.m, env.std)
#R1 = ExponentialReward(state_dim=state_dim, t=t1, W=w1)
# R1 = LinearReward(state_dim=state_dim, W=np.array([0.1, 0.0, 0.0, 0.0]))
R1 = LinearReward(state_dim=state_dim, W=np.array([1.0 * env.std[0], 0., 0., 0,]))
bound_x1 = 1 / env.std[0]
bound_x2 = 1 / env.std[2]
B = RiskOfCollision(2, [-bound_x1-env.m[0]/env.std[0], -bound_x2 - env.m[2]/env.std[2]],
[bound_x1 - env.m[0]/env.std[0], bound_x2 - env.m[2]/env.std[2]])
pilco = SafePILCO((X, Y), controller=controller, mu=-300.0, reward_add=R1, reward_mult=B, horizon=T, m_init=m_init, S_init=S_init)
for model in pilco.mgpr.models:
model.likelihood.variance.assign(0.001)
set_trainable(model.likelihood.variance, False)
# define tolerance
new_data = True
# init = tf.global_variables_initializer()
evaluation_returns_full = np.zeros((N, eval_runs))
evaluation_returns_sampled = np.zeros((N, eval_runs))
X_eval = []
for rollouts in range(N):
print("***ITERATION**** ", rollouts)
if new_data:
pilco.optimize_models(maxiter=100)
new_data = False
pilco.optimize_policy(maxiter=20, restarts=2)
# check safety
m_p = np.zeros((T, state_dim))
S_p = np.zeros((T, state_dim, state_dim))
predicted_risks = np.zeros(T)
predicted_rewards = np.zeros(T)
for h in range(T):
m_h, S_h, _ = pilco.predict(m_init, S_init, h)
m_p[h,:], S_p[h,:,:] = m_h[:], S_h[:,:]
predicted_risks[h], _ = B.compute_reward(m_h, S_h)
predicted_rewards[h], _ = R1.compute_reward(m_h, S_h)
overall_risk = 1 - np.prod(1.0-predicted_risks)
print("Predicted episode's return: ", sum(predicted_rewards))
print("Overall risk ", overall_risk)
print("Mu is ", pilco.mu.numpy())
print("bound1 ", bound_x1, " bound1 ", bound_x2)
if overall_risk < th:
X_new, Y_new, _, _ = rollout(env, pilco=pilco, timesteps=T, verbose=True, render=False)
new_data = True
X = np.vstack((X, X_new)); Y = np.vstack((Y, Y_new))
pilco.mgpr.set_data((X, Y))
if overall_risk < (th/4):
pilco.mu.assign(0.75 * pilco.mu.numpy())
else:
X_new, Y_new,_,_ = rollout(env, pilco=pilco, timesteps=T, verbose=True, render=False)
print(m_p[:,0] - X_new[:,0])
print(m_p[:,2] - X_new[:,2])
print("*********CHANGING***********")
_, _, r = pilco.predict(m_init, S_init, T)
print(r)
# to verify this actually changes, run the reward wrapper before and after on the same trajectory
pilco.mu.assign(1.5 * pilco.mu.numpy())
_, _, r = pilco.predict(m_init, S_init, T)
print(r)
if __name__=='__main__':
safe_cars()
|
solutions/problem_016.py
|
ksvr444/daily-coding-problem
| 1,921 |
119452
|
<reponame>ksvr444/daily-coding-problem
class OrderLog:
def __init__(self, size):
self.log = list()
self.size = size
def __repr__(self):
return str(self.log)
def record(self, order_id):
self.log.append(order_id)
if len(self.log) > self.size:
self.log = self.log[1:]
def get_last(self, i):
return self.log[-i]
log = OrderLog(5)
log.record(1)
log.record(2)
assert log.log == [1, 2]
log.record(3)
log.record(4)
log.record(5)
assert log.log == [1, 2, 3, 4, 5]
log.record(6)
log.record(7)
log.record(8)
assert log.log == [4, 5, 6, 7, 8]
assert log.get_last(4) == 5
assert log.get_last(1) == 8
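# A minimal alternative sketch (not part of the original solution): collections.deque with a
# maxlen bound keeps only the last N order ids with O(1) appends, instead of re-slicing a list.
from collections import deque
class DequeOrderLog:
    def __init__(self, size):
        self.log = deque(maxlen=size)  # older entries are discarded automatically
    def record(self, order_id):
        self.log.append(order_id)
    def get_last(self, i):
        return self.log[-i]
deque_log = DequeOrderLog(5)
for order_id in range(1, 9):
    deque_log.record(order_id)
assert list(deque_log.log) == [4, 5, 6, 7, 8]
assert deque_log.get_last(4) == 5
assert deque_log.get_last(1) == 8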
|
reddit2telegram/channels/~inactive/reddit_wtf/app.py
|
CaringCat/reddit2telegram
| 187 |
119491
|
<filename>reddit2telegram/channels/~inactive/reddit_wtf/app.py
#encoding:utf-8
subreddit = 'wtf'
t_channel = '@reddit_wtf'
def send_post(submission, r2t):
return r2t.send_simple(submission)
|
cacheback/tasks.py
|
kevin-brown/django-cacheback
| 160 |
119505
|
<filename>cacheback/tasks.py
from celery import shared_task
from django.conf import settings
@shared_task(ignore_result=getattr(settings, 'CACHEBACK_TASK_IGNORE_RESULT', False))
def refresh_cache(klass_str, obj_args, obj_kwargs, call_args, call_kwargs):
from .base import Job
Job.perform_async_refresh(klass_str, obj_args, obj_kwargs, call_args, call_kwargs)
|
tests/unit2/test_is_point_in_polygon.py
|
yegarti/arcade
| 824 |
119508
|
import arcade
def test_point_in_rectangle():
polygon = [
(0, 0),
(0, 50),
(50, 50),
(50, 0),
]
result = arcade.is_point_in_polygon(25, 25, polygon)
assert result is True
def test_point_not_in_empty_polygon():
polygon = []
result = arcade.is_point_in_polygon(25, 25, polygon)
assert result is False
|
src/admin/database/demo.py
|
aimanow/sft
| 280 |
119510
|
<reponame>aimanow/sft
import sqlalchemy as sa
from sqlalchemy.orm import relationship
from godmode.database import database
demo_database = database("sqlite:///database/demo.sqlite", connect_args={"check_same_thread": False})
class User(demo_database.TableBase):
__table__ = sa.Table('users', demo_database.metadata, autoload=True)
class Post(demo_database.TableBase):
__table__ = sa.Table('posts', demo_database.metadata, autoload=True)
user = relationship('User')
|
recipes/Python/580616_Python_method_chaining_examples/recipe-580616.py
|
tdiprima/code
| 2,023 |
119513
|
'''
Program: string_processor.py
Demo of method chaining in Python.
By: <NAME> -
http://jugad2.blogspot.in/p/about-vasudev-ram.html
Copyright 2016 <NAME>
'''
import copy
class StringProcessor(object):
'''
A class to process strings in various ways.
'''
def __init__(self, st):
'''Pass a string for st'''
self._st = st
def lowercase(self):
'''Make lowercase'''
self._st = self._st.lower()
return self
def uppercase(self):
'''Make uppercase'''
self._st = self._st.upper()
return self
def capitalize(self):
'''Make first char capital (if letter); make other letters lower'''
self._st = self._st.capitalize()
return self
def delspace(self):
'''Delete spaces'''
self._st = self._st.replace(' ', '')
return self
def rep(self):
'''Like Python's repr'''
return self._st
def dup(self):
'''Duplicate the object'''
return copy.deepcopy(self)
def process_string(s):
print
sp = StringProcessor(s)
print 'Original:', sp.rep()
print 'After uppercase:', sp.dup().uppercase().rep()
print 'After lowercase:', sp.dup().lowercase().rep()
print 'After uppercase then capitalize:', sp.dup().uppercase().\
capitalize().rep()
print 'After delspace:', sp.dup().delspace().rep()
def main():
print "Demo of method chaining in Python:"
# Use extra spaces between words to show effect of delspace.
process_string('hOWz It GoInG?')
process_string('The QUIck brOWn fOx')
main()
|
PaddleNLP/Research/Dialogue-PLATO/plato/modules/layer_norm.py
|
XiaoguangHu01/models
| 1,319 |
119572
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
LayerNorm layer.
"""
# from paddle.fluid.dygraph import LayerNorm
from six.moves import reduce
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.dygraph import Layer
import logging
class LayerNorm(Layer):
""" Implement LayerNorm in dygraph mode. """
def __init__(self,
name_scope,
scale=True,
shift=True,
begin_norm_axis=1,
epsilon=1e-05,
param_attr=None,
bias_attr=None,
act=None):
super().__init__(name_scope)
self._scale = scale
self._shift = shift
self._begin_norm_axis = begin_norm_axis
self._epsilon = epsilon
self._param_attr = param_attr
self._bias_attr = bias_attr
self._act = act
return
def _build_once(self, input):
""" Create parameters. """
self._dtype = self._helper.input_dtype(input)
input_shape = input.shape
param_shape = [
reduce(lambda x, y: x * y, input_shape[self._begin_norm_axis:])
]
if self._scale:
self._scale_w = self.create_parameter(
attr=self._param_attr,
shape=param_shape,
dtype=self._dtype,
default_initializer=fluid.initializer.Constant(1.0))
else:
if self._param_attr:
logging.warn("param_attr are only avaliable with scale is True")
if self._shift:
assert self._bias_attr is not False
self._bias_w = self.create_parameter(
attr=self._bias_attr,
shape=param_shape,
dtype=self._dtype,
is_bias=True)
else:
if self._bias_attr:
logging.warn("bias_attr are only avaliable with shift is True")
return
def forward(self, x):
""" Forward process of LayerNorm. """
mean = layers.reduce_mean(x,
dim=list(range(self._begin_norm_axis, len(x.shape))),
keep_dim=True)
shift_x = layers.elementwise_sub(x=x, y=mean, axis=0)
variance = layers.reduce_mean(layers.square(shift_x),
dim=list(range(self._begin_norm_axis, len(x.shape))),
keep_dim=True)
r_stdev = layers.rsqrt(variance + self._epsilon)
norm_x = layers.elementwise_mul(x=shift_x, y=r_stdev, axis=0)
out = layers.elementwise_mul(x=norm_x, y=self._scale_w, axis=-1)
out = layers.elementwise_add(x=out, y=self._bias_w, axis=-1)
return out
|
tools/docker-images/cartridge-docker-images/service-images/tomcat-saml-sso/packs/plugins/TomcatServerStarterPlugin.py
|
TanJay/stratos
| 127 |
119613
|
<reponame>TanJay/stratos
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mdsclient
from plugins.contracts import ICartridgeAgentPlugin
import time
import zipfile
import subprocess
from modules.util.log import LogFactory
import os
class TomcatServerStarterPlugin(ICartridgeAgentPlugin):
def run_plugin(self, values):
log = LogFactory().get_log(__name__)
log.info("Starting tomcat server starter plugin...")
# wait till SAML_ENDPOINT becomes available
mds_response = None
while mds_response is None:
log.debug("Waiting for SAML_ENDPOINT to be available from metadata service for app ID: %s" % values["APPLICATION_ID"])
time.sleep(5)
mds_response = mdsclient.get(app=True)
if mds_response is not None and mds_response.properties.get("SAML_ENDPOINT") is None:
mds_response = None
saml_endpoint = mds_response.properties["SAML_ENDPOINT"]
log.debug("SAML_ENDPOINT value read from Metadata service: %s" % saml_endpoint)
# start tomcat
tomcat_start_command = "exec /opt/tomcat/bin/startup.sh"
log.info("Starting Tomcat server: [command] %s, [STRATOS_SAML_ENDPOINT] %s" % (tomcat_start_command, saml_endpoint))
env_var = os.environ.copy()
env_var["STRATOS_SAML_ENDPOINT"] = saml_endpoint
env_var["STRATOS_HOST_NAME"] = values["HOST_NAME"]
log.info("Reading port mappings...")
port_mappings_str = values["PORT_MAPPINGS"]
tomcat_http_port = None
# port mappings format: """NAME:mgt-console|PROTOCOL:https|PORT:4500|PROXY_PORT:8443;
# NAME:tomcat-http|PROTOCOL:http|PORT:4501|PROXY_PORT:7280;"""
log.info("Port mappings: %s" % port_mappings_str)
if port_mappings_str is not None:
port_mappings_array = port_mappings_str.split(";")
if port_mappings_array:
for port_mapping in port_mappings_array:
log.debug("port_mapping: %s" % port_mapping)
name_value_array = port_mapping.split("|")
name = name_value_array[0].split(":")[1]
protocol = name_value_array[1].split(":")[1]
port = name_value_array[2].split(":")[1]
if name == "tomcat-http" and protocol == "http":
tomcat_http_port = port
log.info("Kubernetes service port of tomcat http transport: %s" % tomcat_http_port)
env_var["STRATOS_HOST_PORT"] = tomcat_http_port
p = subprocess.Popen(tomcat_start_command, env=env_var, shell=True)
output, errors = p.communicate()
log.debug("Tomcat server started")
log.info("Tomcat server starter plugin completed")
|
toolchain/riscv/MSYS/python/Lib/symbol.py
|
zhiqiang-hu/bl_iot_sdk
| 207 |
119621
|
<reponame>zhiqiang-hu/bl_iot_sdk<filename>toolchain/riscv/MSYS/python/Lib/symbol.py
#! /usr/bin/env python3
"""Non-terminal symbols of Python grammar (from "graminit.h")."""
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# ./python Lib/symbol.py
#--start constants--
single_input = 256
file_input = 257
eval_input = 258
decorator = 259
decorators = 260
decorated = 261
async_funcdef = 262
funcdef = 263
parameters = 264
typedargslist = 265
tfpdef = 266
varargslist = 267
vfpdef = 268
stmt = 269
simple_stmt = 270
small_stmt = 271
expr_stmt = 272
annassign = 273
testlist_star_expr = 274
augassign = 275
del_stmt = 276
pass_stmt = 277
flow_stmt = 278
break_stmt = 279
continue_stmt = 280
return_stmt = 281
yield_stmt = 282
raise_stmt = 283
import_stmt = 284
import_name = 285
import_from = 286
import_as_name = 287
dotted_as_name = 288
import_as_names = 289
dotted_as_names = 290
dotted_name = 291
global_stmt = 292
nonlocal_stmt = 293
assert_stmt = 294
compound_stmt = 295
async_stmt = 296
if_stmt = 297
while_stmt = 298
for_stmt = 299
try_stmt = 300
with_stmt = 301
with_item = 302
except_clause = 303
suite = 304
test = 305
test_nocond = 306
lambdef = 307
lambdef_nocond = 308
or_test = 309
and_test = 310
not_test = 311
comparison = 312
comp_op = 313
star_expr = 314
expr = 315
xor_expr = 316
and_expr = 317
shift_expr = 318
arith_expr = 319
term = 320
factor = 321
power = 322
atom_expr = 323
atom = 324
testlist_comp = 325
trailer = 326
subscriptlist = 327
subscript = 328
sliceop = 329
exprlist = 330
testlist = 331
dictorsetmaker = 332
classdef = 333
arglist = 334
argument = 335
comp_iter = 336
sync_comp_for = 337
comp_for = 338
comp_if = 339
encoding_decl = 340
yield_expr = 341
yield_arg = 342
#--end constants--
sym_name = {}
for _name, _value in list(globals().items()):
if type(_value) is type(0):
sym_name[_value] = _name
def _main():
import sys
import token
if len(sys.argv) == 1:
sys.argv = sys.argv + ["Include/graminit.h", "Lib/symbol.py"]
token._main()
if __name__ == "__main__":
_main()
|
recipes/Python/496973_Get_versiWin32_Portable_Executable/recipe-496973.py
|
tdiprima/code
| 2,023 |
119679
|
import struct
def get_version_from_win32_pe(file):
# http://windowssdk.msdn.microsoft.com/en-us/library/ms646997.aspx
sig = struct.pack("32s", u"VS_VERSION_INFO".encode("utf-16-le"))
# This pulls the whole file into memory, so not very feasible for
# large binaries.
try:
        filedata = open(file, "rb").read()
except IOError:
return "Unknown"
offset = filedata.find(sig)
if offset == -1:
return "Unknown"
filedata = filedata[offset + 32 : offset + 32 + (13*4)]
version_struct = struct.unpack("13I", filedata)
ver_ms, ver_ls = version_struct[4], version_struct[5]
return "%d.%d.%d.%d" % (ver_ls & 0x0000ffff, (ver_ms & 0xffff0000) >> 16,
ver_ms & 0x0000ffff, (ver_ls & 0xffff0000) >> 16)
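# A minimal usage sketch (the path below is an assumed example, not part of the recipe):
if __name__ == "__main__":
    # Prints a dotted four-part version string (e.g. "5.1.2600.5512") or "Unknown".
    print(get_version_from_win32_pe(r"C:\Windows\notepad.exe"))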
|
ocpmodels/modules/scheduler.py
|
Irlirion/ocp
| 242 |
119693
|
import inspect
import torch.optim.lr_scheduler as lr_scheduler
from ocpmodels.common.utils import warmup_lr_lambda
class LRScheduler:
"""
Learning rate scheduler class for torch.optim learning rate schedulers
Notes:
        If no learning rate scheduler is specified in the config, the default
        scheduler is warmup_lr_lambda (ocpmodels.common.utils) rather than no
        scheduler at all; this is kept for backward compatibility. To run
        without a lr scheduler, specify scheduler: "Null" in the optim section
        of the config.
Args:
config (dict): Optim dict from the input config
optimizer (obj): torch optim object
"""
def __init__(self, optimizer, config):
self.optimizer = optimizer
self.config = config.copy()
if "scheduler" in self.config:
self.scheduler_type = self.config["scheduler"]
else:
self.scheduler_type = "LambdaLR"
scheduler_lambda_fn = lambda x: warmup_lr_lambda(x, self.config)
self.config["lr_lambda"] = scheduler_lambda_fn
if self.scheduler_type != "Null":
self.scheduler = getattr(lr_scheduler, self.scheduler_type)
scheduler_args = self.filter_kwargs(config)
self.scheduler = self.scheduler(optimizer, **scheduler_args)
def step(self, metrics=None, epoch=None):
if self.scheduler_type == "Null":
return
if self.scheduler_type == "ReduceLROnPlateau":
if metrics is None:
raise Exception(
"Validation set required for ReduceLROnPlateau."
)
self.scheduler.step(metrics)
else:
self.scheduler.step()
def filter_kwargs(self, config):
# adapted from https://stackoverflow.com/questions/26515595/
sig = inspect.signature(self.scheduler)
filter_keys = [
param.name
for param in sig.parameters.values()
if param.kind == param.POSITIONAL_OR_KEYWORD
]
filter_keys.remove("optimizer")
scheduler_args = {
arg: self.config[arg] for arg in self.config if arg in filter_keys
}
return scheduler_args
def get_lr(self):
for group in self.optimizer.param_groups:
return group["lr"]
|
foolbox/models/__init__.py
|
SamplingAndEnsemblingSolvers/foolbox
| 2,292 |
119704
|
<gh_stars>1000+
from .base import Model # noqa: F401
from .base import TransformBoundsWrapper # noqa: F401
from .pytorch import PyTorchModel # noqa: F401
from .tensorflow import TensorFlowModel # noqa: F401
from .jax import JAXModel # noqa: F401
from .numpy import NumPyModel # noqa: F401
from .wrappers import ThresholdingWrapper # noqa: F401
|
infra/bots/assets/skimage/create.py
|
pospx/external_skia
| 2,151 |
119725
|
<filename>infra/bots/assets/skimage/create.py
#!/usr/bin/env python
#
# Copyright 2016 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create the asset."""
import argparse
import common
from assets import asset_utils
def create_asset(target_dir):
"""Create the asset."""
# The common case is to add one or more images to the existing set. Therefore,
# download the previous version first.
asset = asset_utils.Asset(common.ASSET_NAME, asset_utils.MultiStore())
asset.download_current_version(target_dir)
# Allow the user to modify the contents of the target dir.
raw_input('Previous SKImage contents have been downloaded. Please make '
'your desired changes in the following directory and press enter '
'to continue:\n%s' % target_dir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--target_dir', '-t', required=True)
args = parser.parse_args()
create_asset(args.target_dir)
if __name__ == '__main__':
main()
|
train_image_classifier.py
|
kiralpoon/TwinGAN
| 736 |
119790
|
<filename>train_image_classifier.py<gh_stars>100-1000
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This is a copy of slim/train_image_classifier with slight modifications
# to allow multiple labels for the same image.
# ==============================================================================
"""Generic training script that trains a model using a given dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import util_misc
from datasets import convert_danbooru_data
from deployment import model_deploy
from model import model_inheritor
from nets import grad_cam
from nets import nets_factory
##########################
# Network and loss Flags #
##########################
tf.flags.DEFINE_string(
'model_name', 'inception_v3',
'The name of the image classification architecture used.')
tf.flags.DEFINE_float(
'classification_threshold', 0.25,
'Labels are considered to be present if the classification value crosses this threshold.'
'Currently used only when `do_eval_debug` flag is set.')
tf.flags.DEFINE_boolean(
'predict_multilabel', True,
'If true, we predict a single 0~1 score for each class. Otherwise the classes as mutually exclusive.'
)
tf.flags.DEFINE_boolean(
'output_single_file', False,
'If true, the output mode (where it outputs the predicted labels for each image) will only output to one file.')
tf.flags.DEFINE_string(
'output_single_file_name', 'output.csv',
'Name of the output file.')
FLAGS = tf.flags.FLAGS
PRELOGITS_LAYER_NAME = 'PreLogits'
class ClassifierModel(model_inheritor.GeneralModel):
"""This class has not yet been refactored."""
######################
# Select the network #
######################
def _select_network(self):
network_fn = nets_factory.get_network_fn(
FLAGS.model_name,
num_classes=(self.num_classes - FLAGS.labels_offset),
weight_decay=FLAGS.weight_decay,
is_training=FLAGS.is_training,
)
return network_fn
####################
# Define the model #
####################
@staticmethod
def _clone_fn(networks, batch_queue, batch_names, data_batched=None, is_training=False, **kwargs):
"""Allows data parallelism by creating multiple clones of network_fn."""
data_batched = super(ClassifierModel, ClassifierModel)._get_data_batched(batch_queue, batch_names,
data_batched)
images = data_batched.get('source')
labels = data_batched.get('target')
if labels is None or images is None:
raise ValueError('images and labels have to be available in the dataset.')
network_fn = networks
try:
logits, end_points = network_fn(images, prediction_fn=tf.sigmoid, create_aux_logits=False)
except TypeError:
tf.logging.warning('Cannot set prediction_fn to sigmoid, or create_aux_logits to False!')
logits, end_points = network_fn(images, )
if FLAGS.dataset_dtype == 'float16' and 'AuxLogits' in end_points:
end_points['AuxLogits'] = tf.cast(end_points['AuxLogits'], tf.float32)
end_points['Logits'] = tf.cast(end_points['Logits'], tf.float32)
end_points['images'] = images
end_points['labels'] = labels
ClassifierModel.add_loss(data_batched, end_points)
return end_points
####################
# Define the loss #
####################
@staticmethod
def add_loss(data_batched, end_points, discriminator_network_fn=None):
targets = data_batched.get('target')
loss_fn = tf.losses.sigmoid_cross_entropy if FLAGS.predict_multilabel else tf.losses.softmax_cross_entropy
if 'AuxLogits' in end_points:
loss_fn(targets, end_points['AuxLogits'], weights=0.4, scope='aux_loss')
loss_fn(targets, end_points['Logits'], weights=1.0)
def _add_optimization(self, clones, optimizer, summaries, update_ops, global_step):
# Variables to train.
variables_to_train = self._get_variables_to_train()
# and returns a train_tensor and summary_op
total_loss, clones_gradients = model_deploy.optimize_clones(
clones,
optimizer,
gradient_scale=self._get_gradient_scale(),
var_list=variables_to_train)
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
if clones_gradients:
# Add summaries to the gradients.
summaries |= set(model_deploy.add_gradients_summaries(clones_gradients))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
return train_tensor
#############
# Summaries #
#############
@staticmethod
def _define_eval_metrics(end_points, data_batched):
metric_map = super(ClassifierModel, ClassifierModel)._define_eval_metrics(end_points, data_batched)
# Define the metrics:
# streaming_auc requires inputs to be within [0,1]
targets = data_batched.get('target')
clipped_predictions = tf.clip_by_value(end_points['Predictions'], 0, 1)
metric_map['AUC'] = tf.metrics.auc(targets, clipped_predictions)
metric_map['mean_squared_error'] = slim.metrics.streaming_mean_squared_error(end_points['Predictions'], targets)
metric_map['precision_at_thresholds'] = tf.metrics.precision_at_thresholds(targets, clipped_predictions,
[i / 10.0 for i in range(0, 11)])
metric_map['recall_at_thresholds'] = tf.metrics.recall_at_thresholds(targets, clipped_predictions,
[i / 10.0 for i in range(0, 11)])
return metric_map
def _add_image_summaries(self, end_points, summaries):
# Add summaries for images, if there are any.
if self._maybe_is_image(end_points['images']):
self._add_one_image_summary('images', end_points['images'])
@staticmethod
def _add_loss_summaries(first_clone_scope, summaries, end_points):
super(ClassifierModel, ClassifierModel)._add_loss_summaries(first_clone_scope, summaries, end_points)
# Adds loss metrics.
if 'Predictions' in end_points:
auc, auc_op = tf.metrics.auc(end_points['labels'], tf.clip_by_value(end_points['Predictions'], 0, 1),
updates_collections=tf.GraphKeys.UPDATE_OPS)
summaries.add(tf.summary.scalar('losses/auc_metric', auc))
else:
tf.logging.warning('Cannot calculate the auc because there is no endpoint called "Predictions".')
###########################
# Eval and output results #
###########################
def get_items_to_encode(self, end_points, data_batched):
"""Outputs a list with format (name, is_image, tensor)"""
targets = data_batched.get('target')
items_to_encode = [
('sources', True, self._post_process_image(data_batched.get('source'))),
('targets', False, targets),
('predictions', False, end_points['Predictions']),
]
for class_i in range(10):
grad_cam_mask_class_i = grad_cam.grad_cam(FLAGS.model_name, end_points, class_i)
masked_source_class_i = grad_cam.impose_mask_on_image(grad_cam_mask_class_i, data_batched)
one_hot_class = tf.one_hot([class_i for _ in range(targets.shape[0])], targets.shape[-1])
items_to_encode.append(('class_%d_name' % (class_i), False, one_hot_class))
items_to_encode.append(('masked_source_class_%d' % (class_i), True,
self._post_process_image(masked_source_class_i)), )
return items_to_encode
@staticmethod
def to_human_friendly(eval_items, delimiter=' '):
ret = []
labels_dict = util_misc.get_tags_dict(FLAGS.tags_id_lookup_file, FLAGS.tags_key_column_index,
FLAGS.tags_value_column_index)
for name, is_image, vals in eval_items:
if is_image:
ret.append((name, is_image, vals))
else:
human_readable_vals = []
for val in vals:
if isinstance(val, str):
human_readable_vals = vals
break
human_readable_val = []
if FLAGS.process_mutually_exclusive_labels:
val = util_misc.process_anime_face_labels(val, FLAGS.classification_threshold)
for i, item in enumerate(val):
# The best way is to get the threshold from an AUC eval.
if item >= FLAGS.classification_threshold:
                            human_readable_val.append(labels_dict.get(i, 'UNKNOWN_LABEL'))
human_readable_vals.append(' '.join(human_readable_val))
ret.append((name, is_image, human_readable_vals))
return ret
@staticmethod
def _define_outputs(end_points, data_batched):
"""Output label predictions for each image."""
if FLAGS.output_single_file:
return [
('prelogits', False, end_points[PRELOGITS_LAYER_NAME]),
('filename', False, data_batched.get('filename')),
('predictions', False, end_points['Predictions']),
]
else:
return [
('sources', True, ClassifierModel._post_process_image(data_batched.get('source'))),
('filename', False, data_batched.get('filename')),
('predictions', False, end_points['Predictions']),
]
@staticmethod
def _write_outputs(output_results, output_ops, ):
save_dir = FLAGS.eval_dir
if FLAGS.output_single_file:
single_file_name = os.path.join(save_dir, FLAGS.output_single_file_name)
# Flatten the prelogits
output_results[0] = np.reshape(output_results[0], [output_results[0].shape[0], output_results[0].shape[-1]])
output_results = [item.tolist() for item in output_results]
with open(single_file_name, 'ab') as f:
writer = csv.writer(f)
writer.writerows([[output_results[1][i]] + output_results[0][i] + output_results[2][i]
for i in range(len(output_results[0]))])
else:
encoded_list = []
for j in range(len(output_results)):
encoded_list.append(output_ops[j][:-1] + (output_results[j].tolist(),))
items = ClassifierModel.save_images(encoded_list, save_dir)
human_friendly_results = ClassifierModel.to_human_friendly(items,
delimiter=convert_danbooru_data.TAG_TEXT_DELIMITER)
num_labels_written = 0
for i, predictions in enumerate(human_friendly_results[-1][-1]):
if FLAGS.process_mutually_exclusive_labels:
if not predictions:
try:
tf.gfile.Remove(os.path.join(save_dir, human_friendly_results[0][2][i]))
except tf.errors.OpError as e:
tf.logging.warning(e)
continue # Skip empty predictions. (The image will still be written but there will be no label).
image_name = human_friendly_results[1][2][i]
try:
tf.gfile.Rename(os.path.join(save_dir, human_friendly_results[0][2][i]), os.path.join(save_dir, image_name))
except tf.errors.OpError as e:
tf.logging.warning(e)
tags_file_path = os.path.join(save_dir, image_name + '.txt')
with open(tags_file_path, 'w') as f:
f.write(predictions)
num_labels_written += 1
tf.logging.info('%d label files are written.' % num_labels_written)
def main(self):
super(ClassifierModel, self).main()
def main(_):
model = ClassifierModel()
model.main()
if __name__ == '__main__':
tf.app.run()
|
datasets/code_x_glue_tt_text_to_text/common.py
|
WojciechKusa/datasets
| 10,608 |
119793
|
from typing import List
import datasets
# Citation, taken from https://github.com/microsoft/CodeXGLUE
_DEFAULT_CITATION = """@article{CodeXGLUE,
title={CodeXGLUE: A Benchmark Dataset and Open Challenge for Code Intelligence},
year={2020},}"""
class Child:
_DESCRIPTION = None
_FEATURES = None
_CITATION = None
SPLITS = {"train": datasets.Split.TRAIN}
_SUPERVISED_KEYS = None
def __init__(self, info):
self.info = info
def homepage(self):
return self.info["project_url"]
def _info(self):
# This is the description that will appear on the datasets page.
return datasets.DatasetInfo(
description=self.info["description"] + "\n\n" + self._DESCRIPTION,
features=datasets.Features(self._FEATURES),
homepage=self.homepage(),
citation=self._CITATION or _DEFAULT_CITATION,
supervised_keys=self._SUPERVISED_KEYS,
)
def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
SPLITS = self.SPLITS
_URL = self.info["raw_url"]
urls_to_download = {}
for split in SPLITS:
if split not in urls_to_download:
urls_to_download[split] = {}
for key, url in self.generate_urls(split):
if not url.startswith("http"):
url = _URL + "/" + url
urls_to_download[split][key] = url
downloaded_files = {}
for k, v in urls_to_download.items():
downloaded_files[k] = dl_manager.download_and_extract(v)
return [
datasets.SplitGenerator(
name=SPLITS[k],
gen_kwargs={"split_name": k, "file_paths": downloaded_files[k]},
)
for k in SPLITS
]
def check_empty(self, entries):
all_empty = all([v == "" for v in entries.values()])
all_non_empty = all([v != "" for v in entries.values()])
if not all_non_empty and not all_empty:
raise RuntimeError("Parallel data files should have the same number of lines.")
return all_empty
class TrainValidTestChild(Child):
SPLITS = {
"train": datasets.Split.TRAIN,
"valid": datasets.Split.VALIDATION,
"test": datasets.Split.TEST,
}
|
test_python_toolbox/test_cute_iter_tools/test_pushback_iterator.py
|
hboshnak/python_toolbox
| 119 |
119826
|
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
from python_toolbox import cute_testing
from python_toolbox.cute_iter_tools import PushbackIterator
def test_pushback_iterator():
pushback_iterator = PushbackIterator(iter([1, 2, 3]))
assert next(pushback_iterator) == 1
assert next(pushback_iterator) == 2
pushback_iterator.push_back()
assert next(pushback_iterator) == 2
assert next(pushback_iterator) == 3
pushback_iterator.push_back()
assert next(pushback_iterator) == 3
with cute_testing.RaiseAssertor(StopIteration):
next(pushback_iterator)
pushback_iterator.push_back()
assert next(pushback_iterator) == 3
with cute_testing.RaiseAssertor(StopIteration):
next(pushback_iterator)
|
Samples/Hosting/Scenarios/register_user_commands.py
|
TwoUnderscorez/dlr
| 307 |
119829
|
import App
def foo():
print 'hello world'
App.UserCommands['foo'] = foo
|
test_protocol/lfw/face_cropper/crop_calfw_by_arcface.py
|
weihaoxie/FaceX-Zoo
| 1,329 |
119846
|
<gh_stars>1000+
"""
@author: <NAME>
@date: 20201012
@contact: <EMAIL>
"""
import os
import sys
import math
import multiprocessing
import cv2
sys.path.append('/export/home/wangjun492/wj_armory/faceX-Zoo/face_sdk')
from core.image_cropper.arcface_face_recognition.FaceRecImageCropper import FaceRecImageCropper
def crop_calfw(calfw_root, calfw_lmk_root, target_folder):
face_cropper = FaceRecImageCropper()
file_list = os.listdir(calfw_root)
for cur_file in file_list:
if cur_file.endswith('.jpg'):
cur_file_path = os.path.join(calfw_root, cur_file)
cur_image = cv2.imread(cur_file_path)
face_lms = []
cur_file_name = os.path.splitext(cur_file)[0]
cur_lms_file_name = cur_file_name + '_5loc_attri.txt'
cur_lms_file_path = os.path.join(calfw_lmk_root, cur_lms_file_name)
cur_lms_buf = open(cur_lms_file_path)
line = cur_lms_buf.readline().strip()
while line:
line_strs = line.split(' ')
face_lms.extend(line_strs)
line = cur_lms_buf.readline().strip()
face_lms = [float(s) for s in face_lms]
face_lms = [int(num) for num in face_lms]
cur_cropped_image = face_cropper.crop_image_by_mat(cur_image, face_lms)
target_path = os.path.join(target_folder, cur_file)
cv2.imwrite(target_path, cur_cropped_image)
if __name__ == '__main__':
calfw_root = '/export/home/wangjun492/wj_armory/faceX-Zoo/face_recognition/face_evaluation/calfw/data/images&landmarks/images&landmarks/images'
calfw_lmk_root = '/export/home/wangjun492/wj_armory/faceX-Zoo/face_recognition/face_evaluation/calfw/data/images&landmarks/images&landmarks/CA_landmarks'
target_folder = '/export/home/wangjun492/wj_armory/faceX-Zoo/face_recognition/face_evaluation/calfw/calfw_crop'
crop_calfw(calfw_root, calfw_lmk_root, target_folder)
|
python/app/thirdparty/dirsearch/lib/connection/requester.py
|
taomujian/linbing
| 351 |
119849
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
# Author: <NAME>
import urllib3
import http.client
import random
import socket
import thirdparty.requests as requests
from urllib.parse import urlparse, urljoin
from lib.utils.fmt import safequote
from thirdparty.requests.adapters import HTTPAdapter
from thirdparty.requests.auth import HTTPBasicAuth, HTTPDigestAuth
from thirdparty.requests_ntlm import HttpNtlmAuth
from .request_exception import RequestException
from .response import Response
urllib3.disable_warnings()
class Requester(object):
def __init__(
self,
url,
max_pool=1,
max_retries=5,
timeout=20,
ip=None,
proxy=None,
proxylist=None,
redirect=False,
request_by_hostname=False,
httpmethod="get",
data=None,
scheme=None,
):
self.httpmethod = httpmethod
self.data = data
self.headers = {}
parsed = urlparse(url)
# If no protocol specified, set http by default
if "://" not in url:
parsed = urlparse("{0}://{1}".format(scheme, url))
# If protocol is not supported
elif parsed.scheme not in ["https", "http"]:
raise RequestException({"message": "Unsupported URL scheme: {0}".format(parsed.scheme)})
self.base_path = parsed.path
if parsed.path.startswith("/"):
self.base_path = parsed.path[1:]
        # Safe-quote all special characters in base_path so they are not re-encoded later
self.base_path = safequote(self.base_path)
self.protocol = parsed.scheme
self.host = parsed.netloc.split(":")[0]
# Resolve DNS to decrease overhead
if ip:
self.ip = ip
        # A proxy could have a different DNS that would resolve the name. Therefore,
        # resolving the name locally when a proxy is used is pointless and would only raise an error.
elif not proxy and not proxylist:
try:
self.ip = socket.gethostbyname(self.host)
except socket.gaierror:
# Check if hostname resolves to IPv6 address only
try:
self.ip = socket.getaddrinfo(self.host, None, socket.AF_INET6)[0][4][0]
except socket.gaierror:
raise RequestException({"message": "Couldn't resolve DNS"})
# If no port specified, set default (80, 443)
try:
self.port = int(parsed.netloc.split(":")[1])
except IndexError:
self.port = 443 if self.protocol == "https" else 80
except ValueError:
raise RequestException(
{"message": "Invalid port number: {0}".format(parsed.netloc.split(":")[1])}
)
# Set the Host header, this will be overwritten if the user has already set the header
self.headers["Host"] = self.host
# Include port in Host header if it's non-standard
if (self.protocol == "https" and self.port != 443) or (
self.protocol == "http" and self.port != 80
):
self.headers["Host"] += ":{0}".format(self.port)
self.max_retries = max_retries
self.max_pool = max_pool
self.timeout = timeout
self.pool = None
self.proxy = proxy
self.proxylist = proxylist
self.redirect = redirect
self.random_agents = None
self.auth = None
self.request_by_hostname = request_by_hostname
self.session = requests.Session()
self.url = "{0}://{1}:{2}/".format(
self.protocol,
self.host if self.request_by_hostname else self.ip,
self.port,
)
self.base_url = "{0}://{1}:{2}/".format(
self.protocol,
self.host,
self.port,
)
self.set_adapter()
def set_adapter(self):
self.session.mount(self.url, HTTPAdapter(max_retries=self.max_retries))
def set_header(self, key, value):
self.headers[key.strip()] = value.strip() if value else value
def set_random_agents(self, agents):
self.random_agents = list(agents)
def set_auth(self, type, credential):
if type == "bearer":
self.set_header("Authorization", "Bearer {0}".format(credential))
else:
user = credential.split(":")[0]
try:
password = ":".join(credential.split(":")[1:])
except IndexError:
password = ""
if type == "basic":
self.auth = HTTPBasicAuth(user, password)
elif type == "digest":
self.auth = HTTPDigestAuth(user, password)
else:
self.auth = HttpNtlmAuth(user, password)
def request(self, path, proxy=None):
result = None
try:
if not proxy:
if self.proxylist:
proxy = random.choice(self.proxylist)
elif self.proxy:
proxy = self.proxy
if proxy:
if not proxy.startswith(
("http://", "https://", "socks5://", "socks5h://", "socks4://", "socks4a://")
):
proxy = "http://" + proxy
if proxy.startswith("https://"):
proxies = {"https": proxy}
else:
proxies = {"https": proxy, "http": proxy}
else:
proxies = None
url = self.url + self.base_path + path
if self.random_agents:
self.headers["User-Agent"] = random.choice(self.random_agents)
"""
We can't just do `allow_redirects=True` because we set the host header in
optional request headers, which will be kept in next requests (follow redirects)
"""
headers = self.headers.copy()
for i in range(6):
request = requests.Request(
self.httpmethod,
url=url,
headers=headers,
auth=self.auth,
data=self.data,
)
prepare = request.prepare()
prepare.url = url
response = self.session.send(
prepare,
proxies=proxies,
allow_redirects=False,
timeout=self.timeout,
stream=True,
verify=False,
)
result = Response(response)
if self.redirect and result.redirect:
url = urljoin(url, result.redirect)
headers["Host"] = url.split("/")[2]
continue
elif i == 5:
raise requests.exceptions.TooManyRedirects
break
except requests.exceptions.SSLError:
self.url = self.base_url
self.set_adapter()
result = self.request(path, proxy=proxy)
except requests.exceptions.TooManyRedirects:
raise RequestException(
{"message": "Too many redirects: {0}".format(self.base_url)}
)
except requests.exceptions.ProxyError:
raise RequestException(
{"message": "Error with the proxy: {0}".format(proxy)}
)
except requests.exceptions.ConnectionError:
raise RequestException(
{"message": "Cannot connect to: {0}:{1}".format(self.host, self.port)}
)
except requests.exceptions.InvalidURL:
raise RequestException(
{"message": "Invalid URL: {0}".format(self.base_url)}
)
except requests.exceptions.InvalidProxyURL:
raise RequestException(
{"message": "Invalid proxy URL: {0}".format(proxy)}
)
except (
requests.exceptions.ConnectTimeout,
requests.exceptions.ReadTimeout,
requests.exceptions.Timeout,
http.client.IncompleteRead,
socket.timeout,
):
raise RequestException(
{"message": "Request timeout: {0}".format(self.base_url)}
)
except Exception:
raise RequestException(
{"message": "There was a problem in the request to: {0}".format(self.base_url)}
)
return result
|
samples/gacha/gacha.py
|
Ryoich/python_zero
| 203 |
119862
|
<filename>samples/gacha/gacha.py
import random
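# Monte Carlo estimate of gacha (random-draw) collection behaviour: with N distinct
# items and R draws per run, prob[i] approximates the probability that draw i yields
# an item not seen earlier in that run, averaged over `trial` runs.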
N = 44
R = 100
prob = [0.0]*R
trial = 10000
for _ in range(trial):
a = []
n = 0
for i in range(R):
a.append(random.randint(1, N))
if len(set(a)) > n:
prob[i] = prob[i] + 1
n = len(set(a))
for i in range(R):
s = "{} {}".format(i, prob[i]/trial)
print(s)
|
tests/integration/test_runbooks_render_runbooks.py
|
madflojo/automon
| 414 |
119922
|
'''
Test Runbooks.py render_runbooks()
'''
import mock
import unittest
from runbooks import render_runbooks
class RenderRunbooksIntegrationTest(unittest.TestCase):
''' Run unit tests against the cache_runbooks method '''
def setUp(self):
''' Setup mocked data '''
self.runbooks = """
yaml: {{facts['data']}}
"""
self.facts = {'data': True}
def tearDown(self):
''' Destroy mocked data '''
self.runbooks = None
self.facts = None
class RunwithGoodData(RenderRunbooksIntegrationTest):
''' Test when given good data '''
def runTest(self):
''' Execute test '''
self.assertEqual(render_runbooks(self.runbooks, self.facts), {'yaml': True})
class RunwithNoData(RenderRunbooksIntegrationTest):
''' Test when given no data '''
def runTest(self):
''' Execute test '''
self.runbooks = ""
self.facts = ""
self.assertIsNone(render_runbooks(self.runbooks, self.facts))
class RunwithBadData(RenderRunbooksIntegrationTest):
''' Test when given bad data '''
def runTest(self):
''' Execute test '''
self.runbooks = "notrealyaml"
# Output should be same as input
self.assertEqual(render_runbooks(self.runbooks, self.facts), "notrealyaml")
|
koalixcrm/accounting/migrations/0001_initial.py
|
Cataldir/koalixcrm
| 290 |
119930
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-05 17:02
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Account',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('accountNumber', models.IntegerField(verbose_name='Account Number')),
('title', models.CharField(max_length=50, verbose_name='Account Title')),
('accountType', models.CharField(
choices=[('E', 'Earnings'), ('S', 'Spendings'), ('L', 'Liabilities'), ('A', 'Assets')],
max_length=1, verbose_name='Account Type')),
('description', models.TextField(blank=True, null=True, verbose_name='Description')),
('originalAmount',
models.DecimalField(decimal_places=2, default=0.0, max_digits=20, verbose_name='Original Amount')),
('isopenreliabilitiesaccount', models.BooleanField(verbose_name='Is The Open Liabilities Account')),
('isopeninterestaccount', models.BooleanField(verbose_name='Is The Open Interests Account')),
('isProductInventoryActiva', models.BooleanField(verbose_name='Is a Product Inventory Account')),
('isACustomerPaymentAccount', models.BooleanField(verbose_name='Is a Customer Payment Account')),
],
options={
'ordering': ['accountNumber'],
'verbose_name': 'Account',
'verbose_name_plural': 'Account',
},
),
migrations.CreateModel(
name='AccountingPeriod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Title')),
('begin', models.DateField(verbose_name='Begin')),
('end', models.DateField(verbose_name='End')),
],
options={
'verbose_name': 'Accounting Period',
'verbose_name_plural': 'Accounting Periods',
},
),
migrations.CreateModel(
name='Booking',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, max_digits=20, verbose_name='Amount')),
('description', models.CharField(blank=True, max_length=120, null=True, verbose_name='Description')),
('bookingDate', models.DateTimeField(verbose_name='Booking at')),
('dateofcreation', models.DateTimeField(auto_now=True, verbose_name='Created at')),
('lastmodification', models.DateTimeField(auto_now_add=True, verbose_name='Last modified')),
('accountingPeriod',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounting.AccountingPeriod',
verbose_name='AccountingPeriod')),
],
options={
'verbose_name': 'Booking',
'verbose_name_plural': 'Bookings',
},
),
migrations.CreateModel(
name='ProductCategorie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50, verbose_name='Product Categorie Title')),
('lossAccount',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='db_loss_account',
to='accounting.Account', verbose_name='Loss Account')),
('profitAccount',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='db_profit_account',
to='accounting.Account', verbose_name='Profit Account')),
],
options={
'verbose_name': 'Product Categorie',
'verbose_name_plural': 'Product Categories',
},
),
]
|
nn_modules/gzip-js/test/zipTest.py
|
PotWallet/PotWallet-Chain
| 305 |
119965
|
<reponame>PotWallet/PotWallet-Chain
import os
from helpers import run_cmd
from colorama import Fore
defaultTestDir = 'test-files'
defaultOutDir = 'test-outs'
"""
Run a single test
@param tFile- required; the full path to the file to run
@param level- optional (default: all); the compression level [1-9]
@return True if all tests passed; False if at least one test failed
"""
def runTest(tFile, level=None, outDir=defaultOutDir):
passed = True
if level == None:
for x in range(1, 10):
if runTest(tFile, x, outDir) == False:
passed = False
return passed
out1 = os.path.join(outDir, '%(file)s.%(level)d.gz' % {'file': os.path.basename(tFile), 'level' : level})
out2 = os.path.join(outDir, '%(file)s.%(level)d.out.gz' % {'file': os.path.basename(tFile), 'level' : level})
run_cmd('gzip -c -%(level)d %(file)s > %(outfile)s' % {'level' : level, 'file' : tFile, 'outfile' : out1})
run_cmd('../bin/gzip.js --level %(level)d --file %(file)s --output %(output)s' % {'level' : level, 'file' : tFile, 'output' : out2})
result = run_cmd('diff %(file1)s %(file2)s' % {'file1' : out1, 'file2' : out2})
if result['returncode'] == 0:
status = Fore.GREEN + 'PASSED' + Fore.RESET
else:
passed = False
status = Fore.RED + 'FAILED' + Fore.RESET
print 'Level %(level)d: %(status)s' % {'level' : level, 'status' : status}
return passed
"""
Runs all tests on the given level. This iterates through the testDir directory defined above.
@param level- The level to run on [1-9] (default: None, runs on all levels)
@return True if all levels passed, False if at least one failed
"""
def runAll(level=None, testDir=defaultTestDir):
passed = True
for tFile in os.listdir(testDir):
fullPath = os.path.join(testDir, tFile)
print Fore.YELLOW + tFile + Fore.RESET
if runTest(fullPath, level) == False:
passed = False
print ''
return passed
|
tests/conftest.py
|
textanalytics2020/laserembeddings
| 177 |
119998
|
import os
import pytest
@pytest.fixture
def test_data():
import numpy as np
test_data_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'data', 'laserembeddings-test-data.npz')
return np.load(test_data_file) if os.path.isfile(test_data_file) else None
|
tests/model_package/models/__init__.py
|
webjunkie/django
| 790 |
120006
|
<filename>tests/model_package/models/__init__.py<gh_stars>100-1000
# Import all the models from subpackages
from __future__ import absolute_import
from .article import Article
from .publication import Publication
|
jaclearn/nlp/tree/constituency.py
|
dapatil211/Jacinle
| 114 |
120061
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : constituency.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 07/04/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
"""
Constituency Tree.
"""
import jacinle.random as random
from jacinle.utils.enum import JacEnum
from .ptb import PTBNode
from .traversal import traversal
TEMP_NODE = '<TEMP>'
def _new_temp_node(token=None):
return PTBNode(TEMP_NODE, token)
def binarize_tree(tree):
def dc(root, children):
n = len(children)
if n == 1:
return children[0]
lhs = children[:n // 2]
rhs = children[n // 2:]
for part in [lhs, rhs]:
if len(part) == 1:
root.append_child(part[0])
else:
imm = _new_temp_node()
imm.attach(root)
dc(imm, part)
def dfs(node):
for x in node.children:
dfs(x)
n = len(node.children)
if n == 0:
pass
elif n == 1:
y, z = node, node.children[0]
x, sibling_ind = y.detach()
z.detach()
z.vtype = y.vtype
if x is None:
node = z
else:
z.attach(x, sibling_ind)
elif n == 2:
pass
else:
children = node.children.copy()
for x in children:
x.detach()
dc(node, children)
return node
return dfs(tree.clone())
def make_balanced_binary_tree(sequence):
root = _new_temp_node()
for x in sequence:
_new_temp_node(x).attach(root)
return binarize_tree(root)
class StepMaskSelectionMode(JacEnum):
FIRST = 'first'
RANDOM = 'random'
def compose_bianry_tree_step_masks(tree, selection='first'):
selection = StepMaskSelectionMode.from_string(selection)
nodes = list(traversal(tree, 'pre'))
clean_nodes = {x for x in nodes if x.is_leaf}
ever_clean_nodes = clean_nodes.copy()
answer = []
while len(clean_nodes) > 1:
# all allowed nodes
allowed = {x: i for i, x in enumerate(nodes) if (
x not in ever_clean_nodes and
all(map(lambda y: y in clean_nodes, x.children))
)}
# project it to
allowed_projected = {x for x in clean_nodes if (
x.sibling_ind == 0 and x.father in allowed
)}
ordered_clean_nodes = [x for x in nodes if x in clean_nodes]
clean_nodes_indices = {x: i for i, x in enumerate(ordered_clean_nodes)}
if selection is StepMaskSelectionMode.FIRST:
selected = nodes[min(allowed.values())]
elif selection is StepMaskSelectionMode.RANDOM:
selected = random.choice_list(list(allowed))
else:
raise ValueError('Unknown StepMaskSelectionMode: {}.'.format(selection))
mask_allowed_projected = [1 if x in allowed_projected else 0 for x in ordered_clean_nodes]
assert len(selected.children) == 2
# sanity check.
lson = clean_nodes_indices[selected.children[0]]
rson = clean_nodes_indices[selected.children[1]]
assert lson + 1 == rson
clean_nodes.difference_update(selected.children)
clean_nodes.add(selected)
ever_clean_nodes.add(selected)
answer.append((lson, mask_allowed_projected))
return answer
|
qutip/settings.py
|
quantshah/qutip
| 1,205 |
120098
|
<filename>qutip/settings.py
"""
This module contains settings for the QuTiP graphics, multiprocessing, and
tidyup functionality, etc.
"""
from __future__ import absolute_import
# use auto tidyup
auto_tidyup = True
# use auto tidyup dims on multiplication
auto_tidyup_dims = True
# detect hermiticity
auto_herm = True
# general absolute tolerance
atol = 1e-12
# use auto tidyup absolute tolerance
auto_tidyup_atol = 1e-12
# number of cpus (set at qutip import)
num_cpus = 0
# flag indicating if fortran module is installed
# never used
fortran = False
# path to the MKL library
mkl_lib = None
# Flag if mkl_lib is found
has_mkl = False
# Has OPENMP
has_openmp = False
# debug mode for development
debug = False
# Running on mac with openblas makes eigh unsafe
eigh_unsafe = False
# are we in IPython? Note that this cannot be
# set by the RC file.
ipython = False
# define whether log handler should be
# - default: switch based on IPython detection
# - stream: set up non-propagating StreamHandler
# - basic: call basicConfig
# - null: leave logging to the user
log_handler = 'default'
# Allow for a colorblind mode that uses different colormaps
# and plotting options by default.
colorblind_safe = False
# Sets the threshold for matrix NNZ where OPENMP
# turns on. This is automatically calculated and
# put in the qutiprc file. This value is here in case
# that fails
openmp_thresh = 10000
# Note that since logging depends on settings,
# if we want to do any logging here, it must be manually
# configured, rather than through _logging.get_logger().
try:
import logging
_logger = logging.getLogger(__name__)
_logger.addHandler(logging.NullHandler())
del logging # Don't leak names!
except:
_logger = None
def _valid_config(key):
if key == "absolute_import":
return False
if key.startswith("_"):
return False
val = __self[key]
if isinstance(val, (bool, int, float, complex, str)):
return True
return False
_environment_keys = ["ipython", 'has_mkl', 'has_openmp',
'mkl_lib', 'fortran', 'num_cpus']
__self = locals().copy() # Not ideal, making an object would be better
__all_out = [key for key in __self if _valid_config(key)]
__all = [key for key in __all_out if key not in _environment_keys]
__default = {key: __self[key] for key in __all}
__section = "qutip"
del _valid_config
__self = locals()
def save(file='qutiprc', all_config=True):
"""
Write the settings to a file.
Default file is 'qutiprc' which is loaded when importing qutip.
    Files are stored in the .qutip directory in the user home.
    The file can be a full path or relative to home to save elsewhere.
    If 'all_config' is used, the other available configs are also written.
"""
from qutip.configrc import write_rc_qset, write_rc_config
if all_config:
write_rc_config(file)
else:
write_rc_qset(file)
def load(file='qutiprc', all_config=True):
"""
Loads the settings from a file.
Default file is 'qutiprc' which is loaded when importing qutip.
    Files are stored in the .qutip directory in the user home.
    The file can be a full path or relative to home to load from elsewhere.
    If 'all_config' is used, the other available configs are also loaded.
"""
from qutip.configrc import load_rc_qset, load_rc_config
if all_config:
load_rc_config(file)
else:
load_rc_qset(file)
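# A small usage sketch (assumes a writable ~/.qutip directory, as described above):
#
#     import qutip.settings as settings
#     settings.atol = 1e-10   # tweak a tolerance
#     settings.save()         # persist the current values to ~/.qutip/qutiprc
#     settings.load()         # restore them in a later session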
def reset():
"""Hard reset of the qutip.settings values
Recompute the threshold for openmp, so it may be slow.
"""
for key in __default:
__self[key] = __default[key]
    import os
    import multiprocessing
    import qutip.hardware_info
    if 'QUTIP_NUM_PROCESSES' in os.environ:
        num_cpus = int(os.environ['QUTIP_NUM_PROCESSES'])
    else:
        info = qutip.hardware_info.hardware_info()
        if 'cpus' in info:
            num_cpus = info['cpus']
        else:
            try:
                num_cpus = multiprocessing.cpu_count()
            except:
                num_cpus = 1
__self["num_cpus"] = num_cpus
try:
from qutip.cy.openmp.parfuncs import spmv_csr_openmp
except:
__self["has_openmp"] = False
__self["openmp_thresh"] = 10000
else:
__self["has_openmp"] = True
from qutip.cy.openmp.bench_openmp import calculate_openmp_thresh
thrsh = calculate_openmp_thresh()
__self["openmp_thresh"] = thrsh
try:
__IPYTHON__
__self["ipython"] = True
except:
__self["ipython"] = False
from qutip._mkl.utilities import _set_mkl
_set_mkl()
def __repr__():
out = "qutip settings:\n"
longest = max(len(key) for key in __all_out)
for key in __all_out:
out += "{:{width}} : {}\n".format(key, __self[key], width=longest)
return out
|
game/consumers.py
|
Greenns/channels-obstruction
| 103 |
120130
|
<filename>game/consumers.py
import re
import logging
from channels import Group
from channels.sessions import channel_session
from .models import Game, GameSquare
from channels.auth import http_session_user, channel_session_user, channel_session_user_from_http
log = logging.getLogger(__name__)
from django.utils.decorators import method_decorator
from channels.generic.websockets import JsonWebsocketConsumer
class LobbyConsumer(JsonWebsocketConsumer):
# Set to True to automatically port users from HTTP cookies
# (you don't need channel_session_user, this implies it)
http_user = True
def connection_groups(self, **kwargs):
"""
Called to return the list of groups to automatically add/remove
this connection to/from.
"""
print("adding to connection group lobby")
return ["lobby"]
def connect(self, message, **kwargs):
"""
Perform things on connection start
"""
pass
def receive(self, content, **kwargs):
"""
Called when a message is received with either text or bytes
filled out.
"""
channel_session_user = True
action = content['action']
if action == 'create_game':
# create a new game using the part of the channel name
Game.create_new(self.message.user)
def disconnect(self, message, **kwargs):
"""
Perform things on connection close
"""
pass
class GameConsumer(JsonWebsocketConsumer):
# Set to True to automatically port users from HTTP cookies
# (you don't need channel_session_user, this implies it)
http_user = True
def connection_groups(self, **kwargs):
"""
Called to return the list of groups to automatically add/remove
this connection to/from.
"""
return ["game-{0}".format(kwargs['game_id'])]
def connect(self, message, **kwargs):
"""
Perform things on connection start
"""
pass
def receive(self, content, **kwargs):
"""
Called when a message is received with either text or bytes
filled out.
"""
channel_session_user = True
action = content['action']
print("MESSAGE ON OBSTRUCTION - {0}".format(action))
if action == 'claim_square':
# get the square object
square = GameSquare.get_by_id(content['square_id'])
square.claim('Selected', self.message.user)
if action == 'chat_text_entered':
# chat text
game = Game.get_by_id(content['game_id'])
game.add_log(content['text'], self.message.user)
game.send_game_update()
def disconnect(self, message, **kwargs):
"""
Perform things on connection close
"""
pass
|
cherrypy/__main__.py
|
abancu/core
| 674 |
120133
|
<reponame>abancu/core
import cherrypy.daemon
if __name__ == '__main__':
cherrypy.daemon.run()
|
tests/integration/document_test.py
|
RaduG/python-cloudant
| 187 |
120150
|
#!/usr/bin/env python
# Copyright (c) 2015 IBM. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
_document_test_
document module integration tests
"""
import requests
import unittest
import uuid
from cloudant import cloudant
from cloudant.credentials import read_dot_cloudant
from .. import unicode_
class DocumentTest(unittest.TestCase):
"""
Verify that we can do stuff to a document.
"""
def setUp(self):
self.user, self.passwd = read_dot_cloudant(filename="~/.clou")
self.dbname = unicode_("cloudant-document-tests-{0}".format(
unicode_(uuid.uuid4())
))
def tearDown(self):
with cloudant(self.user, self.passwd, account=self.user) as c:
c.delete_database(self.dbname)
def test_delete(self):
with cloudant(self.user, self.passwd, account=self.user) as c:
db = c.create_database(self.dbname)
doc1 = db.create_document({"_id": "doc1", "testing": "document 1"})
doc1.save()
doc1.fetch()
doc1.delete()
self.assertRaises(requests.HTTPError, doc1.fetch)
if __name__ == '__main__':
unittest.main()
|
src/pyrobot/azure_kinect/camera.py
|
gujralsanyam22/pyrobot
| 2,150 |
120177
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pyrobot.kinect2.camera import Kinect2Camera
class AzureKinectCamera(Kinect2Camera):
"""
This is camera class that interfaces with the KinectV2 camera
"""
def __init__(self, configs):
"""
        Constructor of the AzureKinectCamera class.
:param configs: Camera specific configuration object
:type configs: YACS CfgNode
"""
super(AzureKinectCamera, self).__init__(configs=configs)
|
scylla/proxy/__init__.py
|
peng4217/scylla
| 3,556 |
120186
|
<reponame>peng4217/scylla
from .server import start_forward_proxy_server_non_blocking, start_forward_proxy_server
|
pyunity/examples/example9/__init__.py
|
pyunity/pyunity
| 158 |
120197
|
from pyunity import Behaviour, ShowInInspector, RectTransform, Screen, Vector2, Input, CheckBox, Text, SceneManager, GameObject, Canvas, Texture2D, Gui, RectOffset, Logger, Image2D, FontLoader, RGB
import os
class Mover2D(Behaviour):
rectTransform = ShowInInspector(RectTransform)
speed = ShowInInspector(float, 300)
def Start(self):
self.rectTransform.offset.Move(Screen.size / 2)
def Update(self, dt):
movement = Vector2(Input.GetAxis("Horizontal"), -
Input.GetAxis("Vertical"))
self.rectTransform.offset.Move(movement * dt * self.speed)
self.rectTransform.rotation += 270 * dt
class FPSTracker(Behaviour):
text = ShowInInspector(Text)
def Start(self):
self.a = 0
def Update(self, dt):
self.a += dt
if self.a > 0.05:
self.text.text = str(1 / dt)
self.a = 0
class CheckboxTracker(Behaviour):
check = ShowInInspector(CheckBox)
text = ShowInInspector(Text)
def Update(self, dt):
self.text.text = "On" if self.check.checked else "Off"
def main():
scene = SceneManager.AddScene("Scene")
canvas = GameObject("Canvas")
canvas.AddComponent(Canvas)
scene.Add(canvas)
imgObject = GameObject("Image", canvas)
rectTransform = imgObject.AddComponent(RectTransform)
rectTransform.offset = RectOffset.Rectangle(100)
imgObject.AddComponent(Mover2D).rectTransform = rectTransform
img = imgObject.AddComponent(Image2D)
img.depth = -0.1
img.texture = Texture2D(os.path.join(os.path.dirname(
os.path.dirname(os.path.abspath(__file__))), "example8", "logo.png"))
scene.Add(imgObject)
rect, button, text = Gui.MakeButton(
"Button", scene, "Click me", FontLoader.LoadFont("Consolas", 20))
rect.transform.ReparentTo(canvas.transform)
rect.offset = RectOffset(Vector2(40, 25), Vector2(190, 50))
button.callback = lambda: Logger.Log("Clicked")
rect, checkbox = Gui.MakeCheckBox("Checkbox", scene)
rect.transform.ReparentTo(canvas.transform)
rect.offset = RectOffset(Vector2(300, 50), Vector2(325, 75))
label = GameObject("Label")
text = label.AddComponent(Text)
text.text = "Off"
text.color = RGB(0, 0, 0)
label.AddComponent(RectTransform).offset = RectOffset(
Vector2(330, 50), Vector2(425, 75))
label.transform.ReparentTo(canvas.transform)
scene.Add(label)
tracker = rect.AddComponent(CheckboxTracker)
tracker.text = text
tracker.check = checkbox
t = GameObject("Text", canvas)
rect = t.AddComponent(RectTransform)
rect.anchors.SetPoint(Vector2(1, 0))
rect.offset.min = Vector2(-150, 25)
text = t.AddComponent(Text)
text.text = "60"
text.color = RGB(0, 0, 0)
t.AddComponent(FPSTracker).text = text
scene.Add(t)
SceneManager.LoadScene(scene)
if __name__ == "__main__":
main()
|
openunreid/data/utils/data_utils.py
|
zwzhang121/OpenUnReID
| 344 |
120273
|
<reponame>zwzhang121/OpenUnReID
import os.path as osp
from PIL import Image
def read_image(path):
"""Reads image from path using ``PIL.Image``.
Args:
path (str): path to an image.
Returns:
PIL image
"""
got_img = False
if not osp.exists(path):
raise IOError('"{}" does not exist'.format(path))
while not got_img:
try:
img = Image.open(path).convert("RGB")
got_img = True
except IOError:
print(
f'IOError incurred when reading "{path}". '
f"Will redo. Don't worry. Just chill."
)
return img
def save_image(image_numpy, path, aspect_ratio=1.0):
"""Save a numpy image to the disk
Parameters:
image_numpy (numpy array) -- input numpy array
path (str) -- the path of the image
"""
image_pil = Image.fromarray(image_numpy)
h, w, _ = image_numpy.shape
if aspect_ratio > 1.0:
image_pil = image_pil.resize((h, int(w * aspect_ratio)), Image.BICUBIC)
if aspect_ratio < 1.0:
image_pil = image_pil.resize((int(h / aspect_ratio), w), Image.BICUBIC)
image_pil.save(path)
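# A minimal round-trip sketch ("demo.jpg" is a hypothetical file name written to the
# current directory; it assumes numpy and Pillow are installed):
if __name__ == "__main__":
    import numpy as np

    dummy = (np.random.rand(64, 32, 3) * 255).astype(np.uint8)
    save_image(dummy, "demo.jpg")   # write the array to disk
    img = read_image("demo.jpg")    # read it back as a PIL RGB image
    print(img.size)                 # PIL reports (width, height)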
|
wrappers/python/tests/did/test_list_my_dids_with_meta.py
|
absltkaos/indy-sdk
| 636 |
120304
|
<gh_stars>100-1000
import json
import pytest
from indy import did, error
@pytest.mark.asyncio
async def test_list_my_dids_works(wallet_handle, seed_my1, did_my1, verkey_my1, metadata):
await did.create_and_store_my_did(wallet_handle, json.dumps({'seed': seed_my1}))
await did.set_did_metadata(wallet_handle, did_my1, metadata)
res_json = await did.list_my_dids_with_meta(wallet_handle)
res = json.loads(res_json)
assert len(res) == 1
assert res[0]["did"] == did_my1
assert res[0]["metadata"] == metadata
assert res[0]["verkey"] == verkey_my1
@pytest.mark.asyncio
async def test_list_my_dids_works_for_invalid_handle(wallet_handle):
with pytest.raises(error.WalletInvalidHandle):
await did.list_my_dids_with_meta(wallet_handle + 1)
|
2021_04_14/dojo_test.py
|
devppjr/dojo
| 114 |
120309
|
import unittest
from dojo import main, primes
class DojoTest(unittest.TestCase):
def test_zero(self):
primes_list = list(primes(0))
self.assertListEqual(primes_list, [])
def test_one(self):
primes_list = list(primes(3))
self.assertListEqual(primes_list, [2])
def test_two(self):
primes_list = list(primes(12))
self.assertListEqual(primes_list, [2,3,5,7,11])
if __name__ == '__main__':
unittest.main()
|
netbox/tenancy/migrations/0002_tenant_ordering.py
|
cybarox/netbox
| 4,994 |
120343
|
<reponame>cybarox/netbox
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('tenancy', '0001_squashed_0012'),
]
operations = [
migrations.AlterModelOptions(
name='tenant',
options={'ordering': ['name']},
),
]
|
libmem-py/configure.py
|
CKLinuxProject/libmem
| 195 |
120377
|
import os
import shutil
import json
root_dir = os.pardir
libmem_dir = f"{root_dir}{os.sep}libmem"
project_dir = os.curdir
project_src_dir = f"{project_dir}{os.sep}src/libmem-py"
clean_script = "clean.py"
print(f"[+] Creating '{clean_script}'...")
keep_dirs = []
keep_files = []
for (path, dirs, files) in os.walk(os.curdir):
keep_dirs.append(path)
keep_files.extend([f"{path}{os.sep}{f}" for f in files])
json_dict = {
"dirs" : keep_dirs,
"files" : keep_files
}
json_data = json.dumps(json_dict)
with open("tree.json", "w") as tree_file:
tree_file.write(json_data)
tree_file.close()
print(f"[-] Creation complete")
print("[+] Configuring files...")
project_files = {
# (src_dir : dst_dir) [ files ]
(root_dir, project_dir) : [
"README.md",
"LICENSE"
],
(libmem_dir, project_src_dir) : [
"libmem.h",
"libmem.c"
]
}
for i in project_files:
src_dir = i[0]
dst_dir = i[1]
files = project_files[i]
print(f"[*] Source Directory: {src_dir}")
print(f"[*] Destination Directory: {dst_dir}")
print(f"[*] Files: {files}")
for f in files:
shutil.copy(f"{src_dir}{os.sep}{f}", dst_dir)
print("====================")
print("[-] Configuration complete")
|
neuspell/noising/pcrn_utils.py
|
TheMortalCoil92/neuspell_access
| 422 |
120383
|
<reponame>TheMortalCoil92/neuspell_access<gh_stars>100-1000
import json
import operator
import string
import numpy as np
from tqdm.autonotebook import tqdm
append_left_start = lambda context: "<<" + context
append_right_end = lambda context: context + ">>"
isascii = lambda s: len(s) == len(s.encode())
ispunct = lambda s: s in string.punctuation
def load_stats(file_name):
"""
# keys converted back from strings while loading
"""
stats = {}
try:
opfile = open(file_name, 'r')
stats = json.load(opfile)
opfile.close()
keys = [*stats.keys()]
for key in keys:
stats[int(key)] = stats.pop(key)
except:
pass
return stats
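# get_lcs aligns a correct word against an (equal-or-shorter) error word and returns
# the per-character edits as (index, correct char, replacement or "") tuples together
# with the edit count; __get_dp below fills the two DP tables recursively.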
def get_lcs(str1: "correct word", str2: "error word"):
l1, l2 = len(str1), len(str2)
dp_counts = [[-np.inf] * l2 for _ in range(l1)]
dp_changes = [[[]] * l2 for _ in range(l1)]
return __get_dp(str1, str2, l1 - 1, l2 - 1, dp_counts, dp_changes)
def __get_dp(w_c, w_e, i_c, i_e, dp_counts, dp_changes):
if not i_c >= i_e: return [], np.inf
if i_e == -1 and i_c == -1: return [], 0
if i_e == -1 and i_c >= 0: return [(i, w_c[i], "") for i in range(i_c, -1, -1)], i_c + 1
if dp_counts[i_c][i_e] == -np.inf: # need to fill
if w_c[i_c] == w_e[i_e]:
dp_changes[i_c][i_e], dp_counts[i_c][i_e] = \
__get_dp(w_c, w_e, i_c - 1, i_e - 1, dp_counts, dp_changes)
else: # replace with char or replace will null in w_c as l_c>=l_e
case1_changes, case1_count = __get_dp(w_c, w_e, i_c - 1, i_e - 1, dp_counts, dp_changes)
case2_changes, case2_count = __get_dp(w_c, w_e, i_c - 1, i_e, dp_counts, dp_changes)
if case1_count <= case2_count:
dp_changes[i_c][i_e], dp_counts[i_c][i_e] = \
case1_changes + [(i_c, w_c[i_c], w_e[i_e])], case1_count + 1
else:
dp_changes[i_c][i_e], dp_counts[i_c][i_e] = \
case2_changes + [(i_c, w_c[i_c], "")], case2_count + 1
return dp_changes[i_c][i_e], dp_counts[i_c][i_e]
def __get_replace_probs(stats, context_end_tokens, correct_char,
context_length_category, return_sorted_list=False):
try:
prob = stats[context_length_category][correct_char][context_end_tokens]
if return_sorted_list:
prob = sorted(prob.items(), key=operator.itemgetter(1), reverse=True)
except KeyError:
prob = {}
return prob
def __sum_to_one(vals_):
try:
vals = vals_.copy()
divide_by = sum(vals)
return [val / divide_by for val in vals]
except TypeError as e:
print(vals_)
raise Exception(e)
def __replace_only_topk(true_chars, mod_chars, mod_probs, top_k):
return_nchanges = 0
if top_k == 0: return "".join(true_chars), return_nchanges
assert len(true_chars) == len(mod_chars) == len(mod_probs)
_mod_inds, _mod_probs = [], []
for i, (true_char, mod_char, mod_prob) in enumerate(zip(true_chars, mod_chars, mod_probs)):
if mod_char != true_char: _mod_inds.append(i); _mod_probs.append(mod_prob)
if not len(_mod_inds) == 0:
for ind_ in np.random.choice(_mod_inds, min(top_k, len(_mod_inds)), \
replace=False, p=__sum_to_one(_mod_probs)):
true_chars[ind_] = mod_chars[ind_]
return_nchanges += min(top_k, len(_mod_inds))
return "".join(true_chars), return_nchanges
def _get_replace_probs_all_contexts(stats,
raw_context,
is_beginning,
correct_char,
alphas,
print_stats=False,
nascii=128):
"""
    # for a given context of length 0 to 3, and the character that has to be replaced,
    # this method obtains a list of possible replacements, ordered in decreasing
    # probability scores
"""
# the first 0-127 ASCII include punctuations, numbers and alphabets
assert len(raw_context) <= len(alphas) - 1
replace_char_probs = [0] * (nascii + 1)
epsilon_index = nascii
sum_alpha = 0
for ln in range(0, len(raw_context) + 1): # [0,1,2,3,...,len(context)]
selected_raw_context = raw_context[ln:]
selected_raw_context_len = len(selected_raw_context)
alpha = alphas[len(selected_raw_context)]
if alpha != 0:
sum_alpha += alpha
selected_raw_context_new = append_left_start(selected_raw_context) \
if (ln == 0 and is_beginning) else selected_raw_context
probs_dict = __get_replace_probs(stats, selected_raw_context_new,
correct_char, selected_raw_context_len)
if print_stats: print(sorted(probs_dict.items(), key=operator.itemgetter(1), reverse=True))
for replace_char, prob in probs_dict.items():
if replace_char == "":
replace_char_probs[epsilon_index] += prob * alpha
else:
replace_char_probs[ord(replace_char)] += prob * alpha
if print_stats: print(f"sum_alpha: {sum_alpha}")
normalize_by = sum_alpha
if sum(replace_char_probs) == 0:
if correct_char != "":
replace_char_probs[ord(correct_char)] = 1
else:
replace_char_probs[epsilon_index] = 1
normalize_by = 1
else:
replace_char_probs = [val / normalize_by for val in replace_char_probs]
# should result in replace_char_probs s.t. sum(replace_char_probs)=1
# but in cases where alpha is non-zero but probs are {}, the sum of probs is zero for that alpha,
# breaking the realization that
# << alpha1*[...]+alpha2*[....]+alpha3*[...] / alpha1+alpha2+alpha3 = 1 >>
replace_char_probs = __sum_to_one(replace_char_probs)
if print_stats:
replace_dict = {}
for i, val in enumerate(replace_char_probs):
if val != 0:
if (i == nascii):
replace_dict[""] = val
else:
replace_dict[chr(i)] = val
replace_dict_sorted = sorted(replace_dict.items(), key=operator.itemgetter(1), reverse=True)
print(replace_dict_sorted)
replace_me_with = []
for _ in range(20):
replace_char = np.random.choice([chr(p) \
for p in range(nascii)] + [""], p=replace_char_probs)
replace_me_with.append(replace_char)
print(replace_me_with)
return
else:
replace_char = np.random.choice([chr(p) for p in range(nascii)] + [""], p=replace_char_probs)
replace_char_prob = replace_char_probs[ord(replace_char)] \
if replace_char != "" else replace_char_probs[epsilon_index]
return replace_char, replace_char_prob
def noisyfy_backoff_homophones(stats, lines, alphas, homophones={}, topk=-1, lower=False, print_data=False):
"""
# noisy-fy some sampled clean data using previously computed stats
# using backoff weights (the previously computed stats dictionary) and
# using homophones (a dictionary of words with values as list of its homophones in english)
# inputs
# stats: a dictionary of replacement probabilities (See stats.__add_this_info() for details)
# lines: a list of clean lines of text
# alphas: weightages for probability scores;
# index 0 to 3 correspondingly for 0 to 3 length context
# homophones: a dictionary of words as keys, each value being a list of corresponding
# homophones
# topk: -1 to not use this arument, else choose the top-k most probable replacements
# lower: whether to lower case the clean data before injecting noise
# print_data: controls verbosity of the print statements; set to True for progress
# outputs
# a list of corrupted lines, one line per line of input lines
"""
max_gram = len(alphas) - 1
homophones_set = set([*homophones.keys()])
new_lines = []
nchars, nchanges = 0, 0
nwords, nwchanges = 0, 0
print("total lines in inp to noisyfy_backoff_homophones: {}".format(len(lines)))
print("total tokens in inp to noisyfy_backoff_homophones: {}".format(
sum([len(line.strip().split()) for line in lines])))
for lll, line in tqdm(enumerate(lines)):
wtokens_ = line.strip().lower().split() if lower else line.strip().split()
wtokens = line.strip().lower().split() if lower else line.strip().split()
for ttt, token in enumerate(wtokens):
nchars += len(token)
if token in homophones_set and np.random.rand() <= 0.1:
choices = homophones[token] # obtain the choice list
wtokens[ttt] = np.random.choice(choices)
if print_data: print(lll, token, wtokens[ttt], " <-- homophone")
_nchanges = get_lcs(token, wtokens[ttt])[1] if len(token) >= len(wtokens[ttt]) \
else get_lcs(wtokens[ttt], token)[1]
nchanges += _nchanges
continue
top_k = topk
if top_k == 0:
if len(token) <= 3:
top_k = np.random.choice([0, 1], p=[0.9, 0.1])
elif 3 < len(token) <= 6:
top_k = np.random.choice([0, 1, 2], p=[0.7, 0.15, 0.15])
else:
top_k = np.random.choice([0, 1, 2, 3, 4], p=[0.20, 0.25, 0.35, 0.15, 0.05])
true_chars, mod_chars, mod_probs = [char for char in token], [], []
_nchanges = 0
for i, char in enumerate(token):
if (not isascii(char)) or ispunct(char):
mod_chars += [char]
mod_probs += [0]
continue
start_ = max(0, i - max_gram)
replace_char, replace_char_prob = \
_get_replace_probs_all_contexts(stats, token[start_:i], \
True if start_ == 0 else False, token[i], alphas, print_stats=False)
mod_chars.append(replace_char)
mod_probs.append(replace_char_prob)
if char != replace_char: _nchanges += 1
if top_k >= 0:
replace_token, _nchanges = __replace_only_topk(true_chars, mod_chars, mod_probs, top_k)
else:
replace_token, _nchanges = "".join(mod_chars), _nchanges
nchanges += _nchanges
wtokens[ttt] = replace_token
if print_data and replace_token != token:
print(lll, token, wtokens[ttt], " <-- corrupted", f" <-- top_{top_k}" if top_k >= 0 else "")
if print_data and replace_token == token:
print(lll, token, wtokens[ttt])
new_lines.append(" ".join(wtokens))
nwchanges += sum([1 if a != b else 0 for a, b in zip(wtokens_, wtokens)])
nwords += len(wtokens_)
if print_data: print("original line --> ", line)
if print_data: print("corrupted line --> ", new_lines[lll])
if print_data:
print(f"total observed characters: {nchars}, \
total corrupted characters: {nchanges}, \
percent corrupted: {100 * nchanges / nchars}")
print(f"total observed words: {nwords}, \
total corrupted words: {nwchanges}, \
percent corrupted: {100 * nwchanges / nwords}")
return new_lines
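# A hedged usage sketch (`stats` is assumed to be a replacement-probability dictionary
# computed elsewhere, as described in the docstring above):
#
#     noisy_lines = noisyfy_backoff_homophones(stats,
#                                              ["a clean line of text"],
#                                              alphas=[0.0, 0.25, 0.35, 0.4],
#                                              homophones={},
#                                              topk=1)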
|
SoftLayer/fixtures/SoftLayer_Network_CdnMarketplace_Configuration_Mapping_Path.py
|
dvzrv/softlayer-python
| 126 |
120392
|
<reponame>dvzrv/softlayer-python
listOriginPath = [
{
"header": "test.example.com",
"httpPort": 80,
"mappingUniqueId": "993419389425697",
"origin": "10.10.10.1",
"originType": "HOST_SERVER",
"path": "/example",
"status": "RUNNING"
},
{
"header": "test.example.com",
"httpPort": 80,
"mappingUniqueId": "993419389425697",
"origin": "10.10.10.1",
"originType": "HOST_SERVER",
"path": "/example1",
"status": "RUNNING"
}
]
createOriginPath = [
{
"header": "test.example.com",
"httpPort": 80,
"mappingUniqueId": "993419389425697",
"origin": "10.10.10.1",
"originType": "HOST_SERVER",
"path": "/example",
"status": "RUNNING",
"bucketName": "test-bucket",
'fileExtension': 'jpg',
"performanceConfiguration": "General web delivery"
}
]
deleteOriginPath = "Origin with path /example/videos/* has been deleted"
|
tests/modules/ambiguous/pkg1/__init__.py
|
jouve/coveragepy
| 2,254 |
120394
|
<gh_stars>1000+
print("Ambiguous pkg1")
|
tests/functional/configs/test_get_default.py
|
tomasfarias/dbt-core
| 799 |
120398
|
import pytest
from dbt.tests.util import run_dbt
models_get__any_model_sql = """
-- models/any_model.sql
select {{ config.get('made_up_nonexistent_key', 'default_value') }} as col_value
"""
class TestConfigGetDefault:
@pytest.fixture(scope="class")
def models(self):
return {"any_model.sql": models_get__any_model_sql}
def test_config_with_get_default(
self,
project,
):
# This test runs a model with a config.get(key, default)
# The default value is 'default_value' and causes an error
results = run_dbt(["run"], expect_pass=False)
assert len(results) == 1
assert str(results[0].status) == "error"
assert 'column "default_value" does not exist' in results[0].message
|
doc/version.py
|
DougReeder/remotestorage.js
| 1,737 |
120411
|
<gh_stars>1000+
__version__ = '2.0.0-beta.1'
|
hwt/serializer/utils.py
|
ufo2011/hwt
| 134 |
120413
|
from hwt.doc_markers import internal
from hwt.hdl.statements.assignmentContainer import HdlAssignmentContainer
from hwt.hdl.statements.codeBlockContainer import HdlStmCodeBlockContainer
from hwt.hdl.statements.statement import HdlStatement
from hwt.synthesizer.rtlLevel.mainBases import RtlSignalBase
@internal
def getMaxStmIdForStm(stm):
"""
Get maximum _instId from all assignments in statement,
used for sorting of processes in architecture
"""
maxId = 0
if isinstance(stm, HdlAssignmentContainer):
return stm._instId
else:
for _stm in stm._iter_stms():
maxId = max(maxId, getMaxStmIdForStm(_stm))
return maxId
def RtlSignal_sort_key(s: RtlSignalBase):
return (s.name, s._instId)
def HdlStatement_sort_key(stm: HdlStatement):
if isinstance(stm, HdlStmCodeBlockContainer) and stm.name is not None:
return (stm.name, getMaxStmIdForStm(stm))
else:
return ("", getMaxStmIdForStm(stm))
|
saw-remote-api/python/saw_client/llvm_type.py
|
Grain/saw-script
| 411 |
120415
|
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, List, Optional, Set, Union, overload
class LLVMType(metaclass=ABCMeta):
@abstractmethod
def to_json(self) -> Any: pass
class LLVMIntType(LLVMType):
def __init__(self, width : int) -> None:
self.width = width
def to_json(self) -> Any:
return {'type': 'primitive type', 'primitive': 'integer', 'size': self.width}
class LLVMArrayType(LLVMType):
def __init__(self, elemtype : 'LLVMType', size : int) -> None:
self.size = size
self.elemtype = elemtype
def to_json(self) -> Any:
return { 'type': 'array',
'element type': self.elemtype.to_json(),
'size': self.size }
class LLVMPointerType(LLVMType):
def __init__(self, points_to : 'LLVMType') -> None:
self.points_to = points_to
def to_json(self) -> Any:
return {'type': 'pointer', 'to type': self.points_to.to_json()}
class LLVMAliasType(LLVMType):
def __init__(self, name : str) -> None:
self.name = name
def to_json(self) -> Any:
return {'type': 'type alias',
'alias of': self.name}
class LLVMStructType(LLVMType):
def __init__(self, field_types : List[LLVMType]) -> None:
self.field_types = field_types
def to_json(self) -> Any:
return {'type': 'struct',
'fields': [fld_ty.to_json() for fld_ty in self.field_types]}
class LLVMPackedStructType(LLVMType):
def __init__(self, field_types : List[LLVMType]) -> None:
self.field_types = field_types
def to_json(self) -> Any:
return {'type': 'packed struct',
'fields': [fld_ty.to_json() for fld_ty in self.field_types]}
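# A small illustration: composing the classes above to get the JSON view of a
# 4-element array of pointers to 32-bit integers.
if __name__ == "__main__":
    ty = LLVMArrayType(LLVMPointerType(LLVMIntType(32)), 4)
    print(ty.to_json())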
|
laikaboss/modules/explode_helloworld.py
|
Techie4Life83/USBKIOSK
| 750 |
120439
|
# Copyright 2015 Lockheed Martin Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Import the python libraries needed for your module
import logging
import hashlib
# Import classes and helpers from the Laika framework
from laikaboss.objectmodel import ExternalVars, ModuleObject, ScanError
from laikaboss.util import get_option
from laikaboss.si_module import SI_MODULE
class EXPLODE_HELLOWORLD(SI_MODULE):
'''
A Hello World Laika module to use as a template and guide for your development.
Classes of Laika modules follow these rules:
* Name MUST be in all capitals.
* Name SHOULD use one of the predefined prefixes followed by an expressive name of what the module is or interacts with/on.
* Prefixes: SCAN, META, EXPLODE, LOG, DECODE.
* Class MUST inherit from SI_MODULE.
* Saved in a file with name that is lowercase of the class name.
* Define the _run(...) method, as seen below.
* Should define an __init__(...) method that defines the self.module_name instance variable set to the class name, as seen below.
'''
def __init__(self):
'''
Typical class constructor. Should define instance variable self.module_name set to the class name, to be used for adding metadata and logging. Any other non-runtime specific initialization can happen here, such as making connections to external systems or compiling regular expressions for improved runtime performance.
Note: no additional parameters (besides self) can be defined here, as the framework does not support parameterized initialization at module loading. Instead, use lazy-loading techniques during the first time _run(...) is called, using that methods 'args' parameter or the laikaboss.config for customization.
'''
self.module_name = "EXPLODE_HELLOWORLD"
def _run(self, scanObject, result, depth, args):
'''
Here is where the actual magic happens. This method is called on the object being scanned given that the dispatcher has a matching rule triggering this module.
There are four types of actions that modules do on scan objects:
* Add flags.
* Add metadata.
* Explode children.
* Interact with external systems.
Any module can perform as many of these actions as necessary, and many do more than one, such as adding flags and metadata.
The parameters provided to this method by the framework are:
        * scanObject: This is the object currently being scanned (duh) of type ScanObject. It contains all of the flags, metadata and other assigned fields regarding this specific instance of the object. This parameter is used by modules to access the buffer, add flags and add metadata.
* result: This is the umbrella object that contains all of the ScanObjects created during the root object's scan. It is primarily used by modules to access the parent object of the scanObject when needed.
* depth: This is a leftover parameter from the tracking of the depth of recursion of objects. It is recommended to get this value from the scanObject itself.
* args: This is a dictionary of the runtime arguments provided to the module by the dispatcher. This parameter provides customization of module runs from the dispatcher, so that the module can operate differently based on the type/context of the scanObject.
This method MUST return a list of ModuleObjects. ModuleObjects represent the children exploded (or the less violent extracted) from the scanObject by this module. If the module does not explode any children (as most do not), simply return an empty list. Not returning a list causes the framework to log an error for this module each time it is run, but will not prevent it from running next time, nor will it remove any flags/metadata added by the module run.
'''
# This variable is recommended to be used by all modules as the returned list of ModuleObjects, populated as the children objects are found.
moduleResult = []
# A typical first step is define the configuration options of the module.
# A best practice for configuration options to a module is to honor them in this precedence:
# 3. value set as default in this code
# 2. value specified in config file
# 1. value specified in arguments of the invocation of this module (via the dispatcher)
# To help with this, the get_option(...) method provided in the laikaboss.util module provides a single method call to set the option according to this precedence.
helloworld_param = int(get_option(args, 'param', 'helloworldparam', 10))
# To add flags to the object, use the addFlag method of the scanObject.
# Flags should have the following three parts, separated by ':'s:
# * Shortened name of the module.
        # * 'nfo' if the flag is informational, 'err' if the flag is for a policy/logic error (versus a programmatic error), or leave blank if a typical flag.
# * Expressive name representing the atomic concept of the flag.
scanObject.addFlag('e_helloworld:nfo:justsayinghello')
# To add metadata to the object, use the addMetadata method of the scanObject.
scanObject.addMetadata(self.module_name, "minsize", helloworld_param)
# If you want to call a separate function, pass the data and let the
# function set flags on the data. The function will also modify the moduleResult variable
# to add subobjects
flags = self._helloworld(scanObject.buffer, moduleResult, helloworld_param)
for flag in flags:
scanObject.addFlag(flag)
# Whenever you need to do a try-except, you must make sure to catch and raise the framework ScanError exceptions.
try:
nonexistant_var.bad_method_call()
except NameError:
pass
except ScanError:
raise
# Always return a list of ModuleObjects (or empty list if no children)
return moduleResult
def _close(self):
'''
Laika module destructor. This method is available for any actions that need to be done prior to the closing of the module, such as shutting down cleanly any client connections or closing files. It does not need to be defined for every module, such as this one, since there is nothing to do here. It is here to remind you that it is available.
'''
pass
@staticmethod
def _helloworld(buffer, moduleResult, helloworld_param):
'''
An example of a worker function you may include in your module.
Note the @staticmethod "decorator" on the top of the function.
These private methods are set to static to ensure immutability since
they may be called more than once in the lifetime of the class
'''
flags = []
# Using the logging module is a great way to create debugging output during testing without generating anything during production.
# The Laika framework does not use the logging module for its logging (it uses syslog underneath several helpers found it laikaboss.util),
        # so none of these messages will clutter up Laika logs.
logging.debug('Hello world!')
logging.debug('HELLOWORLD invoked with helloworld_param value %i', helloworld_param)
if helloworld_param < 10:
flags.append('e_helloworld:nfo:helloworldsmall')
else:
logging.debug('HELLOWORLD(%i >= 10) setting flag', helloworld_param)
flags.append('e_helloworld:nfo:helloworld')
if helloworld_param > 20:
logging.debug('HELLOWORLD(%i > 20) adding new object', helloworld_param)
flags.append('e_helloworld:nfo:helloworldx2')
if len(buffer) > helloworld_param:
# Take the module buffer and trim the first helloworld_param size bytes.
buff = buffer[helloworld_param:]
object_name = 'e_helloworld_%s_%s' % (len(buff), hashlib.md5(buff).hexdigest())
logging.debug('HELLOWORLD - New object: %s', object_name)
# And we can create new objects that go back to the dispatcher and subsequent laika modules
# Any modifications we make to the "moduleResult" variable here will go back to the main function
# laikaboss/objectmodel.py defines the variables you can set for externalVars. Two most common to set are
# contentType
# filename
moduleResult.append(ModuleObject(buffer=buff, externalVars=ExternalVars(filename=object_name)))
else:
logging.debug('HELLOWORLD - object is too small to carve (%i < %i)', len(buffer), helloworld_param)
return set(flags)
|
modules/dbnd/src/dbnd/_vendor/croniter/tests/base.py
|
ipattarapong/dbnd
| 224 |
120446
|
try:
import unittest2 as unittest
except ImportError:
import unittest
class TestCase(unittest.TestCase):
'''
We use this base class for all the tests in this package.
If necessary, we can put common utility or setup code in here.
'''
# vim:set ft=python:
|
examples/scatter1.py
|
yang69can/pyngl
| 125 |
120450
|
#
# File:
# scatter1.py
#
# Synopsis:
# Draws a scatter visualization using polymarkers.
#
# Categories:
# Polymarkers
# Tickmarks
# Text
# XY plots
#
# Author:
# <NAME>
#
# Date of initial publication:
# October, 2004
#
# Description:
#    This example reads in some dummy data of XY coordinates and
# associated color indices in the range from 1 to 4. A
# scatter plot is drawn using markers colored and sized
# as per the color indices. A quadratic least squares fit
# is calculated.
#
# Effects illustrated:
# o Polymarkers.
# o Least squares fit.
# o XY plot.
# o Tickmark resource settings.
#
# Output:
# A single visualization is produced.
#
# Notes:
# 1.) This visualization is similar in appearance to one provided
#         by Joel Norris of GFDL, but dummy data are used.
#     2.) This example requires importing the Scientific package.
from __future__ import print_function
#
#
# Import numpy.
#
import numpy, os
#
# Import Nio for reading netCDF files.
#
import Nio
#
# Import Ngl.
#
import Ngl
#
# This plot is very similar to one done by Joel Norris of GFDL. The
# original data is no longer available, so dummy data is used in this
# case.
#
# Read the scattered data and extract the x, y, and color variables.
#
dirc = Ngl.pynglpath("data")
ncdf = Nio.open_file(os.path.join(dirc,"cdf","scatter1.nc"),"r")
x = ncdf.variables["x"][:]
y = ncdf.variables["y"][:]
colors = ncdf.variables["colors"][:]
color_index = colors.astype('i')
#
# Open an output workstation.
#
wks_type = "png"
wks = Ngl.open_wks(wks_type,"scatter1")
#
# Label the plot.
#
resources = Ngl.Resources()
resources.tiMainString = "2000 Mar 19 1040-1725Z"
resources.tiMainFont = "helvetica-bold"
resources.tiXAxisString = "Cloud Thickness (m)"
resources.tiXAxisFont = "helvetica-bold"
resources.tiYAxisString = "Liquid Water Path (mm)"
resources.tiYAxisFont = "helvetica-bold"
resources.tiXAxisFontHeightF = 0.025 # Change the font size.
resources.tiYAxisFontHeightF = 0.025
resources.xyLineColors = "black" # Set the line colors.
resources.xyMonoLineThickness = True # Set the line colors.
resources.xyLineThicknessF = 3.0 # Triple the width.
resources.tmXBMajorLengthF = 0.02 # Force tickmarks to point
resources.tmXBMajorOutwardLengthF = 0.02 # out by making the outward
resources.tmXBMinorLengthF = 0.01 # Force tickmarks to point
resources.tmXBMinorOutwardLengthF = 0.01 # out by making the outward
resources.tmXBLabelFont = "helvetica-bold"
resources.tmYLMajorLengthF = 0.02 # tick length equal to the
resources.tmYLMajorOutwardLengthF = 0.02 # total tick length
resources.tmYLMinorLengthF = 0.01
resources.tmYLMinorOutwardLengthF = 0.01
resources.tmYLLabelFont = "helvetica-bold"
resources.trXMaxF = 800. # Force the X-axis to run to 800.
resources.trYMaxF = 0.35 # Force the Y-axis to run to 0.35
txres = Ngl.Resources()
txres.txFont = "helvetica-bold"
txres.txFontHeightF = 0.02
txres.txJust = "CenterLeft"
Ngl.text_ndc(wks,"LWP=0.5 x 1.55 x 10~S~-6~N~ x Thickness~S1~2", \
0.24,0.85,txres)
xpos = 0.245
delx = 0.02
ypos = 0.78
dely = 0.035
gsres = Ngl.Resources()
gsres.gsMarkerIndex = 16 # Change marker type to a filled circle.
gsres.gsMarkerColor = "black" # Change marker color.
gsres.gsMarkerSizeF = 4.0 # Increase marker size.
Ngl.polymarker_ndc(wks,[xpos],[ypos],gsres) # Draw a polymarker.
txres.txFontColor = "black"
Ngl.text_ndc(wks,"20 sec",xpos+delx,ypos,txres)
gsres.gsMarkerColor = "red" # Change marker color.
gsres.gsMarkerSizeF = 7. # Increase marker size.
Ngl.polymarker_ndc(wks,[xpos],[ypos-dely],gsres) # Draw a polymarker.
txres.txFontColor = "red"
Ngl.text_ndc(wks,"1 min",xpos+delx,ypos-dely,txres)
gsres.gsMarkerColor = "blue" # Change marker color.
gsres.gsMarkerSizeF = 10. # Increase marker size.
Ngl.polymarker_ndc(wks,[xpos],[ypos-2*dely],gsres) # Draw a polymarker.
txres.txFontColor = "blue"
Ngl.text_ndc(wks,"5 min",xpos+delx,ypos-2*dely,txres)
gsres.gsMarkerColor = "green" # Change marker color.
gsres.gsMarkerSizeF = 13. # Increase marker size.
Ngl.polymarker_ndc(wks,[xpos],[ypos-3*dely],gsres) # Draw a polymarker.
txres.txFontColor = "green"
Ngl.text_ndc(wks,"20 min",xpos+delx,ypos-3*dely,txres)
#
# Suppress frame call.
#
resources.nglFrame = False
#
# Do a quadratic least squares fit.
#
npoints = len(x)
a = numpy.zeros([npoints,3],'f')
for m in range(npoints):
a[m,0] = 1.
for j in range(1,3):
a[m,j] = x[m]*a[m,j-1]
c = (numpy.linalg.lstsq(a,y,rcond=1.e-15))[0]
#
# Draw the least squares quadratic curve.
#
num = 301
delx = 1000./num
xp = numpy.zeros(num,'f')
yp = numpy.zeros(num,'f')
for i in range(num):
xp[i] = float(i)*delx
yp[i] = c[0]+c[1]*xp[i]+c[2]*xp[i]*xp[i]
plot = Ngl.xy(wks,xp,yp,resources) # Draw least squares quadratic.
#
# Draw a marker at each data point using the specified color index.
#
mres = Ngl.Resources()
mres.gsMarkerIndex = 1
for i in range(len(x)):
if (color_index[i] == 1):
mres.gsMarkerColor = "black"
mres.gsMarkerSizeF = 0.01 #Increase marker size by a factor of 10.
elif (color_index[i] == 2):
mres.gsMarkerColor = "red"
mres.gsMarkerSizeF = 0.02 #Increase marker size by a factor of 10.
elif (color_index[i] == 3):
mres.gsMarkerColor = "blue"
mres.gsMarkerSizeF = 0.04 #Increase marker size by a factor of 10.
elif (color_index[i] == 4):
mres.gsMarkerColor = "green"
mres.gsMarkerSizeF = 0.05 #Increase marker size by a factor of 10.
Ngl.polymarker(wks,plot,x[i],y[i],mres) # Draw polymarkers.
Ngl.frame(wks)
# Clean up and end.
del plot
del resources
Ngl.end()
|
cloudbaseinit/conf/base.py
|
andia10240/cloudbase-init
| 160 |
120454
|
<filename>cloudbaseinit/conf/base.py
# Copyright 2016 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Options(object):
"""Contact class for all the collections of config options."""
def __init__(self, config, group="DEFAULT"):
self._config = config
self._group_name = group
@property
def group_name(self):
"""The group name for the current options."""
return self._group_name
@abc.abstractmethod
def register(self):
"""Register the current options to the global ConfigOpts object."""
pass
@abc.abstractmethod
def list(self):
"""Return a list which contains all the available options."""
pass
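# Illustrative sketch only (not part of cloudbase-init): concrete option collections
# are expected to subclass Options roughly as below, typically using oslo.config.
# The group name and the single option are assumptions chosen for the example.
#
#   from oslo_config import cfg
#
#   class ExampleOptions(Options):
#       def __init__(self, config):
#           super(ExampleOptions, self).__init__(config, group="example")
#           self._options = [cfg.StrOpt("endpoint", default="", help="Example endpoint.")]
#
#       def register(self):
#           group = cfg.OptGroup(self._group_name)
#           self._config.register_group(group)
#           self._config.register_opts(self._options, group=group)
#
#       def list(self):
#           return self._options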
|
miscellanies/simple_prefetcher.py
|
zhangzhengde0225/SwinTrack
| 143 |
120459
|
import threading
# Notes: 1. Correctness relies on the GIL.
#        2. The iterator is not thread-safe, so do not access it from different threads at the same time.
class _SimplePrefetcherIterator:
def __init__(self, iterable, low_limit: int, high_limit: int):
super(_SimplePrefetcherIterator, self).__init__()
self.iterable = iterable
self.queue = []
self.low_limit = low_limit
self.high_limit = high_limit
self.low_limit_condition = threading.Condition(threading.Lock())
self.produced_condition = threading.Condition(threading.Lock())
self.end_flag = False
self.thread_exit_flag = False
self.thread = threading.Thread(target=self.worker, daemon=True)
self.thread.start()
self.exp = None
def __del__(self):
if self.thread.is_alive():
self.thread_exit_flag = True
self.low_limit_condition.notify()
self.thread.join()
def __next__(self):
if len(self.queue) == 0:
if self.end_flag:
raise StopIteration
else:
with self.produced_condition:
while True:
# Release GIL
if self.produced_condition.wait(0.5):
break
else:
if len(self.queue) != 0:
break
elif self.end_flag:
if self.exp is not None:
raise self.exp
raise StopIteration
# Release GIL
if not self.thread.is_alive():
if self.exp is not None:
raise self.exp
else:
raise Exception('Worker exited unexpected')
item = self.queue.pop(0)
if len(self.queue) <= self.low_limit:
with self.low_limit_condition:
self.low_limit_condition.notify()
return item
def worker(self):
try:
iterator = iter(self.iterable)
while True:
if self.thread_exit_flag:
return
if len(self.queue) >= self.high_limit:
with self.low_limit_condition:
self.low_limit_condition.wait()
continue
try:
item = next(iterator)
self.queue.append(item)
if len(self.queue) == 1:
with self.produced_condition:
self.produced_condition.notify()
except (StopIteration, IndexError):
break
except Exception as e:
self.exp = e
finally:
self.end_flag = True
class SimplePrefetcher:
def __init__(self, iterable, buffer_low_limit: int = 1, buffer_high_limit: int = 3):
assert buffer_low_limit < buffer_high_limit
assert buffer_low_limit >= 0
self.iterable = iterable
self.low_limit = buffer_low_limit
self.high_limit = buffer_high_limit
def __iter__(self):
return _SimplePrefetcherIterator(self.iterable, self.low_limit, self.high_limit)
def __len__(self):
return len(self.iterable)
def __getattr__(self, item):
return getattr(self.iterable, item)
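# Minimal usage sketch (not part of the original module), assuming any plain Python
# iterable: items are produced in the background worker thread while the consumer is
# busy, with roughly `buffer_low_limit` to `buffer_high_limit` items kept queued.
if __name__ == '__main__':
    import time
    def slow_source():
        for i in range(5):
            time.sleep(0.1)  # simulate a slow producer (e.g. disk or network I/O)
            yield i
    for value in SimplePrefetcher(slow_source()):
        print(value)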
|
abusehelper/bots/reprbot/reprbot.py
|
AbuseSA/abusehelper
| 117 |
120533
|
"""
Read '/repr key=val, key2=val2' style messages from the message body and send
back a similar message that includes the machine readable idiokit
namespace.
"""
import idiokit
from idiokit.xmpp import jid
from abusehelper.core import bot, events, taskfarm
def _collect_text(element):
yield element.text
for child in element.children():
for text in _collect_text(child):
yield text
yield element.tail
def get_message_text(message):
html = message.children(
"html", ns="http://jabber.org/protocol/xhtml-im"
).children(
"body", ns="http://www.w3.org/1999/xhtml"
)
for body in html:
pieces = list(_collect_text(body))
return u"".join(pieces)
for body in message.children("body"):
return body.text
return None
class ReprBot(bot.ServiceBot):
def __init__(self, *args, **keys):
bot.ServiceBot.__init__(self, *args, **keys)
self.rooms = taskfarm.TaskFarm(self.handle_room)
@idiokit.stream
def session(self, _, src_room, dst_room=None, **keys):
if not dst_room:
dst_room = src_room
yield self.rooms.inc(src_room) | self.rooms.inc(dst_room)
@idiokit.stream
def handle_room(self, name):
self.log.info("Joining room %r", name)
room = yield self.xmpp.muc.join(name, self.bot_name)
self.log.info("Joined room %r", name)
try:
yield idiokit.pipe(
room,
self.reply(room.jid),
events.events_to_elements())
finally:
self.log.info("Left room %r", name)
@idiokit.stream
def reply(self, own_jid):
while True:
element = yield idiokit.next()
sender = jid.JID(element.get_attr("from"))
if sender == own_jid:
continue
for message in element.named("message"):
text = get_message_text(message)
if text is None:
continue
pieces = text.split(None, 1)
if len(pieces) < 2 or pieces[0].lower() not in (u"/repr", u"!repr"):
continue
try:
event = events.Event.from_unicode(pieces[1].strip())
except ValueError:
continue
yield idiokit.send(event)
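# Example exchange (the key/value pairs are illustrative assumptions): a room message
# whose body is "/repr src=192.0.2.1, type=malware" is picked up by the reply() handler
# above, parsed with events.Event.from_unicode("src=192.0.2.1, type=malware"), and the
# resulting event is sent back to the room as a machine readable element.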
if __name__ == "__main__":
ReprBot.from_command_line().execute()
|
data/__init__.py
|
phasorhand/PyTorchText
| 1,136 |
120564
|
<gh_stars>1000+
from .fold_dataset import FoldData
|
ykdl/extractors/laifeng.py
|
Shawn-Ji/ykdl
| 1,153 |
120569
|
<reponame>Shawn-Ji/ykdl<gh_stars>1000+
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.util.html import get_content
from ykdl.util.match import match1
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
import json
from random import randint
import datetime
class Laifeng(VideoExtractor):
name = u'laifeng (来疯直播)'
def prepare(self):
assert self.url, "please provide valid url"
info = VideoInfo(self.name, True)
html = get_content(self.url)
Alias = match1(html, 'initAlias:\'([^\']+)' ,'"ln":\s*"([^"]+)"')
Token = match1(html, 'initToken: \'([^\']+)', '"tk":\s*"([^"]+)"')
info.artist = match1(html, 'anchorName:\s*\'([^\']+)', '"anchorName":\s*"([^"]+)"')
info.title = info.artist + u'的直播房间'
t = datetime.datetime.utcnow().isoformat().split('.')[0] + 'Z'
api_url = "http://lapi.lcloud.laifeng.com/Play?AppId=101&StreamName={}&Action=Schedule&Token={}&Version=2.0&CallerVersion=3.3&Caller=flash&Format=HttpFlv&Timestamp={}&Format=HttpFlv&rd={}".format(Alias, Token, t, randint(10000, 99999) )
data1 = json.loads(get_content(api_url))
assert data1['Code'] == 'Success', data1['Message']
stream_url = data1['HttpFlv'][0]['Url']
info.stream_types.append('current')
info.streams['current'] = {'container': 'flv', 'video_profile': 'current', 'src' : [stream_url], 'size': float('inf')}
return info
site = Laifeng()
|
tests/pytests/unit/modules/test_monit.py
|
eiginn/salt
| 9,425 |
120610
|
"""
:codeauthor: <NAME> <<EMAIL>>
"""
import pytest
import salt.modules.monit as monit
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {monit: {}}
def test_start():
"""
Test for start
"""
with patch.dict(monit.__salt__, {"cmd.retcode": MagicMock(return_value=False)}):
assert monit.start("name")
def test_stop():
"""
    Test stopping a service via monit
"""
with patch.dict(monit.__salt__, {"cmd.retcode": MagicMock(return_value=False)}):
assert monit.stop("name")
def test_restart():
"""
    Test restarting a service via monit
"""
with patch.dict(monit.__salt__, {"cmd.retcode": MagicMock(return_value=False)}):
assert monit.restart("name")
def test_unmonitor():
"""
    Test unmonitoring a service via monit
"""
with patch.dict(monit.__salt__, {"cmd.retcode": MagicMock(return_value=False)}):
assert monit.unmonitor("name")
def test_monitor():
"""
    Test monitoring a service via monit
"""
with patch.dict(monit.__salt__, {"cmd.retcode": MagicMock(return_value=False)}):
assert monit.monitor("name")
def test_summary():
"""
    Test displaying a summary from monit
"""
mock = MagicMock(side_effect=["daemon is not running", "A\nB\nC\nD\nE"])
with patch.dict(monit.__salt__, {"cmd.run": mock}):
assert monit.summary() == {"monit": "daemon is not running", "result": False}
assert monit.summary() == {}
def test_status():
"""
    Test displaying a process status from monit
"""
with patch.dict(monit.__salt__, {"cmd.run": MagicMock(return_value="Process")}):
assert monit.status("service") == "No such service"
def test_reload():
"""
    Test reloading the configuration
"""
mock = MagicMock(return_value=0)
with patch.dict(monit.__salt__, {"cmd.retcode": mock}):
assert monit.reload_()
def test_version():
"""
    Test displaying the version from monit -V
"""
mock = MagicMock(return_value="This is Monit version 5.14\nA\nB")
with patch.dict(monit.__salt__, {"cmd.run": mock}):
assert monit.version() == "5.14"
def test_id():
"""
    Test displaying the unique id
"""
mock = MagicMock(return_value="Monit ID: d3b1aba48527dd599db0e86f5ad97120")
with patch.dict(monit.__salt__, {"cmd.run": mock}):
assert monit.id_() == "d3b1aba48527dd599db0e86f5ad97120"
def test_reset_id():
"""
    Test regenerating a unique id
"""
expected = {"stdout": "Monit id d3b1aba48527dd599db0e86f5ad97120 and ..."}
mock = MagicMock(return_value=expected)
with patch.dict(monit.__salt__, {"cmd.run_all": mock}):
assert monit.id_(reset=True) == "d3b1aba48527dd599db0e86f5ad97120"
def test_configtest():
"""
    Test checking the configuration syntax
"""
excepted = {"stdout": "Control file syntax OK", "retcode": 0, "stderr": ""}
mock = MagicMock(return_value=excepted)
with patch.dict(monit.__salt__, {"cmd.run_all": mock}):
assert monit.configtest()["result"]
assert monit.configtest()["comment"] == "Syntax OK"
def test_validate():
"""
    Test checking that all services are monitored
"""
mock = MagicMock(return_value=0)
with patch.dict(monit.__salt__, {"cmd.retcode": mock}):
assert monit.validate()
|
homeassistant/components/worldclock/sensor.py
|
MrDelik/core
| 30,023 |
120642
|
"""Support for showing the time in a different time zone."""
from __future__ import annotations
from datetime import tzinfo
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_NAME, CONF_TIME_ZONE
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
import homeassistant.util.dt as dt_util
CONF_TIME_FORMAT = "time_format"
DEFAULT_NAME = "Worldclock Sensor"
DEFAULT_TIME_STR_FORMAT = "%H:%M"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TIME_ZONE): cv.time_zone,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TIME_FORMAT, default=DEFAULT_TIME_STR_FORMAT): cv.string,
}
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the World clock sensor."""
time_zone = dt_util.get_time_zone(config[CONF_TIME_ZONE])
async_add_entities(
[
WorldClockSensor(
time_zone,
config[CONF_NAME],
config[CONF_TIME_FORMAT],
)
],
True,
)
class WorldClockSensor(SensorEntity):
"""Representation of a World clock sensor."""
_attr_icon = "mdi:clock"
def __init__(self, time_zone: tzinfo | None, name: str, time_format: str) -> None:
"""Initialize the sensor."""
self._attr_name = name
self._time_zone = time_zone
self._time_format = time_format
async def async_update(self) -> None:
"""Get the time and updates the states."""
self._attr_native_value = dt_util.now(time_zone=self._time_zone).strftime(
self._time_format
)
|
run_tests.py
|
santosh653/yappi
| 877 |
120656
|
<reponame>santosh653/yappi
import unittest
import sys
def _testsuite_from_tests(tests):
suite = unittest.TestSuite()
loader = unittest.defaultTestLoader
for t in tests:
test = loader.loadTestsFromName('tests.%s' % (t))
suite.addTest(test)
return suite
if __name__ == '__main__':
sys.path.append('tests/')
test_loader = unittest.defaultTestLoader
test_runner = unittest.TextTestRunner(verbosity=2)
tests = [
'test_functionality',
'test_hooks',
'test_tags',
]
if sys.version_info < (3, 10):
tests += ['test_gevent']
if sys.version_info >= (3, 4):
tests += ['test_asyncio']
if sys.version_info >= (3, 7):
tests += ['test_asyncio_context_vars']
test_suite = test_loader.loadTestsFromNames(tests)
if len(sys.argv) > 1:
test_suite = _testsuite_from_tests(sys.argv[1:])
#tests = ['test_functionality.BasicUsage.test_run_as_script']
print("Running following tests: %s" % (tests))
result = test_runner.run(test_suite)
sys.exit(not result.wasSuccessful())
|
tests/changes/artifacts/test_collection_artifact.py
|
vault-the/changes
| 443 |
120667
|
from cStringIO import StringIO
import mock
from changes.artifacts.base import ArtifactParseError
from changes.artifacts.collection_artifact import CollectionArtifactHandler
from changes.config import db
from changes.constants import Result
from changes.models.failurereason import FailureReason
from changes.models.jobplan import JobPlan
from changes.testutils import TestCase
class CollectionArtifactHandlerTest(TestCase):
@mock.patch.object(JobPlan, 'get_build_step_for_job')
def test_valid_json(self, get_build_step_for_job):
buildstep = mock.Mock()
get_build_step_for_job.return_value = (None, buildstep)
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase)
artifact = self.create_artifact(jobstep, 'tests.json')
handler = CollectionArtifactHandler(jobstep)
handler.FILENAMES = ('/tests.json',)
handler.process(StringIO("{}"), artifact)
buildstep.expand_jobs.assert_called_once_with(jobstep, {})
# make sure changes were committed
db.session.rollback()
assert not FailureReason.query.filter(FailureReason.step_id == jobstep.id).first()
@mock.patch.object(JobPlan, 'get_build_step_for_job')
def test_invalid_json(self, get_build_step_for_job):
buildstep = mock.Mock()
get_build_step_for_job.return_value = (None, buildstep)
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase)
artifact = self.create_artifact(jobstep, 'tests.json')
handler = CollectionArtifactHandler(jobstep)
handler.FILENAMES = ('/tests.json',)
handler.process(StringIO(""), artifact)
        assert buildstep.expand_jobs.call_count == 0
# make sure changes were committed
db.session.rollback()
assert FailureReason.query.filter(FailureReason.step_id == jobstep.id).first()
@mock.patch.object(JobPlan, 'get_build_step_for_job')
def test_parse_error(self, get_build_step_for_job):
buildstep = mock.Mock()
get_build_step_for_job.return_value = (None, buildstep)
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase)
artifact = self.create_artifact(jobstep, 'tests.json')
handler = CollectionArtifactHandler(jobstep)
handler.FILENAMES = ('/tests.json',)
buildstep.expand_jobs.side_effect = ArtifactParseError('bad file')
handler.process(StringIO("{}"), artifact)
buildstep.expand_jobs.assert_called_once_with(jobstep, {})
# make sure changes were committed
db.session.rollback()
assert FailureReason.query.filter(FailureReason.step_id == jobstep.id).first()
@mock.patch.object(JobPlan, 'get_build_step_for_job')
def test_expand_jobs_error(self, get_build_step_for_job):
buildstep = mock.Mock()
get_build_step_for_job.return_value = (None, buildstep)
project = self.create_project()
build = self.create_build(project)
job = self.create_job(build)
jobphase = self.create_jobphase(job)
jobstep = self.create_jobstep(jobphase)
artifact = self.create_artifact(jobstep, 'tests.json')
handler = CollectionArtifactHandler(jobstep)
handler.FILENAMES = ('/tests.json',)
buildstep.expand_jobs.side_effect = Exception('error')
handler.process(StringIO("{}"), artifact)
buildstep.expand_jobs.assert_called_once_with(jobstep, {})
# make sure changes were committed
db.session.rollback()
assert jobstep.result == Result.infra_failed
assert not FailureReason.query.filter(FailureReason.step_id == jobstep.id).first()
|
cbl/function_type.py
|
Commodoreprime/Command-Block-Assembly
| 223 |
120671
|
<filename>cbl/function_type.py
import abc
import cmd_ir.instructions as i
from .native_type import NativeType
from .containers import InstanceSymbol, Parameter, Temporary
class Invokable:
def match_arguments(self, compiler, container, args):
assert False
class FunctionType(NativeType, Invokable):
def __init__(self, ret_type, params, inline, is_async):
self.ret_type = ret_type
self.params = tuple(params)
self.is_inline = inline
self.is_async = is_async
self.is_intrinsic = False
def param_str(self):
return '(%s)' % ', '.join('%s %s%s' % (p.type.typename,
'&' if p.by_ref else '',
p.name) \
for p in self.params)
def allocate(self, compiler, namehint):
return FunctionContainer(self, compiler, namehint)
def intrinsic_invoke(self, container, args):
assert self.is_intrinsic, "Function is not intrinsic"
return container.value.intrinsic_invoke(container, args)
def invoke(self, container, args, ret_args):
assert not self.is_intrinsic, "Cannot call invoke on intrinsic"
return container.value.invoke(args, ret_args)
def match_arguments(self, compiler, container, args):
matcher = FunctionMatcher(compiler, container.value.name)
matcher.add_candidate(container)
return matcher.satisfy(args)
class IntrinsicFunction(metaclass=abc.ABCMeta):
@abc.abstractmethod
def invoke(self, compiler, container, args):
pass
class IntrinsicCallable(IntrinsicFunction):
def __init__(self, func):
assert callable(func)
self.__func = func
def invoke(self, compiler, container, args):
ret = self.__func(compiler, container, args)
assert ret is not None
return ret
class FunctionContainer:
def __init__(self, fn_type, compiler, name):
self.fn_type = fn_type
self.name = name
self.compiler = compiler
self.__real_fn = None
self.__extern_fn = None
def get_or_create_definition(self):
if not self.__real_fn:
self.__real_fn = self.compiler.define_function(self.name)
return self.__real_fn
@property
def ir_param_types(self):
p_types = []
for p in self.fn_type.params:
passtype = 'byref' if p.by_ref else 'byval'
for ptype in p.type.ir_types():
p_types.append((ptype, passtype))
return tuple(p_types)
@property
def ir_ret_types(self):
return tuple(self.fn_type.ret_type.ir_types())
@property
def ir_func(self):
if not self.__real_fn:
return self._get_or_create_extern()
return self.__real_fn
def _get_or_create_extern(self):
if self.__extern_fn is None:
self.__extern_fn = self.compiler.extern_function(
self.name, self.ir_param_types, self.ir_ret_types)
return self.__extern_fn
def extern_if_needed(self):
if self.__real_fn is None:
self._get_or_create_extern()
def set_as_intrinsic(self, intrinsic):
assert not self.fn_type.is_intrinsic
self.fn_type.is_intrinsic = True
self.__real_fn = intrinsic
def intrinsic_invoke(self, container, args):
assert isinstance(self.__real_fn, IntrinsicFunction)
return self.__real_fn.invoke(self.compiler, container, args)
def invoke(self, args, ret_args):
callback = None
if self.fn_type.is_async:
callback = self.compiler.create_block('async_cb')
insn = i.DeferredInvoke(self.ir_func, callback, args, ret_args)
else:
insn = i.Invoke(self.ir_func, args, ret_args)
self.compiler.add_insn(insn)
return callback
class FunctionMatcher:
def __init__(self, compiler, func_name):
self.compiler = compiler
self.candidates = []
self.func_name = func_name
def add_candidate(self, container):
self.candidates.append(container)
def satisfy(self, args):
# Trivial - eliminate different arg length
l = len(args)
candidates = [c for c in self.candidates if len(c.type.params) == l]
# Trivial - exact match
for c in candidates:
match_all = True
for param, arg in zip(c.type.params, args):
if param.type != arg.type:
match_all = False
break
if match_all:
return c, args
for c in candidates:
new_args = []
fail = False
for param, arg in zip(c.type.params, args):
new_arg = arg.type.coerce_to(self.compiler, arg, param.type)
if not new_arg:
fail = True
break
new_args.append(new_arg)
if not fail:
return c, new_args
arg_types = ', '.join(a.type.typename for a in args)
err = "Failed to find funtion overload of %s\n" % self.func_name
err += "Unable to satisfy arguments types: (%s)\n" % (arg_types,)
err += "Tried candidates:\n"
for c in self.candidates:
err += " %s%s\n" % (self.func_name, c.type.param_str())
raise TypeError(err)
class FunctionDispatchType(NativeType, Invokable):
def match_arguments(self, compiler, container, args):
# TODO possibly refactor
has_this = isinstance(container, InstanceSymbol)
if container.value._static:
assert not has_this
thiscont = None
else:
assert has_this
thiscont = Temporary(container.value._type, container.this)
return container.value.satisfy(compiler, thiscont, args)
class InstanceFunctionType(FunctionType):
def __init__(self, inst_type, ret_type, params, inline, is_async, static):
if not static:
new_params = [Parameter(inst_type, 'this', True)]
new_params.extend(params)
else:
new_params = params
super().__init__(ret_type, new_params, inline, is_async)
self.inst_type = inst_type
def allocate(self, compiler, namehint):
return super().allocate(compiler, self.inst_type.typename + '/' + namehint)
class FunctionDispatcher:
def __init__(self, the_type, basename, friendly_name, is_static):
self._name = basename
self.__resolutions = {}
self._type = the_type
self._disp_name = friendly_name
self._static = is_static
@property
def fqn(self):
return self._type.typename + '::' + self._disp_name
def has_resolutions(self):
return len(self.__resolutions) > 0
def check_name(self, params):
name = self.name_for_types(p.type for p in params)
if name in self.__resolutions:
raise TypeError(('A resolution already exists for the function with'
+ ' parameters %s') % (params,))
return name
def add_resolution(self, compiler, ret_type, params, inline, is_async):
name = self.check_name(params)
type = InstanceFunctionType(self._type, ret_type, params, inline,
is_async, self._static)
return self.__add(compiler, name, type)
def add_macro_resolution(self, compiler, ret_type, params, body,
compiletime):
name = self.check_name(params)
if not self._static:
new_params = [Parameter(self._type, 'this', True)]
new_params.extend(params)
else:
new_params = params
from .macro_type import MacroType
type = MacroType(ret_type, new_params, body, compiler.types.snapshot(),
compiletime)
return self.__add(compiler, name, type)
def __add(self, compiler, name, type):
func = type.allocate(compiler, name)
self.__resolutions[name] = type, func
return type, func
def lookup_resolution(self, params):
name = self.name_for_types(p.type for p in params)
return self.__resolutions.get(name)
def name_for_types(self, types):
# use special characters to not conflict with user defined names
return '%s-%s' % (self._name, '-'.join(t.typename for t in types))
def dispatch(self, compiler, thiscont, args):
fnsym, args = self.satisfy(compiler, thiscont, args)
# print("Dispatch", self.fqn + fnsym.type.param_str())
return compiler.function_call_expr(fnsym, *args)
def satisfy(self, compiler, thiscont, orig_args):
if self._static:
assert thiscont is None
args = orig_args
else:
assert thiscont is not None
args = [thiscont]
args.extend(orig_args)
matcher = FunctionMatcher(compiler, self.fqn)
for type, func in self.__resolutions.values():
if not self._static:
sym = InstanceSymbol(thiscont.value, type, func)
else:
sym = Temporary(type, func)
matcher.add_candidate(sym)
return matcher.satisfy(args)
|
models/csqa_dataset.py
|
kachiO/KagNet
| 244 |
120696
|
<reponame>kachiO/KagNet
import torch
import torch.utils.data as data
import numpy as np
import json
from tqdm import tqdm
import timeit
import pickle
import os
import dgl
import networkx as nx
import random
def load_embeddings(path):
print("Loading glove concept embeddings with pooling:", path)
concept_vec = np.load(path)
print("done!")
return concept_vec
class data_with_paths(data.Dataset):
def __init__(self, statement_json_file, pf_json_file, pretrained_sent_vecs, num_choice=5, max_path_len=5, start=0, end=None, cut_off=3):
self.qids = []
self.statements = []
self.correct_labels = []
statement_json_data = []
print("loading statements from %s" % statement_json_file)
with open(statement_json_file, "r") as fp:
for line in fp.readlines():
statement_data = json.loads(line.strip())
statement_json_data.append(statement_data)
print("Done!")
print("loading sent_vecs from %s" % pretrained_sent_vecs)
self.input_sent_vecs = np.load(pretrained_sent_vecs)
print("Done!")
self.qa_text = []
statement_id = 0
# load all statements
for question_id in range(len(statement_json_data)):
statements = []
qa_text_cur = []
self.qids.append([statement_json_data[question_id]["id"]])
for k, s in enumerate(statement_json_data[question_id]["statements"]):
assert len(statement_json_data[question_id]["statements"]) == num_choice # 5
qa_text_cur.append((s["statement"], s['label']))
if s["label"] is True: # true of false
self.correct_labels.append(k) # the truth id [0,1,2,3,4]
statements.append(self.input_sent_vecs[statement_id])
statement_id += 1
self.statements.append(np.array(statements))
self.qa_text.append(qa_text_cur)
# load all qa and paths
self.qa_pair_data = []
self.cpt_path_data = []
self.rel_path_data = []
start_time = timeit.default_timer()
print("loading paths from %s" % pf_json_file)
with open(pf_json_file, 'rb') as handle:
pf_json_data = pickle.load(handle)
print('\t Done! Time: ', "{0:.2f} sec".format(float(timeit.default_timer() - start_time)))
assert len(statement_json_data) * num_choice == len(pf_json_data)
for s in tqdm(pf_json_data, desc="processing paths"):
paths = []
rels = []
qa_pairs = list()
for qas in s:
# (q,a) can be identified by the first and last node in every path
# qc = qas["qc"]
# ac = qas["ac"]
pf_res = qas["pf_res"]
if pf_res is not None:
for item in pf_res:
p = item["path"]
q = p[0] + 1
a = p[-1] + 1
new_qa_pair = False
if (q,a) not in qa_pairs:
qa_pairs.append((q,a))
new_qa_pair = True
if len(p) > cut_off and not new_qa_pair:
continue # cut off by length of concepts
# padding dummy concepts and relations
p = [n + 1 for n in p]
p.extend([0] * (max_path_len - len(p))) # padding
r = item["rel"]
for i_ in range(len(r)):
for j_ in range(len(r[i_])):
if r[i_][j_] - 17 in r[i_]:
                                    r[i_][j_] -= 17 # to delete relatedto* and antonym*
r = [n[0] + 1 for n in r] # only pick the top relation when multiple ones are okay
r.extend([0] * (max_path_len - len(r))) # padding
paths.append(p)
rels.append(r)
self.qa_pair_data.append(list(qa_pairs))
self.cpt_path_data.append(paths)
self.rel_path_data.append(rels)
self.cpt_path_data = list(zip(*(iter(self.cpt_path_data),) * num_choice))
self.rel_path_data = list(zip(*(iter(self.rel_path_data),) * num_choice))
self.qa_pair_data = list(zip(*(iter(self.qa_pair_data),) * num_choice))
# slicing dataset
self.statements = self.statements[start:end]
self.correct_labels = self.correct_labels[start:end]
self.qids = self.qids[start:end]
self.cpt_path_data = self.cpt_path_data[start:end]
self.rel_path_data = self.rel_path_data[start:end]
self.qa_pair_data = self.qa_pair_data[start:end]
assert len(self.statements) == len(self.correct_labels) == len(self.qids) == len(self.cpt_path_data) == len(self.rel_path_data) == len(self.qa_pair_data)
self.n_samples = len(self.statements)
def __len__(self):
return self.n_samples
def __getitem__(self, index):
return torch.Tensor([self.statements[index]]), torch.Tensor([self.correct_labels[index]]), \
self.cpt_path_data[index], self.rel_path_data[index], self.qa_pair_data[index], self.qa_text[index]
class data_with_graphs(data.Dataset):
def __init__(self, statement_json_file, graph_ngx_file, pretrained_sent_vecs, num_choice=5, start=0, end=None, reload=True):
self.qids = []
self.statements = []
self.correct_labels = []
statement_json_data = []
print("loading statements from %s" % statement_json_file)
with open(statement_json_file, "r") as fp:
for line in fp.readlines():
statement_data = json.loads(line.strip())
statement_json_data.append(statement_data)
print("Done!")
print("loading sent_vecs from %s" % pretrained_sent_vecs)
self.input_sent_vecs = np.load(pretrained_sent_vecs)
print("Done!")
self.qa_text = []
statement_id = 0
# load all statements
for question_id in range(len(statement_json_data)):
statements = []
qa_text_cur = []
self.qids.append([statement_json_data[question_id]["id"]])
for k, s in enumerate(statement_json_data[question_id]["statements"]):
assert len(statement_json_data[question_id]["statements"]) == num_choice # 5
qa_text_cur.append((s["statement"], s['label']))
if s["label"] is True: # true of false
self.correct_labels.append(k) # the truth id [0,1,2,3,4]
statements.append(self.input_sent_vecs[statement_id])
statement_id += 1
self.statements.append(np.array(statements))
self.qa_text.append(qa_text_cur)
self.nxgs = []
self.dgs = []
start_time = timeit.default_timer()
print("loading paths from %s" % graph_ngx_file)
with open(graph_ngx_file, 'r') as fr:
for line in fr.readlines():
line = line.strip()
self.nxgs.append(line)
print('\t Done! Time: ', "{0:.2f} sec".format(float(timeit.default_timer() - start_time)))
save_file = graph_ngx_file + ".dgl.pk"
if reload and os.path.exists(save_file):
import gc
print("loading pickle for the dgl", save_file)
start_time = timeit.default_timer()
with open(save_file, 'rb') as handle:
gc.disable()
self.dgs = pickle.load(handle)
gc.enable()
print("finished loading in %.3f secs" % (float(timeit.default_timer() - start_time)))
else:
for index, nxg_str in tqdm(enumerate(self.nxgs), total=len(self.nxgs)):
nxg = nx.node_link_graph(json.loads(nxg_str))
dg = dgl.DGLGraph(multigraph=True)
# dg.from_networkx(nxg, edge_attrs=["rel"])
dg.from_networkx(nxg)
                cids = [nxg.nodes[n_id]['cid']+1 for n_id in range(len(dg))] # -1 --> 0 and 0 stands for a placeholder concept
# rel_types = [nxg.edges[u, v, r]["rel"] + 1 for u, v, r in nxg.edges] # 0 is used for
# print(line)
# node_types = [mapping_type[nxg.nodes[n_id]['type']] for n_id in range(len(dg))]
# edge_weights = [nxg.edges[u, v].get("weight", 0.0) for u, v in nxg.edges] # -1 is used for the unk edges
# dg.edata.update({'weights': torch.FloatTensor(edge_weights)})
# dg.edata.update({'rel_types': torch.LongTensor(rel_types)})
dg.ndata.update({'cncpt_ids': torch.LongTensor(cids)})
self.dgs.append(dg)
save_file = graph_ngx_file + ".dgl.pk"
print("saving pickle for the dgl", save_file)
with open(save_file, 'wb') as handle:
pickle.dump(self.dgs, handle, protocol=pickle.HIGHEST_PROTOCOL)
# self.qa_pair_data = list(zip(*(iter(self.qa_pair_data),) * num_choice))
self.nxgs = list(zip(*(iter(self.nxgs),) * num_choice))
self.dgs = list(zip(*(iter(self.dgs),) * num_choice))
# slicing dataset
self.statements = self.statements[start:end]
self.correct_labels = self.correct_labels[start:end]
self.qids = self.qids[start:end]
self.nxgs = self.nxgs[start:end]
self.dgs = self.dgs[start:end]
assert len(self.statements) == len(self.correct_labels) == len(self.qids)
self.n_samples = len(self.statements)
def __len__(self):
return self.n_samples
def __getitem__(self, index):
return torch.Tensor([self.statements[index]]), torch.Tensor([self.correct_labels[index]]), self.dgs[index]
class data_with_graphs_and_paths(data.Dataset):
def __init__(self, statement_json_file, graph_ngx_file, pf_json_file, pretrained_sent_vecs, num_choice=5, start=0, end=None, reload=True, cut_off=3):
self.qids = []
self.statements = []
self.correct_labels = []
statement_json_data = []
print("loading statements from %s" % statement_json_file)
with open(statement_json_file, "r") as fp:
for line in fp.readlines():
statement_data = json.loads(line.strip())
statement_json_data.append(statement_data)
print("Done!")
print("loading sent_vecs from %s" % pretrained_sent_vecs)
self.input_sent_vecs = np.load(pretrained_sent_vecs)
print("Done!")
self.qa_text = []
statement_id = 0
# load all statements
for question_id in range(len(statement_json_data)):
statements = []
qa_text_cur = []
self.qids.append([statement_json_data[question_id]["id"]])
for k, s in enumerate(statement_json_data[question_id]["statements"]):
assert len(statement_json_data[question_id]["statements"]) == num_choice # 5
qa_text_cur.append((s["statement"], s['label']))
if s["label"] is True: # true of false
self.correct_labels.append(k) # the truth id [0,1,2,3,4]
statements.append(self.input_sent_vecs[statement_id])
statement_id += 1
self.statements.append(np.array(statements))
self.qa_text.append(qa_text_cur)
self.nxgs = []
self.dgs = []
start_time = timeit.default_timer()
print("loading paths from %s" % graph_ngx_file)
with open(graph_ngx_file, 'r') as fr:
for line in fr.readlines():
line = line.strip()
self.nxgs.append(line)
print('\t Done! Time: ', "{0:.2f} sec".format(float(timeit.default_timer() - start_time)))
save_file = graph_ngx_file + ".dgl.pk"
if reload and os.path.exists(save_file):
import gc
print("loading pickle for the dgl", save_file)
start_time = timeit.default_timer()
with open(save_file, 'rb') as handle:
gc.disable()
self.dgs = pickle.load(handle)
gc.enable()
print("finished loading in %.3f secs" % (float(timeit.default_timer() - start_time)))
else:
for index, nxg_str in tqdm(enumerate(self.nxgs), total=len(self.nxgs)):
nxg = nx.node_link_graph(json.loads(nxg_str))
dg = dgl.DGLGraph(multigraph=True)
# dg.from_networkx(nxg, edge_attrs=["rel"])
dg.from_networkx(nxg)
                cids = [nxg.nodes[n_id]['cid']+1 for n_id in range(len(dg))] # -1 --> 0 and 0 stands for a placeholder concept
# rel_types = [nxg.edges[u, v, r]["rel"] + 1 for u, v, r in nxg.edges] # 0 is used for
# print(line)
# node_types = [mapping_type[nxg.nodes[n_id]['type']] for n_id in range(len(dg))]
# edge_weights = [nxg.edges[u, v].get("weight", 0.0) for u, v in nxg.edges] # -1 is used for the unk edges
# dg.edata.update({'weights': torch.FloatTensor(edge_weights)})
# dg.edata.update({'rel_types': torch.LongTensor(rel_types)})
dg.ndata.update({'cncpt_ids': torch.LongTensor(cids)})
self.dgs.append(dg)
save_file = graph_ngx_file + ".dgl.pk"
print("saving pickle for the dgl", save_file)
with open(save_file, 'wb') as handle:
pickle.dump(self.dgs, handle, protocol=pickle.HIGHEST_PROTOCOL)
# self.qa_pair_data = list(zip(*(iter(self.qa_pair_data),) * num_choice))
self.nxgs = list(zip(*(iter(self.nxgs),) * num_choice))
self.dgs = list(zip(*(iter(self.dgs),) * num_choice))
### loading graphs done
# load all qa and paths
self.qa_pair_data = []
self.cpt_path_data = []
self.rel_path_data = []
start_time = timeit.default_timer()
print("loading paths from %s" % pf_json_file)
with open(pf_json_file, 'rb') as handle:
pf_json_data = pickle.load(handle)
print('\t Done! Time: ', "{0:.2f} sec".format(float(timeit.default_timer() - start_time)))
assert len(statement_json_data) * num_choice == len(pf_json_data)
for s in tqdm(pf_json_data, desc="processing paths"):
paths = []
rels = []
qa_pairs = list()
for qas in s:
# (q,a) can be identified by the first and last node in every path
# qc = qas["qc"]
# ac = qas["ac"]
pf_res = qas["pf_res"]
if pf_res is not None:
for item in pf_res:
p = item["path"]
q = p[0] + 1
a = p[-1] + 1
if len(p) > cut_off:
continue # cut off by length of concepts
# padding dummy concepts and relations
p = [n + 1 for n in p]
p.extend([0] * (cut_off - len(p))) # padding
r = item["rel"]
for i_ in range(len(r)):
for j_ in range(len(r[i_])):
if r[i_][j_] - 17 in r[i_]:
                                    r[i_][j_] -= 17 # to delete relatedto* and antonym*
r = [n[0] + 1 for n in r] # only pick the top relation when multiple ones are okay
r.extend([0] * (cut_off - len(r))) # padding
assert len(p) == cut_off
paths.append(p)
rels.append(r)
if (q, a) not in qa_pairs:
qa_pairs.append((q, a))
self.qa_pair_data.append(list(qa_pairs))
self.cpt_path_data.append(paths)
self.rel_path_data.append(rels)
self.cpt_path_data = list(zip(*(iter(self.cpt_path_data),) * num_choice))
self.rel_path_data = list(zip(*(iter(self.rel_path_data),) * num_choice))
self.qa_pair_data = list(zip(*(iter(self.qa_pair_data),) * num_choice))
# slicing dataset
self.statements = self.statements[start:end]
self.correct_labels = self.correct_labels[start:end]
self.qids = self.qids[start:end]
self.nxgs = self.nxgs[start:end]
self.dgs = self.dgs[start:end]
assert len(self.statements) == len(self.correct_labels) == len(self.qids)
self.n_samples = len(self.statements)
def slice(self, start=0, end=None):
# slicing dataset
all_lists = list(zip(self.statements, self.correct_labels, self.qids, self.nxgs, self.dgs))
random.shuffle(all_lists)
self.statements, self.correct_labels, self.qids, self.nxgs, self.dgs = zip(*all_lists)
self.statements = self.statements[start:end]
self.correct_labels = self.correct_labels[start:end]
self.qids = self.qids[start:end]
self.nxgs = self.nxgs[start:end]
self.dgs = self.dgs[start:end]
assert len(self.statements) == len(self.correct_labels) == len(self.qids)
self.n_samples = len(self.statements)
def __len__(self):
return self.n_samples
def __getitem__(self, index):
return torch.Tensor([self.statements[index]]), torch.Tensor([self.correct_labels[index]]), self.dgs[index], \
self.cpt_path_data[index], self.rel_path_data[index], self.qa_pair_data[index], self.qa_text[index]
def collate_csqa_paths(samples):
    # The input `samples` is a list of tuples:
    # (statement_vecs, label, cpt_paths, rel_paths, qa_pairs, qa_text).
statements, correct_labels, cpt_path_data, rel_path_data, qa_pair_data, qa_text = map(list, zip(*samples))
sents_vecs = torch.stack(statements)
return sents_vecs, torch.Tensor([[i] for i in correct_labels]), cpt_path_data, rel_path_data, qa_pair_data
def collate_csqa_graphs(samples):
    # The input `samples` is a list of tuples:
    # (statement_vecs, label, graphs).
statements, correct_labels, graph_data = map(list, zip(*samples))
flat_graph_data = []
for gd in graph_data:
flat_graph_data.extend(gd)
# for k, g in enumerate(flat_graph_data):
# g.ndata["gid"] = torch.Tensor([k] * len(g.nodes()))
# g.edata["gid"] = torch.Tensor([k] * len(g.edges()[0]))
batched_graph = dgl.batch(flat_graph_data)
sents_vecs = torch.stack(statements)
return sents_vecs, torch.Tensor([[i] for i in correct_labels]), batched_graph
def collate_csqa_graphs_and_paths(samples):
    # The input `samples` is a list of tuples:
    # (statement_vecs, label, graphs, cpt_paths, rel_paths, qa_pairs, qa_text).
statements, correct_labels, graph_data, cpt_path_data, rel_path_data, qa_pair_data, qa_text = map(list, zip(*samples))
flat_graph_data = []
for gd in graph_data:
flat_graph_data.extend(gd)
concept_mapping_dicts = []
acc_start = 0
for k, g in enumerate(flat_graph_data):
# g.ndata["gid"] = torch.Tensor([k] * len(g.nodes()))
# g.edata["gid"] = torch.Tensor([k] * len(g.edges()[0]))
concept_mapping_dict = {}
for index, cncpt_id in enumerate(g.ndata['cncpt_ids']):
concept_mapping_dict[int(cncpt_id)] = acc_start + index
acc_start += len(g.nodes())
concept_mapping_dicts.append(concept_mapping_dict)
batched_graph = dgl.batch(flat_graph_data)
sents_vecs = torch.stack(statements)
return sents_vecs, torch.Tensor([[i] for i in correct_labels]), batched_graph, cpt_path_data, rel_path_data, qa_pair_data, concept_mapping_dicts
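# Usage sketch (file names and batch size below are illustrative assumptions): these
# collate functions are meant to be passed as `collate_fn` to a torch DataLoader, e.g.
#
#   dataset = data_with_graphs_and_paths("train.statements.jsonl", "train.graphs.jsonl",
#                                        "train.paths.pk", "train.sents.npy")
#   loader = data.DataLoader(dataset, batch_size=8, shuffle=True,
#                            collate_fn=collate_csqa_graphs_and_paths)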
|
desktop_local_tests/macos/test_macos_packet_capture_disrupt_reorder_services.py
|
UAEKondaya1/expressvpn_leak_testing
| 219 |
120701
|
<filename>desktop_local_tests/macos/test_macos_packet_capture_disrupt_reorder_services.py<gh_stars>100-1000
from desktop_local_tests.local_packet_capture_test_case_with_disrupter import LocalPacketCaptureTestCaseWithDisrupter
from desktop_local_tests.macos.macos_reorder_services_disrupter import MacOSDNSReorderServicesDisrupter
class TestMacOSPacketCaptureDisruptReorderServices(LocalPacketCaptureTestCaseWithDisrupter):
'''Summary:
Tests whether traffic leaving the user's device leaks outside of the VPN tunnel when the network
service order is changed.
Details:
This test will connect to VPN then swap the priority of the primary and secondary network
services. The test looks for leaking traffic once the service order is changed.
Discussion:
    It's not 100% clear whether, in the real world, services can change their order without user
    involvement. It is, however, still a good stress test of the application.
Weaknesses:
Packet capture tests can be noisy. Traffic can be detected as a leak but in actual fact may not
be. For example, traffic might go to a server owned by the VPN provider to re-establish
connections. In general this test is best used for manual exploring leaks rather than for
automation.
Scenarios:
Requires two active network services.
TODO:
Consider a variant which changes the network "Location". This is much more likely to be
something a user might do.
'''
def __init__(self, devices, parameters):
super().__init__(MacOSDNSReorderServicesDisrupter, devices, parameters)
|
mozillians/announcements/tests/test_managers.py
|
divyamoncy/mozillians
| 202 |
120719
|
import pytz
from datetime import datetime
from mock import patch
from nose.tools import eq_
from django.utils.timezone import make_aware
from mozillians.announcements.models import Announcement
from mozillians.announcements.tests import AnnouncementFactory, TestCase
class AnnouncementManagerTests(TestCase):
def setUp(self):
AnnouncementFactory.create(
publish_from=make_aware(datetime(2013, 2, 12), pytz.UTC),
publish_until=make_aware(datetime(2013, 2, 18), pytz.UTC))
AnnouncementFactory.create(
publish_from=make_aware(datetime(2013, 2, 15), pytz.UTC),
publish_until=make_aware(datetime(2013, 2, 17), pytz.UTC))
AnnouncementFactory.create(
publish_from=make_aware(datetime(2013, 2, 21), pytz.UTC),
publish_until=make_aware(datetime(2013, 2, 23), pytz.UTC))
@patch('mozillians.announcements.managers.now')
def test_published(self, mock_obj):
"""Test published() of Announcement Manager."""
mock_obj.return_value = make_aware(datetime(2013, 2, 10), pytz.UTC)
eq_(Announcement.objects.published().count(), 0)
mock_obj.return_value = make_aware(datetime(2013, 2, 13), pytz.UTC)
eq_(Announcement.objects.published().count(), 1)
mock_obj.return_value = make_aware(datetime(2013, 2, 16), pytz.UTC)
eq_(Announcement.objects.published().count(), 2)
mock_obj.return_value = make_aware(datetime(2013, 2, 19), pytz.UTC)
eq_(Announcement.objects.published().count(), 0)
mock_obj.return_value = make_aware(datetime(2013, 2, 24), pytz.UTC)
eq_(Announcement.objects.published().count(), 0)
@patch('mozillians.announcements.managers.now')
def test_unpublished(self, mock_obj):
"""Test unpublished() of Announcement Manager."""
mock_obj.return_value = make_aware(datetime(2013, 2, 10), pytz.UTC)
eq_(Announcement.objects.unpublished().count(), 3)
mock_obj.return_value = make_aware(datetime(2013, 2, 13), pytz.UTC)
eq_(Announcement.objects.unpublished().count(), 2)
mock_obj.return_value = make_aware(datetime(2013, 2, 16), pytz.UTC)
eq_(Announcement.objects.unpublished().count(), 1)
mock_obj.return_value = make_aware(datetime(2013, 2, 19), pytz.UTC)
eq_(Announcement.objects.unpublished().count(), 3)
mock_obj.return_value = make_aware(datetime(2013, 2, 24), pytz.UTC)
eq_(Announcement.objects.unpublished().count(), 3)
|
extra_tests/ctypes_tests/conftest.py
|
nanjekyejoannah/pypy
| 333 |
120736
|
import py
import pytest
import sys
import os
# XXX: copied from pypy/tool/cpyext/extbuild.py
if os.name != 'nt':
so_ext = 'so'
else:
so_ext = 'dll'
def _build(cfilenames, outputfilename, compile_extra, link_extra,
include_dirs, libraries, library_dirs):
try:
# monkeypatch distutils for some versions of msvc compiler
import setuptools
except ImportError:
# XXX if this fails and is required,
# we must call pypy -mensurepip after translation
pass
from distutils.ccompiler import new_compiler
from distutils import sysconfig
# XXX for Darwin running old versions of CPython 2.7.x
sysconfig.get_config_vars()
compiler = new_compiler(force=1)
sysconfig.customize_compiler(compiler) # XXX
objects = []
for cfile in cfilenames:
cfile = py.path.local(cfile)
old = cfile.dirpath().chdir()
try:
res = compiler.compile([cfile.basename],
include_dirs=include_dirs, extra_preargs=compile_extra)
assert len(res) == 1
cobjfile = py.path.local(res[0])
assert cobjfile.check()
objects.append(str(cobjfile))
finally:
old.chdir()
compiler.link_shared_object(
objects, str(outputfilename),
libraries=libraries,
extra_preargs=link_extra,
library_dirs=library_dirs)
def c_compile(cfilenames, outputfilename,
compile_extra=None, link_extra=None,
include_dirs=None, libraries=None, library_dirs=None):
compile_extra = compile_extra or []
link_extra = link_extra or []
include_dirs = include_dirs or []
libraries = libraries or []
library_dirs = library_dirs or []
if sys.platform == 'win32':
link_extra = link_extra + ['/DEBUG'] # generate .pdb file
if sys.platform == 'darwin':
# support Fink & Darwinports
for s in ('/sw/', '/opt/local/'):
if (s + 'include' not in include_dirs
and os.path.exists(s + 'include')):
include_dirs.append(s + 'include')
if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
library_dirs.append(s + 'lib')
outputfilename = py.path.local(outputfilename).new(ext=so_ext)
saved_environ = os.environ.copy()
try:
_build(
cfilenames, outputfilename,
compile_extra, link_extra,
include_dirs, libraries, library_dirs)
finally:
        # workaround for a distutils bug where some env vars can
# become longer and longer every time it is used
for key, value in saved_environ.items():
if os.environ.get(key) != value:
os.environ[key] = value
return outputfilename
# end copy
def compile_so_file(udir):
cfile = py.path.local(__file__).dirpath().join("_ctypes_test.c")
if sys.platform == 'win32':
libraries = ['oleaut32']
else:
libraries = []
return c_compile([cfile], str(udir / '_ctypes_test'), libraries=libraries)
@pytest.fixture(scope='session')
def sofile(tmpdir_factory):
udir = tmpdir_factory.mktemp('_ctypes_test')
return str(compile_so_file(udir))
@pytest.fixture
def dll(sofile):
from ctypes import CDLL
return CDLL(str(sofile))
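# Usage sketch (hypothetical test, not part of this conftest): tests simply request
# the fixture by name, e.g.
#
#   def test_dll_loads(dll):
#       assert dll._handle != 0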
|
src/lib/ntpath.py
|
DTenore/skulpt
| 2,671 |
120749
|
<filename>src/lib/ntpath.py
import _sk_fail; _sk_fail._("ntpath")
|
bbio/libraries/EventIO/eventio.py
|
timgates42/PyBBIO
| 102 |
120753
|
<filename>bbio/libraries/EventIO/eventio.py
"""
EventIO - v0.2
Copyright 2012 - <NAME> <<EMAIL>>
MIT license
Basic multi-process event-driven programming for PyBBIO.
"""
from bbio import *
from ..SafeProcess import *
import time
from collections import deque
from multiprocessing import Process
# Return value of an event function to put it back into the event loop:
EVENT_CONTINUE = True
class EventLoop(SafeProcess):
def config(self):
# deque is better optimized for applications like FIFO queues than
# lists are:
self.events = deque()
def add_event(self, event):
""" Adds given Event instance to the queue. """
self.events.append(event)
def run(self):
""" Starts the event loop. Once started, no new events can be added. """
try:
while(True):
event = self.events.popleft()
if (event.run() == EVENT_CONTINUE):
self.events.append(event)
delay(0.1)
except IndexError:
# Queue is empty; end loop.
pass
# This is the most basic event class. Takes two functions; when 'trigger'
# returns True 'event' is called. If 'event' returns EVENT_CONTINUE the event
# is put back in the event loop. Otherwise it will only be triggered once.
class Event(object):
def __init__(self, trigger, event):
# The trigger function must return something that will evaluate to True
# to trigger the event function.
self.trigger = trigger
self.event = event
def run(self):
if self.trigger():
# The event loop needs the return value of the event function so it can
# signal whether or not to re-add it:
return self.event()
# Otherwise re-add it to keep checking the trigger:
return EVENT_CONTINUE
# This is the same as the basic Event class with the addition of debouncing;
# if an event is triggered and re-added to an event loop, the trigger will be
# ignored for the given number of milliseconds.
class DebouncedEvent(object):
def __init__(self, trigger, event, debounce_ms):
self.trigger = trigger
self.event = event
self.debounce_ms = debounce_ms
self.debouncing = False
self.last_trigger = 0
def run(self):
if (self.debouncing):
if (time.time()*1000-self.last_trigger <= self.debounce_ms):
return EVENT_CONTINUE
self.debouncing = False
if self.trigger():
self.last_trigger = time.time()*1000
self.debouncing = True
return self.event()
return EVENT_CONTINUE
# This event will be triggered after the given number of milliseconds has
# elapsed. If the event function returns EVENT_CONTINUE the timer will
# restart.
class TimedEvent(Event):
def __init__(self, event, event_time_ms):
self.event = event
self.event_time_ms = event_time_ms
self.start_time = millis()
def trigger(self):
if (millis() - self.start_time >= self.event_time_ms):
self.start_time = millis()
return True
return False
# This event is based on the debounced event and compares the state of a given
# digital pin to the trigger state and calls the event function if they're the
# same. Sets the pin to an input when created.
class DigitalTrigger(DebouncedEvent):
def __init__(self, digital_pin, trigger_state, event, debounce_ms, pull=0):
pinMode(digital_pin, INPUT, pull)
trigger = lambda: digitalRead(digital_pin) == trigger_state
super(DigitalTrigger, self).__init__(trigger, event, debounce_ms)
# This Event compares the value on the given analog pin to the trigger level
# and calls the event function if direction=1 and the value is above, or if
# direction=-1 and the value is below. Either looks at a single reading or a
# running average of size n_points.
class AnalogLevel(Event):
def __init__(self, analog_pin, threshold, event, direction=1, n_points=4):
self.analog_pin = analog_pin
self.threshold = threshold
self.event = event
if (n_points < 1): n_points = 1
# Construct the window regardless of n_points; will only be used if
# n_points > 1:
window = [0 if direction > 0 else 2**12 for i in range(n_points)]
self.window = deque(window)
self.direction = direction
self.n_points = n_points
def trigger(self):
if (self.n_points > 1):
self.window.popleft()
self.window.append(analogRead(self.analog_pin))
val = sum(self.window)/self.n_points
else:
val = analogRead(self.analog_pin)
if (self.direction > 0):
return True if val > self.threshold else False
return True if val < self.threshold else False
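# Usage sketch (illustrative only; the pin constant, the 1000 ms period and the
# assumption that SafeProcess invokes config() during construction are not guaranteed
# by this file):
#
#   def heartbeat():
#       toggle(USR3)           # e.g. blink an on-board LED
#       return EVENT_CONTINUE  # keep the event in the loop
#
#   loop = EventLoop()
#   loop.add_event(TimedEvent(heartbeat, 1000))  # fire roughly every 1000 ms
#   loop.start()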
|
pybinding/chebyshev.py
|
lise1020/pybinding
| 159 |
120755
|
<filename>pybinding/chebyshev.py<gh_stars>100-1000
"""Computations based on Chebyshev polynomial expansion
The kernel polynomial method (KPM) can be used to approximate various functions by expanding them
in a series of Chebyshev polynomials.
"""
import warnings
import numpy as np
import scipy
from . import _cpp
from . import results
from .model import Model
from .system import System
from .utils.time import timed
from .support.deprecated import LoudDeprecationWarning
__all__ = ['KPM', 'kpm', 'kpm_cuda', 'SpatialLDOS',
'jackson_kernel', 'lorentz_kernel', 'dirichlet_kernel']
class SpatialLDOS:
"""Holds the results of :meth:`KPM.calc_spatial_ldos`
It behaves like a product of a :class:`.Series` and a :class:`.StructureMap`.
"""
def __init__(self, data, energy, structure):
self.data = data
self.energy = energy
self.structure = structure
def structure_map(self, energy):
"""Return a :class:`.StructureMap` of the spatial LDOS at the given energy
Parameters
----------
energy : float
Produce a structure map for LDOS data closest to this energy value.
Returns
-------
:class:`.StructureMap`
"""
idx = np.argmin(abs(self.energy - energy))
return self.structure.with_data(self.data[idx])
def ldos(self, position, sublattice=""):
"""Return the LDOS as a function of energy at a specific position
Parameters
----------
position : array_like
sublattice : Optional[str]
Returns
-------
:class:`.Series`
"""
idx = self.structure.find_nearest(position, sublattice)
return results.Series(self.energy, self.data[:, idx],
labels=dict(variable="E (eV)", data="LDOS", columns="orbitals"))
class KPM:
"""The common interface for various KPM implementations
It should not be created directly but via specific functions
like :func:`kpm` or :func:`kpm_cuda`.
All implementations are based on: https://doi.org/10.1103/RevModPhys.78.275
"""
def __init__(self, impl):
if isinstance(impl, Model):
raise TypeError("You're probably looking for `pb.kpm()` (lowercase).")
self.impl = impl
@property
def model(self) -> Model:
"""The tight-binding model holding the Hamiltonian"""
return self.impl.model
@model.setter
def model(self, model):
self.impl.model = model
@property
def system(self) -> System:
"""The tight-binding system (shortcut for `KPM.model.system`)"""
return System(self.impl.system, self.model.lattice)
@property
def scaling_factors(self) -> tuple:
"""A tuple of KPM scaling factors `a` and `b`"""
return self.impl.scaling_factors
@property
def kernel(self):
"""The damping kernel"""
return self.impl.kernel
def report(self, shortform=False):
"""Return a report of the last computation
Parameters
----------
shortform : bool, optional
Return a short one line version of the report
"""
return self.impl.report(shortform)
def __call__(self, *args, **kwargs):
warnings.warn("Use .calc_greens() instead", LoudDeprecationWarning)
return self.calc_greens(*args, **kwargs)
def moments(self, num_moments, alpha, beta=None, op=None):
r"""Calculate KPM moments in the form of expectation values
The result is an array of moments where each value is equal to:
.. math::
\mu_n = <\beta|op \cdot T_n(H)|\alpha>
Parameters
----------
num_moments : int
The number of moments to calculate.
alpha : array_like
The starting state vector of the KPM iteration.
beta : Optional[array_like]
If not given, defaults to :math:`\beta = \alpha`.
op : Optional[csr_matrix]
Operator in the form of a sparse matrix. If omitted, an identity matrix
is assumed: :math:`\mu_n = <\beta|T_n(H)|\alpha>`.
Returns
-------
ndarray
"""
from scipy.sparse import csr_matrix
if beta is None:
beta = []
if op is None:
op = csr_matrix([])
else:
op = op.tocsr()
return self.impl.moments(num_moments, alpha, beta, op)
def calc_greens(self, i, j, energy, broadening):
"""Calculate Green's function of a single Hamiltonian element
Parameters
----------
i, j : int
Hamiltonian indices.
energy : ndarray
Energy value array.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
Returns
-------
ndarray
Array of the same size as the input `energy`.
"""
return self.impl.calc_greens(i, j, energy, broadening)
def calc_ldos(self, energy, broadening, position, sublattice="", reduce=True):
"""Calculate the local density of states as a function of energy
Parameters
----------
energy : ndarray
Values for which the LDOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
position : array_like
Cartesian position of the lattice site for which the LDOS is calculated.
Doesn't need to be exact: the method will find the actual site which is
closest to the given position.
sublattice : str
Only look for sites of a specific sublattice, closest to `position`.
The default value considers any sublattice.
reduce : bool
This option is only relevant for multi-orbital models. If true, the
resulting LDOS will summed over all the orbitals at the target site
and the result will be a 1D array. If false, the individual orbital
results will be preserved and the result will be a 2D array with
`shape == (energy.size, num_orbitals)`.
Returns
-------
:class:`~pybinding.Series`
"""
ldos = self.impl.calc_ldos(energy, broadening, position, sublattice, reduce)
return results.Series(energy, ldos.squeeze(), labels=dict(variable="E (eV)", data="LDOS",
columns="orbitals"))
def calc_spatial_ldos(self, energy, broadening, shape, sublattice=""):
"""Calculate the LDOS as a function of energy and space (in the area of the given shape)
Parameters
----------
energy : ndarray
Values for which the LDOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
shape : Shape
Determines the site positions at which to do the calculation.
sublattice : str
Only look for sites of a specific sublattice, within the `shape`.
The default value considers any sublattice.
Returns
-------
:class:`SpatialLDOS`
"""
ldos = self.impl.calc_spatial_ldos(energy, broadening, shape, sublattice)
smap = self.system[shape.contains(*self.system.positions)]
if sublattice:
smap = smap[smap.sub == sublattice]
return SpatialLDOS(ldos, energy, smap)
def calc_dos(self, energy, broadening, num_random=1):
"""Calculate the density of states as a function of energy
Parameters
----------
energy : ndarray
Values for which the DOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
num_random : int
The number of random vectors to use for the stochastic calculation of KPM moments.
Larger numbers improve the quality of the result but also increase calculation time
linearly. Fortunately, result quality also improves with system size, so the DOS of
very large systems can be calculated accurately with only a small number of random
vectors.
Returns
-------
:class:`~pybinding.Series`
"""
dos = self.impl.calc_dos(energy, broadening, num_random)
return results.Series(energy, dos, labels=dict(variable="E (eV)", data="DOS"))
def deferred_ldos(self, energy, broadening, position, sublattice=""):
"""Same as :meth:`calc_ldos` but for parallel computation: see the :mod:`.parallel` module
Parameters
----------
energy : ndarray
Values for which the LDOS is calculated.
broadening : float
Width, in energy, of the smallest detail which can be resolved.
Lower values result in longer calculation time.
position : array_like
Cartesian position of the lattice site for which the LDOS is calculated.
Doesn't need to be exact: the method will find the actual site which is
closest to the given position.
sublattice : str
Only look for sites of a specific sublattice, closest to `position`.
The default value considers any sublattice.
Returns
-------
Deferred
"""
return self.impl.deferred_ldos(energy, broadening, position, sublattice)
def calc_conductivity(self, chemical_potential, broadening, temperature,
direction="xx", volume=1.0, num_random=1, num_points=1000):
"""Calculate Kubo-Bastin electrical conductivity as a function of chemical potential
The return value is in units of the conductance quantum (e^2 / hbar) not taking into
account spin or any other degeneracy.
The calculation is based on: https://doi.org/10.1103/PhysRevLett.114.116602.
Parameters
----------
chemical_potential : array_like
Values (in eV) for which the conductivity is calculated.
broadening : float
Width (in eV) of the smallest detail which can be resolved in the chemical potential.
Lower values result in longer calculation time.
temperature : float
Value of temperature for the Fermi-Dirac distribution.
direction : Optional[str]
Direction in which the conductivity is calculated. E.g., "xx", "xy", "zz", etc.
volume : Optional[float]
The volume of the system.
num_random : int
The number of random vectors to use for the stochastic calculation of KPM moments.
Larger numbers improve the quality of the result but also increase calculation time
            linearly. Fortunately, result quality also improves with system size, so accurate
            results for very large systems can be obtained with only a small number of random
vectors.
num_points : Optional[int]
Number of points for integration.
Returns
-------
:class:`~pybinding.Series`
"""
data = self.impl.calc_conductivity(chemical_potential, broadening, temperature,
direction, num_random, num_points)
if volume != 1.0:
data /= volume
        return results.Series(chemical_potential, data,
                              labels=dict(variable=r"$\mu$ (eV)", data=r"$\sigma (e^2/h)$"))
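    # Usage sketch for calc_conductivity (the values below are illustrative only and not
    # part of the original file); `kpm` is a KPM instance created via the kpm() factory:
    #   mu = np.linspace(-1.5, 1.5, 300)
    #   sigma = kpm.calc_conductivity(chemical_potential=mu, broadening=0.1,
    #                                 temperature=0.01, num_random=10)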
class _ComputeProgressReporter:
def __init__(self):
from .utils.progressbar import ProgressBar
self.pbar = ProgressBar(0)
def __call__(self, delta, total):
if total == 1:
return # Skip reporting for short jobs
if delta < 0:
print("Computing KPM moments...")
self.pbar.size = total
self.pbar.start()
elif delta == total:
self.pbar.finish()
else:
self.pbar += delta
def kpm(model, energy_range=None, kernel="default", num_threads="auto", silent=False, **kwargs):
"""The default CPU implementation of the Kernel Polynomial Method
This implementation works on any system and is well optimized.
Parameters
----------
model : Model
Model which will provide the Hamiltonian matrix.
energy_range : Optional[Tuple[float, float]]
KPM needs to know the lowest and highest eigenvalue of the Hamiltonian, before
computing the expansion moments. By default, this is determined automatically
using a quick Lanczos procedure. To override the automatic boundaries pass a
`(min_value, max_value)` tuple here. The values can be overestimated, but note
that performance drops as the energy range becomes wider. On the other hand,
underestimating the range will produce `NaN` values in the results.
kernel : Kernel
The kernel in the *Kernel* Polynomial Method. Used to improve the quality of
the function reconstructed from the Chebyshev series. Possible values are
:func:`jackson_kernel` or :func:`lorentz_kernel`. The Jackson kernel is used
by default.
num_threads : int
The number of CPU threads to use for calculations. This is automatically set
to the number of logical cores available on the current machine.
silent : bool
Don't show any progress messages.
Returns
-------
:class:`~pybinding.chebyshev.KPM`
"""
if kernel != "default":
kwargs["kernel"] = kernel
if num_threads != "auto":
kwargs["num_threads"] = num_threads
if "progress_callback" not in kwargs:
kwargs["progress_callback"] = _ComputeProgressReporter()
if silent:
del kwargs["progress_callback"]
return KPM(_cpp.kpm(model, energy_range or (0, 0), **kwargs))
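# Illustrative usage (a sketch -- `model` must be a pybinding Model built elsewhere,
# and the energy grids below are arbitrary example values):
#
#   import numpy as np
#   kpm_calc = kpm(model)
#   dos = kpm_calc.calc_dos(energy=np.linspace(-9, 9, 500), broadening=0.05, num_random=8)
#   ldos = kpm_calc.calc_ldos(energy=np.linspace(-1, 1, 500), broadening=0.03, position=[0, 0])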
def kpm_cuda(model, energy_range=None, kernel="default", **kwargs):
"""Same as :func:`kpm` except that it's executed on the GPU using CUDA (if supported)
See :func:`kpm` for detailed parameter documentation.
This method is only available if the C++ extension module was compiled with CUDA.
Parameters
----------
model : Model
energy_range : Optional[Tuple[float, float]]
kernel : Kernel
Returns
-------
:class:`~pybinding.chebyshev.KPM`
"""
try:
if kernel != "default":
kwargs["kernel"] = kernel
# noinspection PyUnresolvedReferences
return KPM(_cpp.kpm_cuda(model, energy_range or (0, 0), **kwargs))
except AttributeError:
raise Exception("The module was compiled without CUDA support.\n"
"Use a different KPM implementation or recompile the module with CUDA.")
def jackson_kernel():
"""The Jackson kernel -- a good general-purpose kernel, appropriate for most applications
Imposes Gaussian broadening `sigma = pi / N` where `N` is the number of moments. The
broadening value is user-defined for each function calculation (LDOS, Green's, etc.).
The number of moments is then determined based on the broadening -- it's not directly
set by the user.
"""
return _cpp.jackson_kernel()
def lorentz_kernel(lambda_value=4.0):
"""The Lorentz kernel -- best for Green's function
This kernel is most appropriate for the expansion of the Green’s function because it most
closely mimics the divergences near the true eigenvalues of the Hamiltonian. The Lorentzian
broadening is given by `epsilon = lambda / N` where `N` is the number of moments.
Parameters
----------
lambda_value : float
May be used to fine-tune the smoothness of the convergence. Usual values are
between 3 and 5. Lower values will speed up the calculation at the cost of
accuracy. If in doubt, leave it at the default value of 4.
"""
return _cpp.lorentz_kernel(lambda_value)
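# For Green's function work the kernel is typically passed explicitly, e.g. (sketch):
#   kpm(model, kernel=lorentz_kernel(5.0))
# while DOS/LDOS calculations usually keep the default Jackson kernel.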
def dirichlet_kernel():
"""The Dirichlet kernel -- returns raw moments, least favorable choice
This kernel doesn't modify the moments at all. The resulting moments represent just
a truncated series which results in lots of oscillation in the reconstructed function.
Therefore, this kernel should almost never be used. It's only here in case the raw
moment values are needed for some other purpose. Note that `required_num_moments()`
returns `N = pi / sigma` for compatibility with the Jackson kernel, but there is
no actual broadening associated with the Dirichlet kernel.
"""
return _cpp.dirichlet_kernel()
class _PythonImpl:
"""Basic Python/SciPy implementation of KPM"""
def __init__(self, model, energy_range, kernel, **_):
self.model = model
self.energy_range = energy_range
self.kernel = kernel
self._stats = {}
@property
def stats(self):
class AttrDict(dict):
"""Allows dict items to be retrieved as attributes: d["item"] == d.item"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.__dict__ = self
s = AttrDict(self._stats)
s.update({k: v.elapsed for k, v in s.items() if "_time" in k})
s["eps"] = s["nnz"] / s["moments_time"]
return s
def _scaling_factors(self):
"""Compute the energy bounds of the model and return the appropriate KPM scaling factors"""
def find_bounds():
if self.energy_range[0] != self.energy_range[1]:
return self.energy_range
from scipy.sparse.linalg import eigsh
h = self.model.hamiltonian
self.energy_range = [eigsh(h, which=x, k=1, tol=2e-3, return_eigenvectors=False)[0]
for x in ("SA", "LA")]
return self.energy_range
with timed() as self._stats["bounds_time"]:
emin, emax = find_bounds()
self._stats["energy_min"] = emin
self._stats["energy_max"] = emax
tolerance = 0.01
a = 0.5 * (emax - emin) * (1 + tolerance)
b = 0.5 * (emax + emin)
return a, b
def _rescale_hamiltonian(self, h, a, b):
size = h.shape[0]
with timed() as self._stats["rescale_time"]:
return (h - b * scipy.sparse.eye(size)) * (2 / a)
def _compute_diagonal_moments(self, num_moments, starter, h2):
"""Procedure for computing KPM moments when the two vectors are identical"""
r0 = starter.copy()
r1 = h2.dot(r0) * 0.5
moments = np.zeros(num_moments, dtype=h2.dtype)
moments[0] = np.vdot(r0, r0) * 0.5
moments[1] = np.vdot(r1, r0)
for n in range(1, num_moments // 2):
r0 = h2.dot(r1) - r0
r0, r1 = r1, r0
moments[2 * n] = 2 * (np.vdot(r0, r0) - moments[0])
moments[2 * n + 1] = 2 * np.vdot(r1, r0) - moments[1]
self._stats["num_moments"] = num_moments
self._stats["nnz"] = h2.nnz * num_moments / 2
self._stats["vector_memory"] = r0.nbytes + r1.nbytes
self._stats["matrix_memory"] = (h2.data.nbytes + h2.indices.nbytes + h2.indptr.nbytes
if isinstance(h2, scipy.sparse.csr_matrix) else 0)
return moments
@staticmethod
def _exval_starter(h2, index):
"""Initial vector for the expectation value procedure"""
r0 = np.zeros(h2.shape[0], dtype=h2.dtype)
r0[index] = 1
return r0
@staticmethod
def _reconstruct_real(moments, energy, a, b):
"""Reconstruct a real function from KPM moments"""
scaled_energy = (energy - b) / a
ns = np.arange(moments.size)
k = 2 / (a * np.pi)
return np.array([k / np.sqrt(1 - w**2) * np.sum(moments.real * np.cos(ns * np.arccos(w)))
for w in scaled_energy])
def _ldos(self, index, energy, broadening):
"""Calculate the LDOS at the given Hamiltonian index"""
a, b = self._scaling_factors()
num_moments = self.kernel.required_num_moments(broadening / a)
h2 = self._rescale_hamiltonian(self.model.hamiltonian, a, b)
starter = self._exval_starter(h2, index)
with timed() as self._stats["moments_time"]:
moments = self._compute_diagonal_moments(num_moments, starter, h2)
with timed() as self._stats["reconstruct_time"]:
moments *= self.kernel.damping_coefficients(num_moments)
return self._reconstruct_real(moments, energy, a, b)
def calc_ldos(self, energy, broadening, position, sublattice="", reduce=True):
"""Calculate the LDOS at the given position/sublattice"""
with timed() as self._stats["total_time"]:
system_index = self.model.system.find_nearest(position, sublattice)
ham_idx = self.model.system.to_hamiltonian_indices(system_index)
result_data = np.array([self._ldos(i, energy, broadening) for i in ham_idx]).T
if reduce:
return np.sum(result_data, axis=1)
else:
return result_data
def report(self, *_):
from .utils import with_suffix, pretty_duration
stats = self.stats.copy()
stats.update({k: with_suffix(stats[k]) for k in ("num_moments", "eps")})
stats.update({k: pretty_duration(v) for k, v in stats.items() if "_time" in k})
fmt = " ".join([
"{energy_min:.2f}, {energy_max:.2f} [{bounds_time}]",
"[{rescale_time}]",
"{num_moments} @ {eps}eps [{moments_time}]",
"[{reconstruct_time}]",
"| {total_time}"
])
return fmt.format_map(stats)
def _kpm_python(model, energy_range=None, kernel="default", **kwargs):
"""Basic Python/SciPy implementation of KPM"""
if kernel == "default":
kernel = jackson_kernel()
return KPM(_PythonImpl(model, energy_range or (0, 0), kernel, **kwargs))
|
duecredit/tests/test__main__.py
|
sanjaymsh/duecredit
| 213 |
120765
|
# emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the duecredit package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
import sys
import pytest
from six.moves import StringIO
from .. import __main__, __version__
from .. import due
def test_main_help(monkeypatch):
# Patch stdout
fakestdout = StringIO()
monkeypatch.setattr(sys, "stdout", fakestdout)
pytest.raises(SystemExit, __main__.main, ['__main__.py', '--help'])
assert(
fakestdout.getvalue().startswith(
"Usage: %s -m duecredit [OPTIONS] <file> [ARGS]\n" % sys.executable))
def test_main_version(monkeypatch):
# Patch stdout
fakestdout = StringIO()
monkeypatch.setattr(sys, "stdout", fakestdout)
pytest.raises(SystemExit, __main__.main, ['__main__.py', '--version'])
assert fakestdout.getvalue().rstrip() == "duecredit %s" % __version__
def test_main_run_a_script(tmpdir, monkeypatch):
tempfile = str(tmpdir.mkdir("sub").join("tempfile.txt"))
content = b'print("Running the script")\n'
with open(tempfile, 'wb') as f:
f.write(content)
# Patch stdout
fakestdout = StringIO()
monkeypatch.setattr(sys, "stdout", fakestdout)
# Patch due.activate
count = [0]
def count_calls(*args, **kwargs):
count[0] += 1
monkeypatch.setattr(due, "activate", count_calls)
__main__.main(['__main__.py', tempfile])
assert fakestdout.getvalue().rstrip() == "Running the script"
# And we have "activated" the due
assert count[0] == 1
|
web/search/apps.py
|
ChiChou/wiggle
| 110 |
120772
|
<reponame>ChiChou/wiggle
from django.apps import AppConfig
class WiggleConfig(AppConfig):
name = 'wiggle'
|
sdk/webpubsub/azure-messaging-webpubsubservice/azure/messaging/webpubsubservice/aio.py
|
rsdoherty/azure-sdk-for-python
| 207 |
120774
|
<gh_stars>100-1000
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
__all__ = ["WebPubSubServiceClient"]
from typing import TYPE_CHECKING
from copy import deepcopy
import azure.core.pipeline as corepipeline
import azure.core.pipeline.policies as corepolicies
import azure.core.pipeline.transport as coretransport
# Temporary location for types that eventually graduate to Azure Core
from .core import rest as corerest
from ._policies import JwtCredentialPolicy
if TYPE_CHECKING:
import azure.core.credentials as corecredentials
from azure.core.pipeline.policies import HTTPPolicy, SansIOHTTPPolicy
from typing import Any, List, cast # pylint: disable=ungrouped-imports
class WebPubSubServiceClient(object):
def __init__(self, endpoint, credential, **kwargs):
# type: (str, corecredentials.AzureKeyCredential, Any) -> None
"""Create a new WebPubSubServiceClient instance
:param endpoint: Endpoint to connect to.
:type endpoint: ~str
:param credential: Credentials to use to connect to endpoint.
:type credential: ~azure.core.credentials.AzureKeyCredential
:keyword api_version: Api version to use when communicating with the service.
:type api_version: str
:keyword user: User to connect as. Optional.
:type user: ~str
"""
self.endpoint = endpoint.rstrip("/")
        # an AsyncPipeline requires an async transport (a synchronous RequestsTransport would not work here)
        transport = kwargs.pop("transport", None) or coretransport.AioHttpTransport(
            **kwargs
        )
policies = [
corepolicies.HeadersPolicy(**kwargs),
corepolicies.UserAgentPolicy(**kwargs),
corepolicies.AsyncRetryPolicy(**kwargs),
corepolicies.ProxyPolicy(**kwargs),
corepolicies.CustomHookPolicy(**kwargs),
corepolicies.AsyncRedirectPolicy(**kwargs),
JwtCredentialPolicy(credential, kwargs.get("user", None)),
corepolicies.NetworkTraceLoggingPolicy(**kwargs),
] # type: Any
self._pipeline = corepipeline.AsyncPipeline(
transport,
policies,
) # type: corepipeline.AsyncPipeline
def _format_url(self, url):
# type: (str) -> str
assert self.endpoint[-1] != "/", "My endpoint should not have a trailing slash"
return "/".join([self.endpoint, url.lstrip("/")])
async def send_request(
self, http_request: corerest.HttpRequest, **kwargs: "Any"
) -> corerest.AsyncHttpResponse:
"""Runs the network request through the client's chained policies.
We have helper methods to create requests specific to this service in `azure.messaging.webpubsub.rest`.
Use these helper methods to create the request you pass to this method. See our example below:
>>> from azure.messaging.webpubsub.rest import build_healthapi_get_health_status_request
>>> request = build_healthapi_get_health_status_request(api_version)
<HttpRequest [HEAD], url: '/api/health'>
>>> response = await client.send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/llcwiki
For advanced cases, you can also create your own :class:`~azure.messaging.webpubsub.core.rest.HttpRequest`
and pass it in.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.messaging.webpubsub.core.rest.HttpRequest
:keyword bool stream_response: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.messaging.webpubsub.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(http_request)
request_copy.url = self._format_url(request_copy.url)
# can't do AsyncStreamContextManager yet. This client doesn't have a pipeline client,
# AsyncStreamContextManager requires a pipeline client. WIll look more into it
# if kwargs.pop("stream_response", False):
# return corerest._AsyncStreamContextManager(
# client=self._client,
# request=request_copy,
# )
pipeline_response = await self._pipeline.run(
request_copy._internal_request, **kwargs # pylint: disable=protected-access
)
response = corerest.AsyncHttpResponse(
status_code=pipeline_response.http_response.status_code,
request=request_copy,
_internal_response=pipeline_response.http_response,
)
await response.read()
return response
|
test/units/test_oci_resource_utils.py
|
slmjy/oci-ansible-modules
| 106 |
120798
|
# Copyright (c) 2018, 2019 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
from nose.plugins.skip import SkipTest
from ansible.module_utils.oracle import oci_resource_utils
try:
import oci
from oci.util import to_dict
from oci.object_storage.models import PreauthenticatedRequest
from oci.exceptions import ServiceError, ClientError
except ImportError:
raise SkipTest("test_oci_resource_utils.py requires `oci` module")
EXAMPLE_COMPARTMENT_ID = "ocid1.compartment.oc1..xxxxxEXAMPLExxxxx"
EXAMPLE_AD = "IxGV:US-ASHBURN-AD-1"
def test_convert_input_data_to_model_class():
data = {"availability_domain": EXAMPLE_AD, "compartment_id": EXAMPLE_COMPARTMENT_ID}
model = oci_resource_utils.convert_input_data_to_model_class(
data, oci.core.models.LaunchInstanceDetails
)
assert isinstance(model, oci.core.models.LaunchInstanceDetails)
assert model.availability_domain == EXAMPLE_AD
assert model.compartment_id == EXAMPLE_COMPARTMENT_ID
def test_convert_input_data_to_model_class_nested_input_type():
vnic_display_name = "my_vnic"
subnet_id = "ocid1.subnet.oc1..xxxxxEXAMPLExxxxx"
data = {
"availability_domain": EXAMPLE_AD,
"compartment_id": EXAMPLE_COMPARTMENT_ID,
"create_vnic_details": {
"display_name": vnic_display_name,
"subnet_id": subnet_id,
},
}
model = oci_resource_utils.convert_input_data_to_model_class(
data, oci.core.models.LaunchInstanceDetails
)
assert isinstance(model, oci.core.models.LaunchInstanceDetails)
assert model.availability_domain == EXAMPLE_AD
assert model.compartment_id == EXAMPLE_COMPARTMENT_ID
assert model.create_vnic_details.display_name == vnic_display_name
assert model.create_vnic_details.subnet_id == subnet_id
def test_convert_input_data_to_model_class_preserve_casing_of_user_supplied_dictionary():
metadata_key1 = "my_MetdataKey1"
metadata_key2 = "my_metadata_key_2"
metadata_value1 = "value_1"
metadata_value2 = "value_2"
data = {
"availability_domain": EXAMPLE_AD,
"compartment_id": EXAMPLE_COMPARTMENT_ID,
"metadata": {metadata_key1: metadata_value1, metadata_key2: metadata_value2},
}
model = oci_resource_utils.convert_input_data_to_model_class(
data, oci.core.models.LaunchInstanceDetails
)
assert isinstance(model, oci.core.models.LaunchInstanceDetails)
assert model.availability_domain == EXAMPLE_AD
assert model.compartment_id == EXAMPLE_COMPARTMENT_ID
assert len(model.metadata) == 2
assert model.metadata[metadata_key1] == metadata_value1
assert model.metadata[metadata_key2] == metadata_value2
def test_convert_input_data_to_model_class_nested_list_type():
vcn_id = "ocid1.vcn.oc1..xxxxxEXAMPLExxxxx"
cidr_block1 = "10.0.0.1/16"
destination_type1 = "CIDR_BLOCK"
cidr_block2 = "10.0.0.1/24"
destination_type2 = "SERVICE_CIDR_BLOCK"
data = {
"vcn_id": vcn_id,
"compartment_id": EXAMPLE_COMPARTMENT_ID,
"route_rules": [
{"cidr_block": cidr_block1, "destination_type": destination_type1},
{"cidr_block": cidr_block2, "destination_type": destination_type2},
],
}
model = oci_resource_utils.convert_input_data_to_model_class(
data, oci.core.models.CreateRouteTableDetails
)
assert isinstance(model, oci.core.models.CreateRouteTableDetails)
assert model.vcn_id == vcn_id
assert model.compartment_id == EXAMPLE_COMPARTMENT_ID
assert len(model.route_rules) == 2
assert model.route_rules[0].cidr_block == cidr_block1
assert model.route_rules[0].destination_type == destination_type1
assert model.route_rules[1].cidr_block == cidr_block2
assert model.route_rules[1].destination_type == destination_type2
def test_convert_input_data_to_model_class_nested_dict_type():
display_name = "my_load_balancer"
private_key1 = "private_key1"
public_certificate1 = "public_certificate1"
private_key2 = "private_key2"
public_certificate2 = "public_certificate2"
data = {
"display_name": display_name,
"compartment_id": EXAMPLE_COMPARTMENT_ID,
"certificates": {
"certificate_1": {
"private_key": private_key1,
"public_certificate": public_certificate1,
},
"certificate_2": {
"private_key": private_key2,
"public_certificate": public_certificate2,
},
},
}
model = oci_resource_utils.convert_input_data_to_model_class(
data, oci.load_balancer.models.CreateLoadBalancerDetails
)
assert isinstance(model, oci.load_balancer.models.CreateLoadBalancerDetails)
assert model.display_name == display_name
assert model.compartment_id == EXAMPLE_COMPARTMENT_ID
assert len(model.certificates) == 2
assert model.certificates["certificate_1"].private_key == private_key1
assert model.certificates["certificate_1"].public_certificate == public_certificate1
assert model.certificates["certificate_2"].private_key == private_key2
assert model.certificates["certificate_2"].public_certificate == public_certificate2
def test_convert_input_data_to_model_class_polymorphic_input_type():
source_type = "image"
image_id = "ocid1.image.oc1..xxxxxEXAMPLExxxxx"
data = {"source_type": source_type, "image_id": image_id}
model = oci_resource_utils.convert_input_data_to_model_class(
data, oci.core.models.InstanceSourceDetails
)
assert isinstance(model, oci.core.models.InstanceSourceViaImageDetails)
assert model.image_id == image_id
assert model.source_type == source_type
def test_convert_input_data_to_model_class_polymorphic_input_type_ignore_fields_that_dont_match_discriminator_value():
source_type = "image"
image_id = "ocid1.image.oc1..xxxxxEXAMPLExxxxx"
boot_volume_id = "ocid1.bootvolume.oc1..xxxxxEXAMPLExxxxx"
data = {
"source_type": source_type,
"image_id": image_id,
"boot_volume_id": boot_volume_id,
}
    # expect boot_volume_id to be ignored because it does not exist on InstanceSourceViaImageDetails, which is
# the type that corresponds to sourceType = image
model = oci_resource_utils.convert_input_data_to_model_class(
data, oci.core.models.InstanceSourceDetails
)
assert isinstance(model, oci.core.models.InstanceSourceViaImageDetails)
assert model.image_id == image_id
assert model.source_type == source_type
|
flambe/utils/config.py
|
ethan-asapp/flambe
| 148 |
120826
|
<gh_stars>100-1000
import os
import re
from typing import Dict
import jinja2
def generate_config_from_template(template_path: str,
config_path: str,
remove_comments: bool = False,
**template_kwargs: Dict[str, str]):
"""
Parameters
----------
template_path: str
The path to the config template
config_path: str
The path to which the rendered config should be written
remove_comments: bool
If `True`, removes comments from the rendered config before
writing it to disk
template_kwargs:
Keyword arguments to pass to your template, e.g.
`path='config.yaml', foo='bar'`
Example config:
```yaml
!Experiment
foo: {{ bar }}
baz: {{ skittles }}
```
If saved as config.yaml.template, then invoking:
```python
generate_config_from_template('config.yaml.template',
'config.yaml', bar='pickles', skittles='yum')
```
the following config will be written to 'config.yaml':
```yaml
!Experiment
foo: pickles
baz: yum
```
"""
dirname = os.path.dirname(template_path)
basename = os.path.basename(template_path)
loader = jinja2.FileSystemLoader(searchpath=dirname)
env = jinja2.Environment(loader=loader, autoescape=True)
template = env.get_template(basename)
with open(config_path, 'w') as f:
for line in template.render(**template_kwargs).split('\n'):
if remove_comments:
line = re.sub('# .*', '', line).rstrip()
if line:
f.write(line + '\n')
|
qd/cae/D3plot.py
|
martinventer/musical-dollop
| 104 |
120828
|
<reponame>martinventer/musical-dollop<gh_stars>100-1000
from ._dyna_utils import plot_parts, _parse_element_result, _extract_elem_coords
from .dyna_cpp import QD_D3plot, QD_Part
import os
import numpy as np
class D3plot(QD_D3plot):
__doc__ = QD_D3plot.__doc__
def compare_scatter(self,
filepath_list,
element_result,
pid_filter_list=None,
kMappingNeighbors=4,
export_filepath=None,
**kwargs):
        '''Compare the scatter between multiple d3plot files
Parameters
----------
filepath_list : list(str)
list of filepaths of d3plot for comparison
element_result : str or function(element)
element results to compare. Either specify a user defined
function or use predefined results. Available are
disp, plastic_strain or energy.
pid_filter_list : list(int)
list of pids to filter for optionally
kMappingNeighbors : int
number of neighbors used for nearest neighbor mapping
export_filepath : str
optional filepath for saving. If none, the model
is exported to a temporary file and shown in the
browser.
**kwargs : further arguments
additional arguments passed on to d3plot constructor (e.g. read_states)
Notes
-----
The file calling the function will be the basis for the comparison.
The other files results will be mapped onto this mesh. The scatter is
computed between all runs as maximum difference.
Examples
--------
>>> # Settings (don't forget to load the correct vars!)
>>> state_vars = ["stress_mises max"]
>>> other_files = ["path/to/d3plot_2", "path/to/d3plot_3"]
>>>
>>> # Element eval function (used for scatter computation)
>>> def elem_eval_fun(elem):
>>> result = elem.get_stress_mises()
>>> if len(result):
>>> return result[-1]
>>> return 0.
>>>
>>> # load base file
>>> d3plot = D3plot("path/to/d3plot", read_states=state_vars)
>>>
>>> # compute and plot scatter
>>> d3plot.compare_scatter(other_files, elem_eval_fun, read_states=state_vars)
'''
from sklearn.neighbors import KDTree
        if pid_filter_list is None:
pid_filter_list = []
# yay checks :)
assert isinstance(filepath_list, (list, tuple, np.ndarray))
assert all(isinstance(entry, str) for entry in filepath_list)
assert all(os.path.isfile(filepath) for filepath in filepath_list)
assert isinstance(element_result, str) or callable(element_result)
assert all(isinstance(entry, int) for entry in pid_filter_list)
assert kMappingNeighbors > 0
# prepare evaluation
read_vars_str, eval_function = _parse_element_result(element_result)
        if read_vars_str is not None:
kwargs['read_states'] = read_vars_str
# base run element coords
if not pid_filter_list:
pid_filter_list = [part.get_id() for part in self.get_parts()]
base_mesh_coords, base_mesh_results = _extract_elem_coords(
self.get_partByID(pid_filter_list),
element_result=eval_function,
element_type="shell")
# init vars for comparison
element_result_max = base_mesh_results
element_result_min = base_mesh_results
del base_mesh_results
# loop other files
for _filepath in filepath_list:
# new mesh with results
_d3plot = D3plot(_filepath, **kwargs)
_d3plot_elem_coords, _d3plot_elem_results = _extract_elem_coords(
_d3plot.get_partByID(pid_filter_list),
element_result=eval_function,
iTimestep=0,
element_type="shell")
del _d3plot # deallocate c++ stuff
# compute mapping
_tree = KDTree(_d3plot_elem_coords)
distances, mapping_indexes = _tree.query(base_mesh_coords,
return_distance=True,
sort_results=False,
k=kMappingNeighbors)
distances = np.exp(distances)
distances = distances / \
np.sum(distances, axis=1)[:, None] # softmax weights
# map results
_d3plot_elem_results = np.sum(
distances * _d3plot_elem_results[mapping_indexes], axis=1)
# update min and max
            element_result_max = np.maximum(
                _d3plot_elem_results, element_result_max)
            element_result_min = np.minimum(
                _d3plot_elem_results, element_result_min)
# compute scatter
element_result_max = element_result_max - element_result_min
del element_result_min
# plot scatter, sometimes I like it dirty
data = [0] # does not work otherwise ...
def eval_scatter(elem):
data[0] = data[0] + 1
return element_result_max[data[0] - 1]
self.plot(element_result=eval_scatter, export_filepath=export_filepath)
def plot(self, iTimestep=0, element_result=None, fringe_bounds=[None, None], export_filepath=None):
'''Plot the D3plot, currently shells only!
Parameters
----------
iTimestep : int
timestep at which to plot the D3plot
element_result : str or function
which type of results to use as fringe
None means no fringe is used.
When using string as arg you may use plastic_strain or energy.
Function shall take elem as input and return a float value (for fringe)
fringe_bounds : list(float,float) or tuple(float,float)
bounds for the fringe, default will use min and max value
export_filepath : str
optional filepath for saving. If none, the model
is exported to a temporary file and shown in the
browser.
Examples
--------
Load a d3plot and plot it's geometry
>>> d3plot = D3plot("path/to/d3plot")
>>> d3plot.plot() # just geometry
Read the state data and plot in deformed state
>>> # read state data
>>> d3plot.read_states(["disp","plastic_strain max"])
>>> d3plot.plot(iTimestep=-1) # last state
Use a user-defined element evaluation function for fringe colors.
>>> # User defined evaluation function
>>> def eval_fun(element):
>>> res = element.get_plastic_strain()
>>> if len(res): # some elements may miss plastic strain
>>> return res[-1] # last timestep
>>>
>>> d3plot.plot(iTimestep=-1, element_result=eval_fun, fringe_bounds=[0, 0.05])
'''
plot_parts(self.get_parts(),
iTimestep=iTimestep,
element_result=element_result,
fringe_bounds=fringe_bounds,
export_filepath=export_filepath)
@staticmethod
def plot_parts(parts, iTimestep=0, element_result=None, fringe_bounds=[None, None], export_filepath=None):
'''Plot a selected group of parts.
Parameters
----------
parts : Part or list(Part)
parts to plot. May be from different files.
iTimestep : int
timestep at which to plot the D3plot
element_result : str or function
which type of results to use as fringe
None means no fringe is used
When using string as arg you may use plastic_strain or energy.
Function shall take elem as input and return a float value (for fringe)
fringe_bounds : list(float,float) or tuple(float,float)
bounds for the fringe, default will use min and max value
export_filepath : str
optional filepath for saving. If none, the model
is exported to a temporary file and shown in the
browser.
Notes
-----
Can be applied to parts, coming from different files.
Examples
--------
For a full description of plotting functionality, see `d3plot.plot`.
Load a d3plot and plot a part from it:
>>> d3plot_1 = D3plot("path/to/d3plot_1")
>>> part_1 = d3plot_1.get_partByID(1)
>>> D3plot.plot_parts( [part_1] ) # static function!
Read a second d3plot and plot both parts at once
>>> d3plot_2 = D3plot("path/to/d3plot_2") # different file!
>>> part_2 = d3plot_2.get_partByID(14)
>>> D3plot.plot_parts( [part_1, part_2] )
'''
if not isinstance(parts, (tuple, list)):
parts = [parts]
assert all(isinstance(part, QD_Part)
for part in parts), "At least one list entry is not a part"
plot_parts(parts,
iTimestep=iTimestep,
element_result=element_result,
fringe_bounds=fringe_bounds,
export_filepath=export_filepath)
|
test/integration/expected_out_concat/multiple.py
|
Inveracity/flynt
| 487 |
120844
|
"""
This module will be transformed... into something far greater.
"""
a = "Hello"
msg = f"{a} World"
msg2 = f"Finally, {a} World"
print(msg)
|
ex9_4_2_tfwithkeras.py
|
soyoung9306/-3-keras
| 200 |
120853
|
<gh_stars>100-1000
import tensorflow as tf
sess = tf.Session()
from keras import backend as K
K.set_session(sess)
# Classification DNN model implementation ########################
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout
from keras.metrics import categorical_accuracy, categorical_crossentropy
class DNN():
def __init__(self, Nin, Nh_l, Nout):
self.X_ph = tf.placeholder(tf.float32, shape=(None, Nin))
self.L_ph = tf.placeholder(tf.float32, shape=(None, Nout))
# Modeling
H = Dense(Nh_l[0], activation='relu')(self.X_ph)
H = Dropout(0.5)(H)
H = Dense(Nh_l[1], activation='relu')(H)
H = Dropout(0.25)(H)
self.Y_tf = Dense(Nout, activation='softmax')(H)
# Operation
self.Loss_tf = tf.reduce_mean(
categorical_crossentropy(self.L_ph, self.Y_tf))
self.Train_tf = tf.train.AdamOptimizer().minimize(self.Loss_tf)
self.Acc_tf = categorical_accuracy(self.L_ph, self.Y_tf)
self.Init_tf = tf.global_variables_initializer()
# Data preparation ##############################
import numpy as np
from keras import datasets # mnist
from keras.utils import np_utils # to_categorical
def Data_func():
(X_train, y_train), (X_test, y_test) = datasets.mnist.load_data()
Y_train = np_utils.to_categorical(y_train)
Y_test = np_utils.to_categorical(y_test)
L, W, H = X_train.shape
X_train = X_train.reshape(-1, W * H)
X_test = X_test.reshape(-1, W * H)
X_train = X_train / 255.0
X_test = X_test / 255.0
return (X_train, Y_train), (X_test, Y_test)
# Training performance analysis ##############################
from keraspp.skeras import plot_loss, plot_acc
import matplotlib.pyplot as plt
def run(model, data, sess, epochs, batch_size=100):
# epochs = 2
# batch_size = 100
(X_train, Y_train), (X_test, Y_test) = data
sess.run(model.Init_tf)
with sess.as_default():
N_tr = X_train.shape[0]
for epoch in range(epochs):
for b in range(N_tr // batch_size):
                X_tr_b = X_train[batch_size * b:batch_size * (b + 1)]
                Y_tr_b = Y_train[batch_size * b:batch_size * (b + 1)]
model.Train_tf.run(feed_dict={model.X_ph: X_tr_b, model.L_ph: Y_tr_b, K.learning_phase(): 1})
loss = sess.run(model.Loss_tf, feed_dict={model.X_ph: X_test, model.L_ph: Y_test, K.learning_phase(): 0})
acc = model.Acc_tf.eval(feed_dict={model.X_ph: X_test, model.L_ph: Y_test, K.learning_phase(): 0})
print("Epoch {0}: loss = {1:.3f}, acc = {2:.3f}".format(epoch, loss, np.mean(acc)))
# Classification DNN training and testing ####################
def main():
Nin = 784
Nh_l = [100, 50]
number_of_class = 10
Nout = number_of_class
data = Data_func()
model = DNN(Nin, Nh_l, Nout)
run(model, data, sess, 10, 100)
if __name__ == '__main__':
main()
|
indy_node/test/auth_rule/auth_framework/disable_taa.py
|
Rob-S/indy-node
| 627 |
120859
|
import json
import pytest
from indy.ledger import build_acceptance_mechanisms_request
from indy_common.authorize.auth_actions import ADD_PREFIX
from indy_common.authorize.auth_constraints import AuthConstraint, IDENTITY_OWNER
from indy_common.types import Request
from indy_node.test.auth_rule.auth_framework.basic import AuthTest
from indy_node.test.helper import build_auth_rule_request_json
from plenum.common.constants import TXN_AUTHOR_AGREEMENT_DISABLE, TXN_TYPE
from plenum.common.exceptions import RequestRejectedException
from plenum.common.util import randomString, get_utc_epoch
from plenum.test.helper import sdk_get_and_check_replies, sdk_sign_request_from_dict
from plenum.test.pool_transactions.helper import sdk_add_new_nym, sdk_sign_and_send_prepared_request
from plenum.test.txn_author_agreement.helper import sdk_send_txn_author_agreement
class TAADisableTest(AuthTest):
def __init__(self, env, action_id):
super().__init__(env, action_id)
def prepare(self):
self.default_auth_rule = self.get_default_auth_rule()
self.changed_auth_rule = self.get_changed_auth_rule()
req = self.taa_aml_request()
rep = sdk_sign_and_send_prepared_request(self.looper, self.trustee_wallet, self.sdk_pool_handle, req)
sdk_get_and_check_replies(self.looper, [rep])
self.send_taa()
def taa_aml_request(self):
return self.looper.loop.run_until_complete(build_acceptance_mechanisms_request(
self.trustee_wallet[1],
json.dumps({
'Nice way': 'very good way to accept agreement'}),
randomString(), randomString()))
def send_taa(self):
sdk_send_txn_author_agreement(self.looper, self.sdk_pool_handle, self.trustee_wallet,
randomString(10), randomString(5), ratified=get_utc_epoch())
def get_changed_auth_rule(self):
self.new_default_wallet = sdk_add_new_nym(self.looper, self.sdk_pool_handle, self.trustee_wallet,
role=IDENTITY_OWNER)
constraint = AuthConstraint(role=IDENTITY_OWNER,
sig_count=1,
need_to_be_owner=False)
return build_auth_rule_request_json(
self.looper, self.trustee_wallet[1],
auth_action=ADD_PREFIX,
auth_type=TXN_AUTHOR_AGREEMENT_DISABLE,
field='*',
new_value='*',
constraint=constraint.as_dict
)
def send_taa_disable_req(self, wallet):
operation = {TXN_TYPE: TXN_AUTHOR_AGREEMENT_DISABLE}
req = sdk_sign_request_from_dict(self.looper, wallet, operation)
self.send_and_check(json.dumps(req), wallet)
def run(self):
# Step 1. Change auth rule
self.send_and_check(self.changed_auth_rule, wallet=self.trustee_wallet)
# Step 2. Check, that we cannot do txn the old way
with pytest.raises(RequestRejectedException):
self.send_taa_disable_req(self.trustee_wallet)
# Step 3. Check, that new auth rule is used
self.send_taa_disable_req(self.new_default_wallet)
# Step 4. Return default auth rule
self.send_and_check(self.default_auth_rule, wallet=self.trustee_wallet)
# Step 5. Check, that default auth rule works
self.send_taa()
self.send_taa_disable_req(self.trustee_wallet)
def result(self):
pass
def down(self):
pass
|
calculator/use__simpleeval__module.py
|
DazEB2/SimplePyScripts
| 117 |
120890
|
<filename>calculator/use__simpleeval__module.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install simpleeval
from simpleeval import simple_eval
print(simple_eval("21 + 21")) # 42
print(simple_eval("2 + 2 * 2")) # 6
print(simple_eval('10 ** 123')) # 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
print(simple_eval("21 + 19 / 7 + (8 % 3) ** 9")) # 535.7142857142857
print(simple_eval("square(11)", functions={"square": lambda x: x * x})) # 121
|
persia/logger.py
|
dumpmemory/PERSIA
| 283 |
120923
|
import logging
from typing import Optional
from colorlog import ColoredFormatter
class levelFilter(logging.Filter):
r"""Log level filter.
Arguments:
level (int): filter log level. Only logs with level higher than ``level`` will be kept.
"""
def __init__(self, level: int):
self.level = level
def filter(self, record: logging.LogRecord) -> bool:
"""Filter the log record whose level is greater than the preset log level.
Arguments:
record (logging.LogRecord): callback function input record items.
"""
return record.levelno > self.level
STREAM_LOG_FORMAT = "%(log_color)s%(asctime)s %(levelname)-8s%(reset)s %(blue)s[%(filename)s:%(lineno)d]%(reset)s %(log_color)s%(message)s"
FILE_LOG_FORMAT = "%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s"
DEFAULT_LOGGER_NAME = "log"
_default_logger = None
LOG_COLOR = {
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red,bg_white",
}
COLOR_FORMATTER = ColoredFormatter(
STREAM_LOG_FORMAT,
datefmt=None,
reset=True,
log_colors=LOG_COLOR,
secondary_log_colors={},
style="%",
)
FORMATTER = logging.Formatter(
FILE_LOG_FORMAT,
datefmt=None,
style="%",
)
def setLogger(
name: str,
log_level: int = logging.DEBUG,
log_filename: str = "train.log",
enable_file_logger: bool = False,
err_redirect_filepath: str = "error.log",
enable_err_redirect: bool = False,
err_redirect_level: int = logging.INFO,
) -> logging.Logger:
r"""Helper function to simplify the logger setup process with provided
log_level and log_filename. Also makes it possible to redirect logs
above a certain level to a different file.
Arguments:
name (str): logger name
log_filename (str): log filename
enable_file_logger (bool): whether enable save log into file
err_redirect_filepath (str): err log redirect filepath
enable_err_redirect (bool): whether enable err log redirect
err_redirect_level (int): error redirect log level
"""
logger = logging.getLogger(name)
handler = logging.StreamHandler()
handler.setFormatter(COLOR_FORMATTER)
logger.addHandler(handler)
logger.setLevel(log_level)
if enable_file_logger:
file_normal_handler = logging.FileHandler(log_filename, mode="a")
file_normal_handler.setFormatter(FORMATTER)
logger.addHandler(file_normal_handler)
if enable_err_redirect:
file_error_handler = logging.FileHandler(err_redirect_filepath, mode="a")
file_error_handler.setFormatter(FORMATTER)
file_error_handler.addFilter(levelFilter(err_redirect_level))
logger.addHandler(file_error_handler)
return logger
def get_logger(name: str) -> logging.Logger:
r"""Get logger by name.
Arguments:
name (str): logger name.
"""
return logging.getLogger(name)
def _set_default_logger(name: str, **kwargs) -> logging.Logger:
r"""Set the default logger.
Arguments:
name (str): default logger name.
    Returns:
        logging.Logger
"""
global _default_logger
if not _default_logger:
_default_logger = setLogger(name, **kwargs)
return _default_logger
def get_default_logger(name: Optional[str] = None, **kwargs) -> logging.Logger:
r"""Get the default logger. If default logger is not set, init the default by
the given name.
Arguments:
name (str, optional): logger name.
"""
if _default_logger is None:
_set_default_logger(name or DEFAULT_LOGGER_NAME, **kwargs)
return _default_logger
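# Usage sketch (logger names and file paths below are examples only):
#
#   logger = setLogger("persia.train", log_level=logging.INFO,
#                      log_filename="train.log", enable_file_logger=True)
#   logger.info("training started")
#   same_logger = get_logger("persia.train")   # retrieve it elsewhere by name
#   default = get_default_logger()             # lazily creates the "log" logger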
|
tests/test.py
|
ebmhu2/pysqlcipher
| 108 |
120927
|
<filename>tests/test.py<gh_stars>100-1000
from pysqlcipher import dbapi2 as sqlite
conn = sqlite.connect('test.db')
c = conn.cursor()
c.execute("PRAGMA key='testaverylongpasswordisthisokey'")
c.execute("create table stocks (date text, trans text, symbol text, qty real, price real)")
c.execute("""insert into stocks values ('2006-01-05','BUY','RHAT',100,35.14)""")
conn.commit()
c.close()
|
www/tests/compression/huffman.py
|
raspberrypieman/brython
| 5,926 |
120955
|
<gh_stars>1000+
class ResizeError(Exception):
pass
def codelengths_from_frequencies(freqs):
freqs = sorted(freqs.items(),
key=lambda item: (item[1], -item[0]), reverse=True)
nodes = [Node(char=key, weight=value) for (key, value) in freqs]
while len(nodes) > 1:
right, left = nodes.pop(), nodes.pop()
node = Node(weight=right.weight + left.weight)
node.add([left, right])
if not nodes:
nodes.append(node)
else:
pos = 0
while pos < len(nodes) and nodes[pos].weight > node.weight:
pos += 1
nodes.insert(pos, node)
top = nodes[0]
tree = Tree(top)
tree.reduce(15)
codes = tree.codes()
code_items = list(codes.items())
code_items.sort(key=lambda item:(len(item[1]), item[0]))
return [(car, len(value)) for car, value in code_items]
def normalized(codelengths):
car, codelength = codelengths[0]
value = 0
codes = {car: "0" * codelength}
for (newcar, nbits) in codelengths[1:]:
value += 1
bvalue = str(bin(value))[2:]
bvalue = "0" * (codelength - len(bvalue)) + bvalue
if nbits > codelength:
codelength = nbits
bvalue += "0" * (codelength - len(bvalue))
value = int(bvalue, 2)
assert len(bvalue) == nbits
codes[newcar] = bvalue
return codes
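# Worked example (sketch): canonical codes built from (char, codelength) pairs such as
# [(65, 2), (66, 2), (67, 3), (68, 3)] come out as {65: '00', 66: '01', 67: '100', 68: '101'}:
# each code is the previous value plus one, left-shifted whenever the code length grows.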
class Tree:
def __init__(self, root):
self.root = root
self.nb_levels = 0
def length(self):
self.root.level = 0
node = self.root
nb_levels = 0
def set_level(node):
nonlocal nb_levels
for child in node.children:
child.level = node.level + 1
nb_levels = max(nb_levels, child.level)
if not child.is_leaf:
set_level(child)
set_level(self.root)
return nb_levels
def reduce_tree(self):
"""Change the tree to reduce the number of levels.
Uses the algorithm described in
http://compressions.sourceforge.net/Huffman.html#3
"""
currentlen = self.length()
deepest = self.nodes_at(currentlen)
deepest_leaves = [node for node in deepest if node.is_leaf]
rightmost_leaf = deepest_leaves[-1]
sibling = rightmost_leaf.parent.children[0]
# replace rightmost_leaf's parent by rightmost_leaf
parent = rightmost_leaf.parent
grand_parent = parent.parent
rank = grand_parent.children.index(parent)
children = grand_parent.children
children[rank] = rightmost_leaf
grand_parent.add(children)
# find first upper level with leaves
up_level = rightmost_leaf.level - 2
while up_level > 0:
nodes = self.nodes_at(up_level)
leaf_nodes = [node for node in nodes if node.is_leaf]
if leaf_nodes:
leftmost_leaf = leaf_nodes[0]
# replace by node with leaves = [sibling, leftmost_leaf]
parent = leftmost_leaf.parent
rank = parent.children.index(leftmost_leaf)
new_node = Node()
new_node.level = leftmost_leaf.level
children = [sibling, leftmost_leaf]
new_node.add(children)
parent.children[rank] = new_node
new_node.parent = parent
break
else:
up_level -= 1
if up_level == 0:
raise ResizeError
def nodes_at(self, level, top=None):
"""Return list of all the nodes below top at specified level."""
res = []
if top is None:
top = self.root
if top.level == level:
res = [top]
elif not top.is_leaf:
for child in top.children:
res += self.nodes_at(level, child)
return res
def reduce(self, maxlevels):
"""Reduce number of levels to maxlevels, if possible."""
while self.length() > maxlevels:
self.reduce_tree()
def codes(self, node=None, code=''):
"""Returns a dictionary mapping leaf characters to the Huffman code
of the node, as a string of 0's and 1's."""
if node is None:
self.dic = {}
node = self.root
if node.is_leaf:
self.dic[node.char] = code
else:
for i, child in enumerate(node.children):
self.codes(child, code + str(i))
return self.dic
class Node:
def __init__(self, char=None, weight=0, level=0):
self.char = char
self.is_leaf = char is not None
self.level = level
self.weight = weight
self.height = 0
def add(self, children):
self.children = children
for child in self.children:
child.parent = self
child.level = self.level + 1
self.height = max(self.height, children[0].height + 1,
children[1].height + 1)
node = self
while hasattr(node, "parent"):
node.parent.height = max(node.parent.height, node.height + 1)
node = node.parent
def __repr__(self):
if self.is_leaf:
return f'{chr(self.char)!r}'
else:
return f'{self.children}'
class Compresser:
def __init__(self, text):
if not isinstance(text, (bytes, bytearray, memoryview)):
raise TypeError("a bytes-like object is required, not '" +
type(text).__name__ + "'")
self.text = text
freqs = {}
for car in self.text:
            freqs[car] = freqs.get(car, 0) + 1
        freqs[256] = 1  # end-of-block marker, needed by compressed_bytes() and decompress_bytes()
self.codelengths = codelengths_from_frequencies(freqs)
self.codes = normalized(self.codelengths)
self.max_codelength = max(len(v) for v in self.codes.values())
def compressed_bytes(self):
compressed = self.compressed_str() + self.codes[256]
out = bytearray()
pos = 0
while pos < len(compressed):
bits = compressed[pos:pos + 8]
byte = int(bits, 2)
if len(bits) < 8:
byte <<= (8 - len(bits))
out.append(byte)
pos += 8
return out
def compressed_str(self):
return ''.join(self.codes[car] for car in self.text)
class Decompresser:
def __init__(self, compressed, codelengths):
self.compressed = compressed
codes = normalized(codelengths)
self.codes = {value : key for key, value in codes.items()}
self.root = Node()
self.make_tree(self.root)
def make_tree(self, node):
if node is self.root:
node.code = ''
children = []
for bit in '01':
next_code = node.code + bit
if next_code in self.codes:
child = Node(char=self.codes[next_code])
else:
child = Node()
child.code = next_code
children.append(child)
node.add(children)
for child in children:
if not child.is_leaf:
self.make_tree(child)
def decompress(self):
source = self.compressed
if isinstance(source, (bytes, bytearray)):
return self.decompress_bytes()
pos = 0
node = self.root
res = bytearray()
while pos < len(source):
code = int(source[pos])
child = node.children[code]
if child.is_leaf:
                res.append(child.char)
node = self.root
else:
node = child
pos += 1
return bytes(res)
def decompress_bytes(self):
source = self.compressed
pos = 0
node = self.root
res = bytearray()
while pos < len(source):
byte = source[pos]
mask = 128
while mask > 0:
code = bool(byte & mask)
child = node.children[code]
if child.is_leaf:
if child.char == 256:
break # end of block
res.append(child.char)
node = self.root
else:
node = child
mask >>= 1
pos += 1
return res
def compress(text, klass=bytes):
compr = Compresser(text)
result = {"codelengths": compr.codelengths}
if klass is bytes:
result["data"] = compr.compressed_bytes()
elif klass is str:
result["data"] = compr.compressed_str()
else:
raise TypeError("second argument of compress must be bytes or "
"str, not '{}'".format(klass))
return result
def decompress(data, codelengths):
decomp = Decompresser(data, codelengths)
return decomp.decompress()
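if __name__ == "__main__":
    # Round-trip sketch; it relies on the 256 end-of-block symbol being present in the
    # Compresser code table, and the sample text below is arbitrary.
    sample = b"this is an example of a huffman tree"
    packed = compress(sample)
    unpacked = decompress(packed["data"], packed["codelengths"])
    assert bytes(unpacked) == sample
    print("round trip ok,", len(packed["data"]), "compressed bytes")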
|
examples/parallel_blame.py
|
Yobmod/git-pandas
| 202 |
120999
|
from gitpandas import Repository
import time
__author__ = 'willmcginnis'
if __name__ == '__main__':
g = Repository(working_dir='..')
st = time.time()
blame = g.cumulative_blame(branch='master', include_globs=['*.py', '*.html', '*.sql', '*.md'], limit=None, skip=None)
print(blame.head())
print(time.time() - st)
st = time.time()
blame = g.parallel_cumulative_blame(branch='master', include_globs=['*.py', '*.html', '*.sql', '*.md'], limit=None, skip=None, workers=4)
print(blame.head())
print(time.time() - st)
|
examples/watch.py
|
Bakkom/micropython
| 594 |
121056
|
######################################################
# A watch (as in a small clock for your wrist or pocket)
#
# Button A sets the mode: Clock or Setting time
# Button B
# in clock mode: shows the time as a scrolling display
# in setting mode: increments the time
#
# The LED array displays the clock time in the format hh:mm.
# The digits of the time are represented by columns of LEDs.
#
# The digits 1 - 5 are represented by more LEDs being lit from
# the bottom up.
#
# For instance the digit 3 would look like:
#
# .
# .
# X
# X
# X
#
#
# The digits 6 - 9 are represented by LEDs being turned off from
# the bottom up. The digit 6 would look like:
#
# X
# X
# X
# X
# .
#
# The centre column is a colon flashing once a second to separate hours from minutes.
#
# The time 17:49 would look like:
#
# . X . . X
# . X . X .
# . X . X .
# . . . X .
# X . . X .
#
#
######################################################
from microbit import *
# Tweak CLOCK_ADJUST to make your system clock more accurate.
# My clock is too fast by 4 seconds every minute so I use 4/60.
# If your clock is too slow by 3 seconds every minute use -3/60.
CLOCK_ADJUST = 4/60
last_button_a_state = False
last_button_b_state = False
last_display_time = 0
base_time = 0
mode = 0
modes = {0:"clock", 1:"set h", 2:"mx10", 3:"m"}
def decode_time(milliseconds):
"""Converts a time in milliseconds into a string with hours:minutes,"""
mins = int(milliseconds / (1000 * 60) % 60)
hrs = int(milliseconds / (1000 * 60 * 60) % 24)
return "{h:0>2}:{m:0>2}".format(h=hrs, m=mins)
def show_time(time):
time_string = decode_time(time)
for i in range(5):
if time_string[i].isdigit():
d = int(time_string[i])
plot_LED_column(i, d)
show_colon(mode==0 and int((time / 1000) % 2))
def show_colon(visible):
display.set_pixel(2, 1, visible*9)
display.set_pixel(2, 3, visible*9)
def get_clock_time():
global base_time
sys_time = running_time() / (1 + CLOCK_ADJUST)
time = (sys_time - base_time) % (24 * 60 * 60 * 1000)
base_time = sys_time - time
return time
def plot_LED_column(column, number):
"""plots a column of LEDs to represent a number from 0 - 9"""
if number > 9:
number = 9
if number <= 5:
for i in range(4, -1, -1):
if i < 5 - number:
display.set_pixel(column, i, 0)
else:
display.set_pixel(column, i, 9)
if number > 5:
for i in range(4, -1, -1):
if i < 5 - (number - 5):
display.set_pixel(column, i, 9)
else:
display.set_pixel(column, i, 0)
while True:
# detect a change in button A's state, the Mode button
button_a_state = button_a.is_pressed()
if button_a_state != last_button_a_state:
last_button_a_state = button_a_state
#increment the mode
if button_a_state == True:
mode = (mode + 1) % 4
display.scroll(modes[mode])
show_time(get_clock_time())
# detect a change in button B's state, the increment / select button
button_b_state = button_b.is_pressed()
if button_b_state != last_button_b_state:
last_button_b_state = button_b_state
if button_b_state == True:
# button B's action depends on the current mode
if mode == 0: #show time
display.scroll(decode_time(get_clock_time()))
elif mode == 1: #setting time: increment hour units
base_time = base_time - (60 * 60 * 1000)
elif mode == 2: #setting time: increment minute tens
base_time = base_time - (10 * 60 * 1000)
elif mode == 3: #setting time: increment minute units
base_time = base_time - (60 * 1000)
show_time(get_clock_time())
#If in clock mode update the display every second
if mode == 0:
display_time = running_time() - last_display_time
if display_time >= 1000:
            last_display_time = running_time()
show_time(get_clock_time())
sleep(100)
|
examples/node_prediction/citation_gcn_custom.py
|
JonaBecher/spektral
| 2,145 |
121090
|
"""
This script is a proof of concept to train GCN as fast as possible and with as
little lines of code as possible.
It uses a custom training function instead of the standard Keras fit(), and
can train GCN for 200 epochs in a few tenths of a second (~0.20 on a GTX 1050).
"""
import tensorflow as tf
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
from spektral.datasets.citation import Cora
from spektral.layers import GCNConv
from spektral.models.gcn import GCN
from spektral.transforms import AdjToSpTensor, LayerPreprocess
from spektral.utils import tic, toc
tf.random.set_seed(seed=0) # make weight initialization reproducible
# Load data
dataset = Cora(normalize_x=True, transforms=[LayerPreprocess(GCNConv), AdjToSpTensor()])
graph = dataset[0]
x, a, y = graph.x, graph.a, graph.y
mask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te
model = GCN(n_labels=dataset.n_labels, n_input_channels=dataset.n_node_features)
optimizer = Adam(lr=1e-2)
loss_fn = CategoricalCrossentropy()
# Training step
@tf.function
def train():
with tf.GradientTape() as tape:
predictions = model([x, a], training=True)
loss = loss_fn(y[mask_tr], predictions[mask_tr])
loss += sum(model.losses)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
return loss
# Time the execution of 200 epochs of training
train() # Warm up to ignore tracing times when timing
tic()
for epoch in range(1, 201):
loss = train()
toc("Spektral - GCN (200 epochs)")
print(f"Final loss = {loss}")
|
tornado/run_test.py
|
nikicc/anaconda-recipes
| 130 |
121121
|
from tornado.httpserver import HTTPServer
|