Dataset schema: content (string, 0 to 894k characters) · type (string, 2 classes)
"""Test app factory method.""" from pytest import MonkeyPatch from app import create_app def test_app_factory_method(monkeypatch: MonkeyPatch) -> None: """Test that application test settings are correct.""" app = create_app(testing=True) assert app.testing class Recorder: dsn: str environment: str def fake_init(dsn: str, environment: str) -> None: Recorder.dsn = dsn Recorder.environment = environment monkeypatch.setattr("app.SENTRY_DSN", "http://fake.org") monkeypatch.setattr("sentry_sdk.init", fake_init) app = create_app() assert not app.testing assert Recorder.dsn == "http://fake.org" assert Recorder.environment == "dev"
python
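The test in the previous sample exercises an application factory that is not included in the dataset row. Below is a minimal sketch of one possible factory consistent with that test, assuming a Flask app whose module exposes a SENTRY_DSN constant and initializes Sentry with a hard-coded "dev" environment; all of these details are inferred from the test, not taken from a known source.

# app.py -- hypothetical factory matching the test above
from flask import Flask
import sentry_sdk

SENTRY_DSN = ""  # patched to "http://fake.org" by the test


def create_app(testing: bool = False) -> Flask:
    app = Flask(__name__)
    app.testing = testing
    if not testing and SENTRY_DSN:
        # environment="dev" mirrors the value the test asserts on
        sentry_sdk.init(dsn=SENTRY_DSN, environment="dev")
    return app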
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import PIL
from PIL import Image


def display_network(A, filename='weights.jpg', opt_normalize=True):
    """
    This function visualizes filters in matrix A. Each column of A is a
    filter. We reshape each column into a square image and visualize it on
    a cell of the visualization panel. All other parameters are optional;
    usually you do not need to worry about them.

    opt_normalize: whether we need to normalize the filters so that all of
        them have similar contrast. Default value is True.
    opt_graycolor: whether we use gray as the heat map. Default is True.
    opt_colmajor: you can switch the convention to row major for A. In that
        case, each row of A is a filter. Default value is False.
    """
    # Rescale
    A = A - np.average(A)

    # Compute rows & cols
    (row, col) = A.shape
    sz = int(np.ceil(np.sqrt(row)))
    buf = 1
    n = int(np.ceil(np.sqrt(col)))
    m = int(np.ceil(col / n))

    image = np.ones(shape=(buf + m * (sz + buf), buf + n * (sz + buf)))

    k = 0
    for i in range(int(m)):
        for j in range(int(n)):
            clim = np.max(np.abs(A[:, k]))
            # print("[DEBUG] {{i={}, j={}}} clim={}, np_max={}".format(i, j, np.max(np.abs(A[:, k])), np.max(np.abs(A))))
            if opt_normalize:
                image[buf + i * (sz + buf):buf + i * (sz + buf) + sz,
                      buf + j * (sz + buf):buf + j * (sz + buf) + sz] = \
                    A[:, k].reshape(sz, sz) / clim
            else:
                image[buf + i * (sz + buf):buf + i * (sz + buf) + sz,
                      buf + j * (sz + buf):buf + j * (sz + buf) + sz] = \
                    A[:, k].reshape(sz, sz) / np.max(np.abs(A))
            k += 1

    # image = (image + 1) / 2 * 255
    # image = image.astype(np.uint8)
    # Image.fromarray(image, 'L').show()
    plt.imsave(filename, image, cmap=matplotlib.cm.gray)


def display_color_network(A, filename='weights.png'):
    """
    Display receptive field(s) or basis vector(s) for image patches.

    A: the basis, with patches as column vectors.

    In case the midpoint is not set at 0, we shift it dynamically.
    """
    if np.min(A) >= 0:
        A = A - np.mean(A)

    # Cast sizes to int so they can be used for array shapes and slicing.
    cols = int(np.round(np.sqrt(A.shape[1])))
    channel_size = A.shape[0] // 3
    dim = int(np.sqrt(channel_size))
    dimp = dim + 1
    rows = int(np.ceil(A.shape[1] / cols))

    B = A[0:channel_size, :]
    C = A[channel_size:2 * channel_size, :]
    D = A[2 * channel_size:3 * channel_size, :]

    B = B / np.max(np.abs(B))
    C = C / np.max(np.abs(C))
    D = D / np.max(np.abs(D))

    # Initialization of the image
    image = np.ones(shape=(dim * rows + rows - 1, dim * cols + cols - 1, 3))

    for i in range(rows):
        for j in range(cols):
            # This sets the patch
            image[i * dimp:i * dimp + dim, j * dimp:j * dimp + dim, 0] = B[:, i * cols + j].reshape(dim, dim)
            image[i * dimp:i * dimp + dim, j * dimp:j * dimp + dim, 1] = C[:, i * cols + j].reshape(dim, dim)
            image[i * dimp:i * dimp + dim, j * dimp:j * dimp + dim, 2] = D[:, i * cols + j].reshape(dim, dim)

    image = (image + 1) / 2

    # PIL.Image.fromarray(np.uint8(image * 255), 'RGB').save(filename)
    PIL.Image.fromarray(np.uint8(image * 255), 'RGB').show()
python
# Copyright 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. { 'variables': { 'chromium_code': 1, 'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/ios/chrome', 'ui_string_overrider_inputs': [ '<(SHARED_INTERMEDIATE_DIR)/components/strings/grit/components_locale_settings.h', '<(SHARED_INTERMEDIATE_DIR)/components/strings/grit/components_strings.h', '<(SHARED_INTERMEDIATE_DIR)/ios/chrome/grit/ios_strings.h', ], 'ui_string_overrider_output_basename': 'ios/chrome/browser/variations/ios_ui_string_overrider_factory', 'ui_string_overrider_script_name': '../../components/variations/service/generate_ui_string_overrider.py', 'conditions': [ ['branding=="Chromium"', { 'ui_string_overrider_inputs': [ '<(SHARED_INTERMEDIATE_DIR)/components/strings/grit/components_chromium_strings.h', '<(SHARED_INTERMEDIATE_DIR)/ios/chrome/grit/ios_chromium_strings.h', ], }], ['branding=="Chrome"', { 'ui_string_overrider_inputs': [ '<(SHARED_INTERMEDIATE_DIR)/components/strings/grit/components_google_chrome_strings.h', '<(SHARED_INTERMEDIATE_DIR)/ios/chrome/grit/ios_google_chrome_strings.h', ], }], ], }, 'targets': [ { 'target_name': 'ios_chrome_resources', 'type': 'none', 'dependencies': [ 'ios_resources_gen', 'ios_strings_gen', 'ios_theme_resources_gen', ], }, { # GN version: //ios/chrome/app/strings 'target_name': 'ios_strings_gen', 'type': 'none', 'hard_dependency': 1, 'actions': [ { # GN version: //ios/chrome/app/strings:ios_strings 'action_name': 'generate_ios_strings', 'variables': { 'grit_grd_file': 'app/strings/ios_strings.grd', }, 'includes': [ '../../build/grit_action.gypi' ], }, { # GN version: //ios/chrome/app/strings:ios_chromium_strings 'action_name': 'generate_ios_chromium_strings', 'variables': { 'grit_grd_file': 'app/strings/ios_chromium_strings.grd', }, 'includes': [ '../../build/grit_action.gypi' ], }, { # GN version: //ios/chrome/app/strings:ios_google_chrome_strings 'action_name': 'generate_ios_google_chrome_strings', 'variables': { 'grit_grd_file': 'app/strings/ios_google_chrome_strings.grd', }, 'includes': [ '../../build/grit_action.gypi' ], }, ], 'direct_dependent_settings': { 'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)', ], }, }, { # GN version: //ios/chrome/app/resources 'target_name': 'ios_resources_gen', 'type': 'none', 'hard_dependency': 1, 'actions': [ { 'action_name': 'ios_resources', 'variables': { 'grit_grd_file': 'app/resources/ios_resources.grd', }, 'includes': [ '../../build/grit_action.gypi' ], }, ], 'direct_dependent_settings': { 'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)', ], }, }, { # GN version: //ios/chrome/app/theme 'target_name': 'ios_theme_resources_gen', 'type': 'none', 'hard_dependency': 1, 'actions': [ { 'action_name': 'ios_theme_resources', 'variables': { 'grit_grd_file': 'app/theme/ios_theme_resources.grd', }, 'includes': [ '../../build/grit_action.gypi' ], }, ], 'direct_dependent_settings': { 'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)', ], }, }, { 'target_name': 'ios_packed_resources', 'type': 'none', 'dependencies': [ '../../components/components_strings.gyp:components_strings', '../../net/net.gyp:net_resources', '../../ui/resources/ui_resources.gyp:ui_resources', '../../ui/strings/ui_strings.gyp:ui_strings', 'ios_chrome_resources', ], 'actions': [ { 'action_name': 'repack_ios_locales', 'variables': { 'repack_locales_path': 'tools/build/ios_repack_locales.py', }, 'inputs': [ '<(repack_locales_path)', '<!@pymod_do_main(ios_repack_locales -i ' '-s 
<(SHARED_INTERMEDIATE_DIR) ' '-x <(SHARED_INTERMEDIATE_DIR)/repack_ios ' '-b <(branding_path_component) ' '<(locales))' ], 'outputs': [ '<!@pymod_do_main(ios_repack_locales -o ' '-s <(SHARED_INTERMEDIATE_DIR) ' '-x <(SHARED_INTERMEDIATE_DIR)/repack_ios ' '<(locales))' ], 'action': [ 'python', '<(repack_locales_path)', '-x', '<(SHARED_INTERMEDIATE_DIR)/repack_ios', '-s', '<(SHARED_INTERMEDIATE_DIR)', '-b', '<(branding_path_component)', '<@(locales)', ], }, { 'action_name': 'repack_ios_resources_100_percent', 'variables': { 'pak_inputs': [ '<(SHARED_INTERMEDIATE_DIR)/components/components_resources_100_percent.pak', '<(SHARED_INTERMEDIATE_DIR)/ios/chrome/ios_theme_resources_100_percent.pak', '<(SHARED_INTERMEDIATE_DIR)/ui/resources/ui_resources_100_percent.pak', ], 'pak_output': '<(SHARED_INTERMEDIATE_DIR)/repack_ios/chrome_100_percent.pak', }, 'includes': [ '../../build/repack_action.gypi' ], }, { 'action_name': 'repack_ios_resources_200_percent', 'variables': { 'pak_inputs': [ '<(SHARED_INTERMEDIATE_DIR)/components/components_resources_200_percent.pak', '<(SHARED_INTERMEDIATE_DIR)/ios/chrome/ios_theme_resources_200_percent.pak', '<(SHARED_INTERMEDIATE_DIR)/ui/resources/ui_resources_200_percent.pak', ], 'pak_output': '<(SHARED_INTERMEDIATE_DIR)/repack_ios/chrome_200_percent.pak', }, 'includes': [ '../../build/repack_action.gypi' ], }, { 'action_name': 'repack_ios_resources_300_percent', 'variables': { 'pak_inputs': [ '<(SHARED_INTERMEDIATE_DIR)/components/components_resources_300_percent.pak', '<(SHARED_INTERMEDIATE_DIR)/ios/chrome/ios_theme_resources_300_percent.pak', '<(SHARED_INTERMEDIATE_DIR)/ui/resources/ui_resources_300_percent.pak', ], 'pak_output': '<(SHARED_INTERMEDIATE_DIR)/repack_ios/chrome_300_percent.pak', }, 'includes': [ '../../build/repack_action.gypi' ], }, { 'action_name': 'repack_ios_resources', 'variables': { 'pak_inputs': [ '<(SHARED_INTERMEDIATE_DIR)/components/components_resources.pak', '<(SHARED_INTERMEDIATE_DIR)/ios/chrome/ios_resources.pak', '<(SHARED_INTERMEDIATE_DIR)/net/net_resources.pak', '<(SHARED_INTERMEDIATE_DIR)/ui/resources/webui_resources.pak', ], 'pak_output': '<(SHARED_INTERMEDIATE_DIR)/repack_ios/resources.pak', }, 'includes': [ '../../build/repack_action.gypi' ], }, ], }, { 'target_name': 'ios_chrome_ui_string_overrider_factory_gen', 'type': 'none', 'hard_dependency': 1, 'direct_dependent_settings': { 'include_dirs': [ '<(SHARED_INTERMEDIATE_DIR)', ], }, 'dependencies': [ '../../components/components_strings.gyp:components_strings', 'ios_strings_gen', ], 'actions': [ { 'action_name': 'generate_ios_ui_string_overrider', 'inputs': [ '<(ui_string_overrider_script_name)', '<@(ui_string_overrider_inputs)', ], 'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/<(ui_string_overrider_output_basename).cc', '<(SHARED_INTERMEDIATE_DIR)/<(ui_string_overrider_output_basename).h', ], 'action': [ 'python', '<(ui_string_overrider_script_name)', '-o', '<(SHARED_INTERMEDIATE_DIR)', '-S', '<(ui_string_overrider_output_basename).cc', '-H', '<(ui_string_overrider_output_basename).h', '<@(ui_string_overrider_inputs)', ], }, ], }, { 'target_name': 'ios_chrome_ui_string_overrider_factory', 'type': 'static_library', 'dependencies': [ '../../components/components.gyp:variations_service', 'ios_chrome_ui_string_overrider_factory_gen', ], 'sources': [ '<(SHARED_INTERMEDIATE_DIR)/<(ui_string_overrider_output_basename).cc', '<(SHARED_INTERMEDIATE_DIR)/<(ui_string_overrider_output_basename).h', ], }, ], }
python
import json
from os import path
from subprocess import run, PIPE
from typing import Dict


def get_key_signer(key_name: str, keys_dir: str) -> Dict:
    with open(path.join(keys_dir, key_name + ".json"), "r") as f:
        return json.load(f)


def get_key_multisig_addr(key_name: str) -> str:
    p = run(('secretcli', 'keys', 'list'), stdout=PIPE, stderr=PIPE)
    res = ''
    for key in filter(lambda x: x['name'] == key_name, json.loads(p.stdout)):
        res = key['address']
    if not res:
        raise RuntimeError(f"No key account with required name: {key_name}")
    return res


def get_viewing_key(a_address: str, secret_contract_address: str) -> str:
    # get view key
    json_q = '{"create_viewing_key": {"entropy": "random phrase"}}'
    view_key_tx_hash = run(f"docker exec secretdev secretcli tx compute execute {secret_contract_address} "
                           f"'{json_q}' --from {a_address} --gas 3000000 -b block -y | jq '.txhash'",
                           shell=True, stdout=PIPE)
    view_key_tx_hash = view_key_tx_hash.stdout.decode().strip()[1:-1]

    view_key = run(f"docker exec secretdev secretcli q compute tx {view_key_tx_hash} | jq '.output_log' | "
                   f"jq '.[0].attributes[1].value'", shell=True, stdout=PIPE).stdout.decode().strip()[1:-1]

    return view_key
python
class BitVector(object):
    """Infinite array of bits stored in a single arbitrary-precision integer."""

    def __init__(self):
        self.BitNum = 0
        self.length = 0

    def set(self, i):
        self.BitNum = self.BitNum | 1 << i
        self.length = self.BitNum.bit_length()

    def reset(self, i):
        # Assumes bit i is currently set; subtracting 1 << i clears it.
        resetValue = 1 << i
        self.BitNum = self.BitNum - resetValue
        self.length = self.BitNum.bit_length()

    def at(self, i):
        if i < 0:
            raise ValueError
        if i >= self.length:
            return 0
        return int(bin(self.BitNum)[-(i + 1)])

    def __repr__(self):
        return bin(self.BitNum)[2:]

    def __str__(self):
        return bin(self.BitNum)[2:]
python
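A brief usage sketch for the BitVector class above; the printed values follow directly from the implementation (bits live in a single integer, so set(3) yields 0b1000):

bv = BitVector()
bv.set(3)
bv.set(1)
print(bv)         # 1010
print(bv.at(1))   # 1
print(bv.at(2))   # 0
bv.reset(1)       # clears bit 1 (which is currently set)
print(bv)         # 1000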
#!/usr/bin/env python
# coding=utf-8
from __future__ import unicode_literals, print_function, division

import sys
import binascii

from diameterparser.decode_diameter import decode_diameter


def convertMac(octet):
    mac = [binascii.b2a_hex(x) for x in list(octet)]
    return "".join(mac)


class DiameterConn:
    def __init__(self):
        self.diameter = decode_diameter()

    def decode(self, input_hex):
        headerinfo, tree = self.diameter.decode(convertMac(input_hex))
        return headerinfo, tree
python
# Copyright (c) 2013, 2018 National Technology and Engineering Solutions of Sandia, LLC. # Under the terms of Contract DE-NA0003525 with National Technology and Engineering Solutions # of Sandia, LLC, the U.S. Government retains certain rights in this software. # standard library import os import hashlib import pickle import time import base64 import inspect import queue import threading # 3rd party library import cherrypy # local imports import slycat.web.server # public exports from this module __all__ = ["CacheError", "Cache"] # error catching for the cache class CacheError(Exception): """ generic cached object error """ pass class TimeError(CacheError): """ time error used for when the time is in the wrong format """ pass class LifetimeError(CacheError): """ extension of the cached error where the lifetime of the cache object has expired """ pass # a cached object consists of a value and an expiration # as well as a thread lock class CachedObjectWrapper(object): """ class used to wrap any object placed in the cache """ # lock on cached object __lock = threading.Lock() def __init__(self, value, expiration=None): """ creates a cached object with a cached items and an expiration :param value: item being wrapped :param expiration: time until the item is expire :return: not used """ self._value = value self._expiration = expiration @property def lock(self): """ threading.Lock() used to control crud operations to the cache. :return: """ return self.__lock @property def value(self): """ returns the object that is being wrapped by the cache :return: object """ return self._value @property def expiration(self): """ return the expiration time for the cached object, could return none if there is no expiration :return: expiration object """ return self._expiration @expiration.setter def expiration(self,expiration): """ set the expiration time for the cached object, could return none if there is no expiration :return: expiration object """ self._expiration = expiration def expired(self): """ return true or false as to weather the object is expired or not returns false if none :return: boolean """ if self.expiration is None: expired = False else: expired = (self.expiration < time.time()) return expired class Cache(object): """ decorator class used to cache """ # lock on entire cache _lock = threading.Lock() def __init__(self, fs_cache_path=None, **kwargs): """ takes a filepath and and the following time stamps - years (31,556,900 seconds per year) - months (2,629,740 seconds per month) - weeks (604,800 seconds per week) - days (86,400 seconds per day) - hours (3600 seconds per hour) - minutes (60 seconds per minute) - seconds - None :param path: path as a string to the :param kwargs: time stamp """ if kwargs: self._init_expire_time = self.to_seconds(**kwargs) # we need a time greater than 0 if self._init_expire_time <= 0: msg = "[CACHE] Lifetime (%s seconds) is 0 or less." 
% self._init_expire_time cherrypy.log.error(msg) raise LifetimeError(msg) else: # no expiration time self._init_expire_time = None # set up an in memory cache self._loaded = {} # set path for file system if fs_cache_path: self._fs_cache_path = os.path.abspath(fs_cache_path) # make cache directory unless it already exists if not os.path.exists(self._fs_cache_path): os.makedirs(self._fs_cache_path) else: self._fs_cache_path = None def check_fs_path(self): """ This function is used to set the file path as it does not exist when the cache is created in the server/__init__.py :return: """ # creates slycat web server cache, if it doesn't already exist if not self._fs_cache_path: cherrypy.log.error("[CACHE] %s is the cache location." % (slycat.web.server.config["slycat-web-server"]["cache-store"])) self._fs_cache_path = os.path.abspath( slycat.web.server.config["slycat-web-server"]["cache-store"]) if not os.path.exists(self._fs_cache_path): os.makedirs(self._fs_cache_path) def __getitem__(self, key): """ get the item from the cache :param key: hashed key for item in cache :return: value associate with key or None if not found """ # check for slycat path self.check_fs_path() # is item in cache? if key in self: # get hash and value digest = self.digest_hash(key) value = self._loaded[digest].value expired = self._loaded[digest].expired() # if expired, erase and return None if expired: self.expire(digest) return None else: return None # cherrypy.log.error("[CACHE] Retrieving %s from cache." % str(digest)) return value def __setitem__(self, key, value): """ set the key:value in the cache. if it is already in the cache it gets replaced by new value :param key: hashed representation of the function :param value: stored result from the function :return: not used """ # create slycat file path if it doesn't exist self.check_fs_path() # get hash and path digest_hash = self.digest_hash(key) path = os.path.join(self._fs_cache_path, digest_hash) # if item exists, erase it if (digest_hash in self._loaded) or os.path.exists(path): self.expire(digest_hash) # create new copy in cache cached_contents = CachedObjectWrapper(value, expiration=self.cached_item_expire_time()) self.write(cached_contents, path) self._loaded[digest_hash] = cached_contents # cherrypy.log.error ("[CACHE] Added %s to cache." % str(digest_hash)) def __delitem__(self, digest_hash): """ Removes the hash keyed object from memory but not from the filesystem. see function expire to remove from both :param key: item to be removed from memory :return: not used """ # check slycat path self.check_fs_path() if digest_hash in self._loaded: del self._loaded[digest_hash] else: msg = "[CACHE] Cannot delete object at %s -- not loaded in memory" % str(digest_hash) raise CacheError(msg) def __contains__(self, item): """ check if item is in the cache, true if in the cache false otherwise :param item: item to search for in cache :return: boolean """ # check for slycat path self.check_fs_path() # create hash from item digest = self.digest_hash(item) # get the item from the cache if digest in self._loaded: value = self._loaded[digest] # item was not in memory, check file system else: try: value = self._load(digest, item) except CacheError: # item was not in the cache or the file system return False # check if it has expired if value.expired(): # cherrypy.log.error("[CACHE] value is expired for %s." 
% str(item)) # contents were expired so we should delete them and return false self.expire(digest) return False return True def __call__(self, f): """ This is the decorator cache call :param f: function to be wrapped :return: results of the function either from the cache or the function itself """ # retrieve function id? function_meta_data = inspect.getmembers(f) try: fid = (function_meta_data.__name__, inspect.getargspec(f)) except (AttributeError, TypeError): fid = (f.__name__, repr(type(f))) def _f(*args, **kwargs): key = (fid, args, kwargs) # check if we have cached the result if key in self: result = self[key] # adding a null guard if result is None: # cherrypy.log.error("[CACHE] Cache key error adding object to cache.") result = f(*args, **kwargs) self[key] = result # we have not cached the result so lets get it else: # cherrypy.log.error("[CACHE] NOT found in cache") result = f(*args, **kwargs) self[key] = result return result return _f def expire(self, digest_hash): """ Permanently removes the item, both in the memory and in the filesystem. """ # remove from filesystem if digest_hash in self.fs_keys: self._remove(digest_hash) # remove from memoruy if digest_hash in self.v_keys: try: del self[digest_hash] except CacheError as e: cherrypy.log.error("[CACHE] error deleting item %s" % str(e)) def _remove(self, digest): """ Removes the cache item keyed by `key` from the file system. """ path = os.path.join(self._fs_cache_path, digest) if os.path.exists(path): try: os.remove(path) except: msg = "[CACHE] No object for key `%s` stored." % str(path) cherrypy.log.error(msg) else: msg = "[CACHE] No object for key `%s` stored." % str(path) cherrypy.log.error(msg) def unload(self, k): """ Removes the object keyed by k from virtual memory only. :param k: :return: """ digest = self.digest_hash(k) if digest in self._loaded: del(self._loaded[digest]) def load(self, key): """ Causes the object keyed by `k` to be loaded from the file system and returned. It therefore causes this object to reside in memory (if it exists in the cache). """ return self[key] def _load(self, digest, k): """ Loads the :class:`CacheObject` keyed by `k` from the file system (residing in a file named by `digest`) and returns the object. This method is part of the implementation of :class:`FSCache`, so don't use it as part of the API. """ # load from file, if possible path = os.path.join(self._fs_cache_path, digest) if os.path.exists(path): # cherrypy.log.error("[CACHE] %s fs path cache found" % (path)) contents = self.read(path) else: msg = "[CACHE] Object for key `%s` does not exist." % (k,) raise CacheError(msg) # store in cache self._loaded[digest] = contents return contents def cached_item_expire_time(self): """ Returns an expiry for the cache in seconds as if the start of the expiration period were the moment at which this the method is called. >>> import time >>> c = Cache('cache/dir', seconds=60) >>> round(c.cached_item_expire_time() - time.time(), 3) 60.0 """ if self._init_expire_time is None: x = None else: x = self._init_expire_time + time.time() return x @property def v_keys(self): """ Returns a list of virtual memory keys. :return: keys for virtual cache """ return list(self._loaded.keys()) @property def fs_keys(self): """ Returns the names of the files in the cache on the filesystem. 
:return: list of names of cached files """ return os.listdir(self._fs_cache_path) def clean(self): """ clean the in memory and fs cache recommended to call this by some thread under a certain time interval :return: not used """ cherrypy.log.error("[CACHE] starting the cleaning session for the file system cache") # check for slycat path self.check_fs_path() # remove expired files from cache for f in os.listdir(self._fs_cache_path): path = os.path.join(self._fs_cache_path, f) try: contents = self.read(path) if contents.expired(): cherrypy.log.error("[CACHE] expired content found -- deleting %s." % f) self.expire(f) except CacheError as e: cherrypy.log.error("[CACHE] error deleting item %s." % str(e)) # remove expired items from memory (should have been removed by above) for key in self.v_keys: if self._loaded[key].expired(): self.expire(key) def clear(self): """ clear cache items from virtual memory. :return: not used """ self._loaded.clear() def purge(self): """ empties the cache from fs and v memory :return: not used """ for f in os.listdir(self._fs_cache_path): path = os.path.join(self._fs_cache_path, f) os.remove(path) self.clear() @property def lock(self): """ threading.Lock() used to control crud operations to the cache. :return: """ return self._lock @staticmethod def digest_hash(key): """ Creates a digest hash >>> adict = {'a' : {'b':1}, 'f': []} >>> Cache.digest_hash(adict) 'a2VKynHgDrUIm17r6BQ5QcA5XVmqpNBmiKbZ9kTu0A' :param key: key to hash :return: digest hash of key """ digest_hash = hashlib.sha256(str(key).encode()).digest() b64_digest_hash = str(base64.urlsafe_b64encode(digest_hash)[:-2]) return b64_digest_hash.replace('-', '=') def read(self, filename): """ Helper function that simply pickle loads the first object from the file named by `filename`. """ with self.lock: # load file or raise exception try: with open(filename, 'rb') as loaded_file: loaded_obj = pickle.load(loaded_file) except Exception as e: msg = "[CACHE] Cache read file error %s." % str(e) raise CacheError(msg) return loaded_obj def write(self, obj, filename): """ writes an object to the selected file path """ with self.lock: try: with open(filename, 'wb') as cache_file: pickle.dump(obj, cache_file, protocol=pickle.HIGHEST_PROTOCOL) except Exception as e: msg = "[CACHE] Write error failure %s." % str(e) raise CacheError(msg) # all the remaining methods deal with time stamp conversion @staticmethod def years_to_seconds(years): """ Converts years to seconds. :return: float """ return 3.15569e7 * years @staticmethod def months_to_seconds(months): """ Converts months to seconds. :return: float """ return 2.62974e6 * months @staticmethod def weeks_to_seconds(weeks): """ Converts weeks to seconds. :return: float """ return 604800.0 * weeks @staticmethod def days_to_seconds(days): """ Converts days to seconds. :return: float """ return 86400.0 * days @staticmethod def hours_to_seconds(hours): """ Converts hours to seconds. :return: float """ return 3600.0 * hours @staticmethod def minutes_to_seconds(minutes): """ Converts minutes to seconds. :return: float """ return 60.0 * minutes @staticmethod def seconds_to_seconds(seconds): """ Converts seconds to seconds as a float. :return: float """ return float(seconds) @staticmethod def to_seconds(**kwargs): """ Converts keyword arguments to seconds. 
>>> Cache.to_seconds(seconds=1, minutes=1, hours=1, days=1, weeks=1, months=1, years=1) 34881501.0 >>> Cache.to_seconds(seconds=1, minutes=1) 61 :param kwargs: The the keyword arguments can have the following keys: - years (31,556,900 seconds per year) - months (2,629,740 seconds per month) - weeks (604,800 seconds per week) - days (86,400 seconds per day) - hours (3600 seconds per hour) - minutes (60 seconds per minute) - seconds :return: number of seconds as a float """ time_converter_map = {"years": Cache.years_to_seconds, "months": Cache.months_to_seconds, "weeks": Cache.weeks_to_seconds, "days": Cache.days_to_seconds, "hours": Cache.hours_to_seconds, "minutes": Cache.minutes_to_seconds, "seconds": Cache.seconds_to_seconds} # converts keywords arguments to seconds seconds = [] for key, value in list(kwargs.items()): if key in time_converter_map: seconds.append(time_converter_map[key](value)) else: msg = "invalid time argument: %s" % key raise TimeError(msg) return sum(seconds) # using main to test Cache code if __name__ == "__main__": # starting cache tests print() print("Testing cache.py") print("================") # remove cache # cache = Cache("cache/dir") # cache.purge() # test time calculations assert Cache.to_seconds(seconds=1, minutes=1) == 61, \ "time is not calculated correctly should be 61" assert Cache.to_seconds(seconds=1, minutes=1, hours=1, days=1, \ weeks=1, months=1, years=1) == 34881501.0, \ "time is not calculated correctly should be 34881501.0" try: Cache.to_seconds(not_a_key=1, minutes=1) except TimeError as e: assert str(e) == 'invalid time argument: not_a_key', "did not catch bad key" # create cache in cache/dir, expires in 20 seconds cache = Cache("cache/dir", seconds=20) # create cache function @cache def test(seed=1): """ test function :param seed: some garbage number :return: seed + test + random in a string """ import random print("test(): not cached") return str(seed) + " test " + str(random.random()) # cache should be empty print("Retrieving non-existing value from cache: ") print(cache["bark"]) print() # test cache function print("Calling cache function 'test()':") print(test()) print() # test cache function with different seeds print("Calling cache function test(seed=2):") print((test(seed=2))) print() print("Calling cache function test(seed=3):") print((test(seed=3))) print() # add item to cache print("Adding {'meow': 'xyz'} to cache.") cache["meow"] = "xyz" print("Retrieving 'meow': " + cache["meow"]) print() # change item in cache print("Adding {'meow': 'rgb'} to cache.") cache["meow"] = "rgb" print("Retrieving 'meow': " + cache["meow"]) print() # adding empty value to cache try: empty_obj = cache.read('cache/dir/no-object.pkl') except CacheError: print("Failed to load non-existing cache file.\n") # load from cache meow = cache.load("meow") print("Loading 'meow' from cache.") print(meow) print() # print hash keys print("Virtual hash keys:") print(cache.v_keys) print() # print has keys fs print("Filesystem hash keys:") print(cache.fs_keys) print() # load expired from cache cache.expire(cache.digest_hash("meow")) meow = cache.load("meow") print("Loading non-existent key from cache.") print(meow) print()
python
from materials_io.base import BaseParser, BaseSingleFileParser
from glob import glob
import pytest
import os


class FakeParser(BaseParser):

    def parse(self, group, context=None):
        return {'group': list(group)}

    def implementors(self):
        return ['Logan Ward']

    def version(self):
        return '0.0.0'


class FakeSingleParser(BaseSingleFileParser):

    def _parse_file(self, path, context=None):
        return {'dirname': os.path.dirname(path)}

    def implementors(self):
        return ['Logan Ward']

    def version(self):
        return '0.0.0'


@pytest.fixture
def directory():
    return os.path.dirname(__file__)


@pytest.fixture
def parser():
    return FakeParser()


@pytest.fixture
def my_files(directory):
    return [p for p in glob(os.path.join(directory, '**', '*'), recursive=True)
            if os.path.isfile(p)]


def test_group(parser, directory, my_files):
    groups = set(parser.group(my_files))
    assert groups == set(zip(my_files))  # Each file in its own group


def test_parse_dir(caplog, parser, directory, my_files):
    assert len(list(parser.parse_directory(directory))) == len(my_files)


def test_citations(parser):
    assert parser.citations() == []


def test_single_file(directory):
    parser = FakeSingleParser()
    assert parser.parse(__file__) == {'dirname': directory}

    # Handle sensibly incorrect inputs
    assert parser.parse([__file__]) == {'dirname': directory}
    with pytest.raises(ValueError):
        parser.parse(['/fake/file.in', '/fake/file.out'])
python
"""Base class for all linear models. Subclasses must implement their own _fit_regression, _fit_classifier, and _iter_minibatches functions. Everything else (prediction, generating model summaries, saving, loading, one-vs-rest training) is handled by this. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from collections import defaultdict from collections import namedtuple import math import os import time import numpy as np from sklearn import linear_model from tqdm import tqdm import sys; sys.path.append('../..') from src.models.abstract_model import Model from src.models.abstract_model import Prediction import src.msc.utils as utils # Singleton class for packaging the results of an individual regression or # classification model. For ordinal variables with multiple levels, the system # trains a separate regression per level. # See: https://en.wikipedia.org/wiki/Multiclass_classification#One-vs.-rest ModelResult = namedtuple('ModelResult', ('model', 'response_type', 'weights')) class Regression(Model): """Base class for all linear models.""" def __init__(self, config, params, intercept=True): """Initializes a Regression by unpacking the target and confound variables. Args: config: NamedTuple, a config.yaml file that's been parsed into an object. params: dict, the part of the config which has to do with this model. Note that this dict is a member of config.model_spec. intercept: bool, whether or not we should fit an intercept. """ Model.__init__(self, config, params) # This dict maps variable names to the model for that variable. # If a variable is categorical, then instead of a model for a value, # the value is a nested dictionary which maps categorical levels # to the model for that level. self.models = {} self.use_intercept = intercept # Get all of the variables which are prediction targets, as well # as all of the variables which are confounders. variables = [ v for v in self.config.data_spec[1:] if not v.get('skip', False) ] self.targets = [ variable for variable in variables if not variable['control'] ] self.confounds = [variable for variable in variables if variable['control']] self.confound_names = [variable['name'] for variable in self.confounds] self.lmbda = self.params.get('lambda', 0) self.regularizer = self.params['regularizer'] if self.lmbda > 0 else None def save(self, model_dir): """Saves all of the models in self.models into `model_dir`. The models are saved as serialized pickle objects. See: https://docs.python.org/3/library/pickle.html Args: model_dir: string, the directory to save into. """ if not os.path.exists(model_dir): os.makedirs(model_dir) models_file = os.path.join(model_dir, 'models') utils.pickle(self.models, models_file) print('REGRESSION: models saved into %s' % models_file) def load(self, dataset, model_dir): """Loads self.models from `model_dir`.""" start = time.time() self.models = utils.depickle(os.path.join(model_dir, 'models')) target_names = [x['name'] for x in self.targets] assert set(target_names) == set(self.models.keys()) print('REGRESSION: loaded model parameters from %s, time %.2fs' % ( model_dir, time.time() - start)) def _summarize_model_weights(self): """Gets a single "importance value" for each feature from self.models.""" out = {} for variable_name, variable_result in self.models.items(): # This means that the current variable is categorical, since # self.models[categorical variable] maps to a {level => ModelResult} # dictionary. 
if isinstance(variable_result, dict): for level_name, level_result in variable_result.items(): if variable_name not in out: out[variable_name] = {} out[variable_name][level_name] = level_result.weights else: out[variable_name] = variable_result.weights return out def inference(self, dataset, model_dir): """Uses self.models to perform inference over a dataset. Args: dataset: src.data.dataset.Dataset, the dataset for performing inference. model_dir: string, unused, but possibly used by subclasses. Returns: A src.models.abstract_model.Prediction object. """ print('REGRESSION: getting data for inference...') x, _, features = next(self._iter_minibatches(dataset)) predictions = defaultdict(dict) for response_name, model in self.models.iteritems(): if isinstance(model, dict): # Convert {level: scores} to 2d matrix with columns: # level1 score, level2 score, etc # (where ordering is determined by the dataset). response_levels = dataset.num_levels(response_name) arr = np.array([ self._predict(x, features, model[dataset.id_to_class_map[response_name][level]]) for level in range(response_levels) ]) # Squeeze out empty dimensions. if len(arr.shape) > 2: arr = np.squeeze(arr, axis=2) predictions[response_name] = np.transpose(arr, [1, 0]) else: predictions[response_name] = self._predict(x, features, model) average_coefs = self._summarize_model_weights() return Prediction(scores=predictions, feature_importance=average_coefs) def _predict(self, x, feature_names, model): """Uses a model to create predictions for a bunch of covariates X. We are not using sklearn's predict() function because feature_names might be a subset of x's columns, which is a case that sklearn does not support. Args: x: np array [n examples, n features], the covariates to be inputted to the model. feature_names: list(string), column names for X. model: an instance of sklearn.linear_model, the model we are using for inference. Returns: out: list(float) or list(list(float)), predictions for each `x`. """ def score(example): s = 0 for xi, feature in zip(example, feature_names): s += model.weights.get(feature, 0) * xi s += (model.weights['intercept'] if self.use_intercept else 0) return s out = [] for row in tqdm(x): s = score(np.squeeze(row)) if model.response_type == 'continuous': out.append(s) else: try: out.append(1.0 / (1 + math.exp(-s))) except OverflowError: out.append(1.0 if s > 0 else 0) return out def _fit_one_vs_rest(self, dataset, target, features=None): """Fits a classifier to each level of a categorical variable (`target`). See: https://en.wikipedia.org/wiki/Multiclass_classification#One-vs.-rest Args: dataset: dataset.Dataset, the data we are fitting. target: dict, a member of config.data_spec, the variable we are predicting. features: list(string), an optional subset of the features we should restrict the model to. Returns: models: dict(string => regression_base.ModelResult): a trained model per level of the target variable. """ models = {} # class_to_id is a nested dict where # each key (each categorical var) points to a dict mapping to ids. # So we are looping through all the possible classes of this categorical # variable. 
for level in dataset.class_to_id_map[target['name']].keys(): models[level] = self._fit_classifier( dataset, target, level=level, features=features) return models def train(self, dataset, model_dir, features=None): """Trains a model for each target.""" for target in self.targets: if target['type'] == utils.CONTINUOUS: self.models[target['name']] = self._fit_regression( dataset=dataset, target=target, features=features) else: self.models[target['name']] = self._fit_one_vs_rest( dataset=dataset, target=target, features=features) def _iter_minibatches(self, dataset, target_name=None, features=None, level=None, batch_size=None): """Continuously loops over the `dataset` and yields (covariate, Y) pairs. If batch_size is None then we iterate once. Otherwise the generator will continuously cycle over the data. Args: dataset: src.data.dataset.Dataset, the dataset we are iterative over. target_name: string, the name of the variable that should be used for the targets (Y). features: list(string), a subset of the features that we should select when pulling X from the data. If this isn't provided, then X will include all features in the data. level: string, the categorical level which is to be retrieved for Y. If supplied, Y is assumed to be categorical. batch_size: int, the batch size to use. Yields: x: np.array(int) [batch size, num features], word occurrence vectors for a batch of examples. If example i has feature j then x[i, j] == 1. y: np.array(int or float) [batch size], target labels for x. If the target class is C, then y[i] == 1 if example i belongs to C. x_features: list(string), column names for x, i.e. an ordered list of feature names. """ i = 0 while True: start = i end = (i + batch_size if batch_size else None) # If target_name is missing, we are doing inference so y can be None. if target_name is not None: y = dataset.y_batch(target_name, level, start, end) else: y = None x, x_features = dataset.text_x_batch(features, start, end) yield x, y, x_features # If batch_size is missing, we are returning the whole dataset so # no need to keep iterating. if batch_size is None: break i += batch_size if i + batch_size > dataset.split_sizes[dataset.current_split]: i = 0 def _sklearn_weights(self, model, feature_names): """Gets a feature_name=>weight mapping for the model.""" weights = {} for w, f in zip(np.squeeze(model.coef_), feature_names): weights[f] = w if self.use_intercept: weights['intercept'] = model.intercept_ return weights def _fit_regression(self, dataset, target, level=None, features=None): """Fits a regression -- to be implemented by subclasses. This method updates self.model[target] with the trained model and does not return anything. Args: dataset: src.data.dataset.Dataset, the data which is to be used for fitting. target: string, the name of the target variable. level: string, the target's sub-class. If this isn't specified, the system will assume that the target is monolithic. features: list(string), a subset of dataset.vocab which is to be used while fitting. Returns: regression_base.ModelResult, the fitted parameters. 
""" iterator = self._iter_minibatches( dataset=dataset, target_name=target['name'], features=features, batch_size=self.params['batch_size'], level=level) print('REGRESSION: fitting target %s', target['name']) model = linear_model.SGDRegressor( penalty=self.regularizer or 'none', alpha=self.lmbda, learning_rate='constant', eta0=self.params.get('lr', 0.001)) for _ in tqdm(range(self.params['num_train_steps'])): xi, yi, x_features = next(iterator) model.partial_fit(xi, yi) return ModelResult( model=model, weights=self._sklearn_weights(model, x_features), response_type='continuous') def _fit_classifier(self, dataset, target, level=None, features=None): """Fits a classifier -- to be implemented by subclasses. Multiclass classification is done with OVR (one versus rest) classification. This means that there is a separate regression for each class, and each of these regressions is trained to pick this class out. This method updates self.model[target] with the trained model and does not return anything. Args: dataset: src.data.dataset.Dataset, the data to be used for fitting. target: string, the name of the target variable. level: string, the target's sub-class. If this isn't specified, the system will assume that the target is monolithic. features: list(string), a subset of dataset.vocab which is to be used while fitting. Returns: regression_base.ModelResult, the fitted parameters. """ iterator = self._iter_minibatches( dataset=dataset, target_name=target['name'], features=features, level=level, batch_size=self.params['batch_size']) print('CLASSIFICATION: fitting target %s, level %s', target['name'], level) model = linear_model.SGDClassifier( loss='log', penalty=(self.regularizer or 'none'), alpha=self.lmbda, learning_rate='constant', eta0=self.params.get('lr', 1.0)) for _ in tqdm(range(self.params['num_train_steps'])): xi, yi, x_features = next(iterator) model.partial_fit(xi, yi, classes=[0., 1.]) return ModelResult( model=model, weights=self._sklearn_weights(model, x_features), response_type='categorical')
python
# Copyright 2020 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import kfp.dsl as dsl
from kubernetes.client import V1Volume, V1SecretVolumeSource, V1VolumeMount, V1EnvVar


@dsl.pipeline(
    name='Volume',
    description='A pipeline with volume.'
)
def volume_pipeline():
    op1 = dsl.ContainerOp(name='download',
                          image='google/cloud-sdk',
                          command=['sh', '-c'],
                          arguments=['ls | tee /tmp/results.txt'],
                          file_outputs={'downloaded': '/tmp/results.txt'})
    op1.add_volume(V1Volume(name='gcp-credentials',
                            secret=V1SecretVolumeSource(secret_name='user-gcp-sa')))
    op1.container.add_volume_mount(V1VolumeMount(mount_path='/secret/gcp-credentials',
                                                 name='gcp-credentials'))
    op1.container.add_env_variable(V1EnvVar(name='GOOGLE_APPLICATION_CREDENTIALS',
                                            value='/secret/gcp-credentials/user-gcp-sa.json'))
    op1.container.add_env_variable(V1EnvVar(name='Foo', value='bar'))
    op2 = dsl.ContainerOp(name='echo',
                          image='library/bash',
                          command=['sh', '-c'],
                          arguments=['echo %s' % op1.output])


if __name__ == '__main__':
    from kfp_tekton.compiler import TektonCompiler
    TektonCompiler().compile(volume_pipeline, __file__.replace('.py', '.yaml'))
python
# import asyncio # import requests # import json # import re import os import discord from discord.ext import commands, tasks from discord_slash import SlashCommand, SlashContext from itertools import cycle import keep_alive # # grabbing the config file # with open('config.json') as config_file: # secrets = json.load(config_file) # grabbing keys token = os.getenv("bot_token") key = os.getenv("api_key") # intents so bot can see members from DMs intents = discord.Intents(messages=True, reactions=True, members=True, guilds=True, presences=True) # bot info bot = commands.Bot( command_prefix='!' , description='Bot to help Exogen players make calculations, and for mods/admins to manage the server.' , case_insensitive=True , intents=intents ) slash = SlashCommand(bot, sync_commands=True) # background task to keep bot awake when web-hosted on Repl.it status = cycle(['Exogen ░░░░░░░░', 'Exogen ░░░░░░░▒', 'Exogen ░░░░░░▒▓', 'Exogen ░░░░░▒▓▒', 'Exogen ░░░░▒▓▒░', 'Exogen ░░░▒▓▒░░', 'Exogen ░░▒▓▒░░░', 'Exogen ░▒▓▒░░░░', 'Exogen ▒▓▒░░░░░', 'Exogen ▓▒░░░░░░', 'Exogen ▒░░░░░░░', 'Exogen ░░░░░░░░', 'Exogen ▒░░░░░░░', 'Exogen ▓▒░░░░░░', 'Exogen ▒▓▒░░░░░', 'Exogen ░▒▓▒░░░░', 'Exogen ░░▒▓▒░░░', 'Exogen ░░░▒▓▒░░', 'Exogen ░░░░▒▓▒░', 'Exogen ░░░░░▒▓▒', 'Exogen ░░░░░░▒▓', 'Exogen ░░░░░░░▒']) # @bot.event # async def on_ready(): # change_status.start() # print("Your bot is ready") @tasks.loop(seconds=2) async def change_status(): await bot.change_presence(activity=discord.Game(next(status))) # gathering the commands cogs = [ 'cogs.mod' , 'cogs.advisors' , 'cogs.calcs' ] # limiting the eval command to just the bot owner @bot.command(name='eval', hidden=True) @commands.is_owner() async def _eval(ctx, *, code): await ctx.send(eval(code)) @_eval.error async def eval_error(error, ctx): if isinstance(error, commands.MissingPermissions): text = "Sorry {}, you do not have permissions to do that!".format(ctx.message.author) await ctx.send(ctx.message.channel, text) # command that DMs the sender @bot.command( pass_context=True, name='direct_message', description='Initiates a DM with the user.', help='starts a DM with the user', aliases=['dm'], usage='' ) async def dm(ctx): await ctx.author.send("Hey, what do you need?") # slash command that DMs the sender @slash.slash( name='direct_message', description='Initiates a DM with the user.', guild_ids=[637447316856373268] ) async def _dm(ctx): await ctx.author.send("Hey, what do you need?") await ctx.send("Sliding into those DMs.") @bot.event async def on_member_join(member): guild = member.guild channel = bot.get_channel(813417162249207818) await channel.edit(name=f'members {guild.member_count}') rules = bot.get_channel(704733802223894648) nav = bot.get_channel(771885969715626005) role = discord.utils.get(member.guild.roles, id=906375433329725451) await member.add_roles(role) await member.send("Welcome, {}!".format(member.name)) await member.send("Please check out the {} before heading over to {} to see where things are located." 
.format(rules.mention, nav.mention)) await member.send("If you are unfamiliar with Exogen, feel free to check out the manual:\n" "https://discordapp.com/channels/637447316856373268/704724317279092756/705170179893624943\n" "And for advice on getting your corporation up and running, check out this startup guide from " "the Pale Blue Dot megacorp:\n" "https://discord.com/channels/637447316856373268/704733458227789937/745698128627236965") @bot.event async def on_member_remove(member): guild = member.guild channel = bot.get_channel(813417162249207818) await channel.edit(name=f'members {guild.member_count}') @bot.event async def on_raw_reaction_add(payload): guild = bot.get_guild(payload.guild_id) member = discord.utils.get(guild.members, id=payload.user_id) # RP reaction role if payload.channel_id == 774834872719507496 and payload.message_id == 774845668745019392: role = discord.utils.get(payload.member.guild.roles, name="RP opt in") if str(payload.emoji) == '<:BHC:749478461562683443>': await payload.member.add_roles(role) # wiki reaction role elif payload.channel_id == 794598980973363210 and payload.message_id == 794600306532548618: role = discord.utils.get(payload.member.guild.roles, name="Researcher") if str(payload.emoji) == '<:ArchangelFoundation:749053627947286548>': await payload.member.add_roles(role) # rules reaction role # elif payload.channel_id == 704733802223894648 and payload.message_id == 706999325556867163: # role = discord.utils.get(payload.member.guild.roles, name="Accepted Rules") # role_b = discord.utils.get(payload.member.guild.roles, name="Not Accepted Rules") #id=906375433329725451) # if str(payload.emoji) == '<:Exogen:749051544745541744>': # or str(payload.emoji) == '👍': # await payload.member.add_roles(role) # # await payload.member.remove_roles(role_b) @bot.event async def on_raw_reaction_remove(payload): guild = bot.get_guild(payload.guild_id) member = discord.utils.get(guild.members, id=payload.user_id) # RP reaction role if payload.channel_id == 774834872719507496 and payload.message_id == 774845668745019392: role = discord.utils.get(guild.roles, name="RP opt in") if str(payload.emoji) == '<:BHC:749478461562683443>': await member.remove_roles(role) # wiki reaction role elif payload.channel_id == 794598980973363210 and payload.message_id == 794600306532548618: role = discord.utils.get(guild.roles, name="Researcher") if str(payload.emoji) == '<:ArchangelFoundation:749053627947286548>': await member.remove_roles(role) # rules reaction role # elif payload.channel_id == 704733802223894648 and payload.message_id == 706999325556867163: # role = discord.utils.get(guild.roles, name="Accepted Rules") # role_b = discord.utils.get(member.guild.roles, name="Not Accepted Rules") #id=906375433329725451) # if str(payload.emoji) == '<:Exogen:749051544745541744>': # or str(payload.emoji) == '👍': # await member.remove_roles(role) # # await member.add_roles(role_b) # bot start up event @bot.event async def on_ready(): print("The bot is ready!") print(f'Logged in as: {bot.user.name} - {bot.user.id}') print(f'Discord version is: {discord.__version__}') print('------------------------------------------------------') await bot.change_presence(activity=discord.Game(name="Exogen")) change_status.start() for cog in cogs: bot.load_extension(cog) print(f'{cog} is ready.') print('------------------------------------------------------') return # run Flask script to keep bot online keep_alive.keep_alive() # run bot # bot.run(secrets['token']) bot.run(token)
python
from sys import exit
import json
from time import sleep

from confluent_kafka import Consumer, KafkaError

ERROR_CODE_ZERO = 0
ERROR_CODE_ONE = 1
EMPTY_ERROR_MESSAGE = ""
PAUSE = 3


class KafkaConsumer:

    def __init__(self, settings, client_id, timeout, auto_commit):
        self._settings = settings
        self._timeout = timeout
        self._auto_commit = auto_commit
        self._client_id = client_id
        self._consumer = self._build_consumer()

    def subscribe(self, topic_name, timeout):
        """
        Subscribe to a topic and listen for new messages until the Kafka
        consumer times out, which ends the session.
        :param topic_name: the topic name
        :param timeout: maximum time to block waiting for a message, event or callback
        :return: error_code, error, message
        """
        self._consumer.subscribe([topic_name])
        try:
            while True:
                msg = self._consumer.poll(timeout)
                if msg is None:
                    continue
                elif not msg.error():
                    yield ERROR_CODE_ZERO, EMPTY_ERROR_MESSAGE, json.loads(msg.value())
                elif msg.error().code() == KafkaError._PARTITION_EOF:
                    yield ERROR_CODE_ONE, 'End of partition reached {0}/{1}'.format(msg.topic(), msg.partition()), None
                else:
                    yield ERROR_CODE_ONE, 'Error occurred: {0}'.format(msg.error().str()), None
                sleep(PAUSE)
        except KeyboardInterrupt:
            exit(0)
        finally:
            self._consumer.close()

    def _build_consumer(self):
        """
        Creates the Kafka consumer object.
        :return: confluent_kafka.Consumer
        """
        settings = {
            'bootstrap.servers': self._settings["fba_kafka_bootstrap_server"],
            'group.id': self._settings["fba_kafka_consumer_group_name"],
            'client.id': self._client_id,
            'enable.auto.commit': self._auto_commit,
            'session.timeout.ms': self._timeout,
            'security.protocol': 'SSL',
            'ssl.ca.location': self._settings["ssl_ca_location"],
            'ssl.certificate.location': self._settings["ssl_certificate_location"],
            'ssl.key.location': self._settings["ssl_key_location"],
            'ssl.key.password': self._settings["key_store_pass"],
            # 'auto.offset.reset': 'smallest'
        }
        try:
            cons = Consumer(settings)
            return cons
        except Exception as e:
            print("Error in creating the Consumer: ", e)
            # exit(1)
python
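A short usage sketch for the consumer above. The settings keys mirror the ones read in _build_consumer, but every value (broker address, certificate paths, group and topic names) is a placeholder for illustration, not a known configuration:

# Placeholder configuration -- replace with real broker and SSL values.
settings = {
    "fba_kafka_bootstrap_server": "broker.example.com:9093",
    "fba_kafka_consumer_group_name": "example-group",
    "ssl_ca_location": "/certs/ca.pem",
    "ssl_certificate_location": "/certs/client.pem",
    "ssl_key_location": "/certs/client.key",
    "key_store_pass": "changeit",
}
consumer = KafkaConsumer(settings, client_id="client-1", timeout=30000, auto_commit=True)
for error_code, error, message in consumer.subscribe("example-topic", timeout=1.0):
    if error_code == ERROR_CODE_ZERO:
        print(message)
    else:
        print("consumer error:", error)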
#!/usr/bin/env python3
"""
Both the web service and mosquitto are running locally.
MENSHNET_UNITTEST="yes" is defined.

1. Simulate the okta routine that creates the api key by calling the same
   endpoint in the server to generate an apiKey.
"""

import os
os.environ["MENSHNET_UNITTEST"] = "yes"

import menshnet
python
from abc import ABCMeta, abstractmethod


class Animal(metaclass=ABCMeta):

    def walk(self):
        print('Walking...')

    def eat(self):
        print('Eating...')

    @abstractmethod
    def num_legs(self):
        pass
python
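A small illustrative subclass of the abstract Animal class above; Dog is made up for demonstration:

class Dog(Animal):
    def num_legs(self):
        return 4


dog = Dog()
dog.walk()             # Walking...
print(dog.num_legs())  # 4
# Animal() itself raises TypeError because num_legs is abstract.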
class Solution:
    def solve(self, n):
        count = 1
        while n != 1:
            if n % 2 == 0:
                n //= 2
            else:
                n = 3 * n + 1
            count += 1
        return count
python
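The solve method above returns the length of the Collatz (3n + 1) sequence starting at n, counting both n and the final 1. For example:

sol = Solution()
print(sol.solve(1))   # 1   (sequence: 1)
print(sol.solve(6))   # 9   (6, 3, 10, 5, 16, 8, 4, 2, 1)
print(sol.solve(27))  # 112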
# -*- coding: utf-8 -*-
from flask import Flask, jsonify
from flask.ext.cors import CORS, cross_origin
from pymongo import MongoClient
import os

app = Flask(__name__)
CORS(app)

mongodb_host = '172.16.0.2'
mongodb_port = 27017
client = MongoClient(mongodb_host, mongodb_port)
collection = client.conflict_db.events


@app.route('/', methods=['GET'])
def hello_world():
    output = 'Hi, give me some parameter, would you?'
    return jsonify({'result': output})


@app.route('/markers/dyad=<int:dyad_new_id>&min=<int:minimum>&max=<int:maximum>', methods=['GET'])
@app.route('/markers/dyad=<int:dyad_new_id>', defaults={'minimum': None, 'maximum': None}, methods=['GET'])
@app.route('/markers', defaults={'dyad_new_id': None, 'minimum': None, 'maximum': None}, methods=['GET'])
def get_markers(dyad_new_id, minimum, maximum):
    output = []
    counter = 0

    if dyad_new_id is not None and minimum is None and maximum is None:
        print 'dyad is given'
        for q in collection.find({'dyad_new_id': dyad_new_id}, {'_id': False}).sort([('date_start', 1)]):
            output.append({'id': q['id'], 'lat': q['latitude'], 'lon': q['longitude'], 'time': q['date_start']})
            counter = counter + 1
        return jsonify({'result': output, 'records': counter})

    elif dyad_new_id is not None and minimum is not None and maximum is not None:
        print 'dyad, death_range are given'
        for q in collection.find({'dyad_new_id': dyad_new_id, 'best': {'$gte': minimum, '$lte': maximum}},
                                 {'_id': False}).sort([('date_start', 1)]):
            output.append({'id': q['id'], 'lat': q['latitude'], 'lon': q['longitude'], 'time': q['date_start']})
            counter = counter + 1
        return jsonify({'result': output, 'records': counter})

    if dyad_new_id is None and minimum is None and maximum is None:
        print 'nothing given'
        for q in collection.find({}, {'_id': False}).sort([('date_start', 1)]):
            output.append({'id': q['id'], 'lat': q['latitude'], 'lon': q['longitude'], 'time': q['date_start']})
            counter = counter + 1
        return jsonify({'result': output, 'records': counter})


@app.route('/details/<int:event_id>', methods=['GET'])
def get_details(event_id):
    q = collection.find_one({'id': event_id}, {'_id': False})
    if q:
        output = {'source_article': q['source_article'], 'where_coordinates': q['where_coordinates'],
                  'side_a': q['side_a'], 'side_b': q['side_b'], 'deaths_a': q['deaths_a'],
                  'deaths_b': q['deaths_b'], 'deaths_civilians': q['deaths_civilians'],
                  'deaths_unknown': q['deaths_unknown']}
    else:
        print q
        output = 'No results found'
    return jsonify({'result': output})


@app.route('/dyads', methods=['GET'])
def get_dyads():
    output = {}
    counter = 0
    ids = collection.distinct('dyad_new_id')
    names = collection.distinct('dyad_name')
    try:
        for q, w in enumerate(ids):
            output[w] = names[q]
            counter = counter + 1
    except:
        output = 'Things went terribly wrong'
    return jsonify({'result': output, 'records': counter})


@app.route('/death_range', methods=['GET'])
def get_minmax():
    output = {}
    divider = 8
    try:
        for q in collection.find({}, {'best': True, '_id': False}).sort([('best', 1)]).limit(1):
            best_min = q['best']
        for w in collection.find({}, {'best': True, '_id': False}).sort([('best', -1)]).limit(1):
            best_max = w['best']
    except:
        output = 'Things went terribly wrong'

    avg = (best_max - best_min + 1) / divider
    for x in range(0, divider):
        i = (best_min + x * avg)
        j = (best_min + (x + 1) * avg - 1)
        output[x] = str(i) + '-' + str(j)
    return jsonify({'result': output})


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5001, debug=True, threaded=True)
python
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto  # type: ignore


__protobuf__ = proto.module(
    package='google.ads.googleads.v7.enums',
    marshal='google.ads.googleads.v7',
    manifest={
        'RecommendationTypeEnum',
    },
)


class RecommendationTypeEnum(proto.Message):
    r"""Container for enum describing types of recommendations."""

    class RecommendationType(proto.Enum):
        r"""Types of recommendations."""
        UNSPECIFIED = 0
        UNKNOWN = 1
        CAMPAIGN_BUDGET = 2
        KEYWORD = 3
        TEXT_AD = 4
        TARGET_CPA_OPT_IN = 5
        MAXIMIZE_CONVERSIONS_OPT_IN = 6
        ENHANCED_CPC_OPT_IN = 7
        SEARCH_PARTNERS_OPT_IN = 8
        MAXIMIZE_CLICKS_OPT_IN = 9
        OPTIMIZE_AD_ROTATION = 10
        CALLOUT_EXTENSION = 11
        SITELINK_EXTENSION = 12
        CALL_EXTENSION = 13
        KEYWORD_MATCH_TYPE = 14
        MOVE_UNUSED_BUDGET = 15
        FORECASTING_CAMPAIGN_BUDGET = 16
        TARGET_ROAS_OPT_IN = 17
        RESPONSIVE_SEARCH_AD = 18
        MARGINAL_ROI_CAMPAIGN_BUDGET = 19


__all__ = tuple(sorted(__protobuf__.manifest))
python
import os import subprocess from setuptools import find_packages, setup from torch.utils.cpp_extension import BuildExtension, CUDAExtension def get_git_commit_number(): if not os.path.exists('.git'): return '0000000' cmd_out = subprocess.run(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE) git_commit_number = cmd_out.stdout.decode('utf-8')[:7] return git_commit_number def make_cuda_ext(name, module, sources): cuda_ext = CUDAExtension( name='%s.%s' % (module, name), sources=[os.path.join(*module.split('.'), src) for src in sources] ) return cuda_ext def write_version_to_file(version, target_file): with open(target_file, 'w') as f: print('__version__ = "%s"' % version, file=f) if __name__ == '__main__': version = '0.3.0+%s' % get_git_commit_number() write_version_to_file(version, 'pcdet/version.py') setup( name='pcdet', version=version, description='OpenPCDet is a general codebase for 3D object detection from point cloud', install_requires=[ 'numpy', 'torch>=1.1', 'spconv', 'numba', 'tensorboardX', 'easydict', 'pyyaml' ], author='Shaoshuai Shi', author_email='[email protected]', license='Apache License 2.0', packages=find_packages(exclude=['tools', 'data', 'output']), cmdclass={'build_ext': BuildExtension}, ext_modules=[ make_cuda_ext( name='pointnet2_stack_cuda', module='pcdet.ops.pointnet2.pointnet2_stack', sources=[ 'src/pointnet2_api.cpp', 'src/ball_query.cpp', 'src/ball_query_gpu.cu', 'src/group_points.cpp', 'src/group_points_gpu.cu', 'src/sampling.cpp', 'src/sampling_gpu.cu', 'src/interpolate.cpp', 'src/interpolate_gpu.cu', ], ), make_cuda_ext( name='pointnet2_batch_cuda', module='pcdet.ops.pointnet2.pointnet2_batch', sources=[ 'src/pointnet2_api.cpp', 'src/ball_query.cpp', 'src/ball_query_gpu.cu', 'src/group_points.cpp', 'src/group_points_gpu.cu', 'src/interpolate.cpp', 'src/interpolate_gpu.cu', 'src/sampling.cpp', 'src/sampling_gpu.cu', ], ), ], )
python
from django.test.testcases import TestCase

from corehq.messaging.smsbackends.sislog.util import convert_raw_string


class GSM0338Portuguese(TestCase):

    def test_decode(self):
        raw_to_clean = {
            # basic character test
            "associa\x09\x7bo": "associa\u00e7\u00e3o",
            # extended character test
            "a\x09\x1b\x75car": "a\u00e7\u00facar",
            # no decode
            "no decode needed": "no decode needed",
        }
        for raw, expected in raw_to_clean.items():
            cleaned = convert_raw_string(raw)
            self.assertEqual(cleaned, expected)
            print("Cleaned text: %s" % cleaned)
python
from finbert.finbert import predict from pytorch_pretrained_bert.modeling import BertForSequenceClassification import argparse from pathlib import Path import datetime import os import random import string import pandas as pd import time import pickle import multiprocessing as mp import gc # globals model = None parser = argparse.ArgumentParser(description='Sentiment analyzer') parser.add_argument('--model_path', type=str, help='Path to classifier model') args = parser.parse_args() def predict_batch(N, data_path="CC_data/", save_path="output/"): model = BertForSequenceClassification.from_pretrained(args.model_path, num_labels=3, cache_dir=None) sentence_pred_df = [] start_main = time.time() data = pickle.load(open(data_path + "BERTnews_all.p", "rb")) data = data.reset_index(drop=True) # for i in range(len(data)): for i in range(N): pred = predict(data.loc[i]['text'], data.loc[i]['index'], model, write_to_csv=False) sentence_pred_df.extend(pred) sentence_pred_df = pd.DataFrame.from_dict(sentence_pred_df) sentence_pred_df.to_csv(save_path + "BERTnews_preds.csv") end_main = time.time() print("TIME for batch_id: {}".format(round(end_main - start_main, 2))) def init_bert(model_path=args.model_path): global model # global data model = BertForSequenceClassification.from_pretrained(model_path, num_labels=3, cache_dir=None) # data = pickle.load(open("CC_data/BERTnews_all.p", "rb")) def predict_news(x): pred = predict(x[1], x[0], model, write_to_csv=False) return pred if __name__ == "__main__": # ========= single prediction ========= # start = time.time() # predict_batch(30) # end = time.time() # print("TOTAL time: {}".format(round(end-start, 2))) # ======== New multiprocessing =========== N_start = 0 # N_end = 539317 # N_end = 5000 # N_end = 30 N_end = 100000 # we parse data to list of tuples to avoid reloading entire data for every subprocess data = pickle.load(open("CC_data/BERTnews_all.p", "rb")) data_batch = [tuple(x) for x in data.loc[N_start:N_end].itertuples(index=False)] del data gc.collect() pool = mp.Pool(initializer=init_bert) print("Number of cores: ", os.cpu_count()) start = time.time() res = pool.map(predict_news, data_batch) end = time.time() print("TOTAL time: {}".format(round(end-start, 2))) # save to pandas dataframe flatten = lambda l: [item for sublist in l for item in sublist] res = flatten(res) res = pd.DataFrame.from_dict(res) res.to_csv("output/BERTnews_preds_all.csv") # ========= Naive multiprocessing ========= # pool = mp.Pool() # print("Number of cores: ", os.cpu_count()) # # start = time.time() # pool.map(predict_batch, list(range(2))) # end = time.time() # print("TOTAL time: {}".format(round(end-start, 2)))
python
import pytest from fastapi.testclient import TestClient from firedantic import ModelNotFoundError from _pytest.monkeypatch import MonkeyPatch import saatja.request_dependencies as request_dependencies from saatja.db.task import ScheduledTask, DeliveredTask, TaskError from saatja.utils import now_utc SCHEDULER_HEADERS = {"Authorization": "trustno1"} class FakeResponse: def __init__(self, status: int, text: str): self.status = status self._text = text async def text(self): return self._text def get_request_mock(): requests = { "https://example.com/1": [ FakeResponse(200, "Alles klar."), ], "https://example.com/2": [ FakeResponse(500, "Oops."), ], } async def _mock_make_request(task: ScheduledTask): resp = requests[task.url].pop(0) return resp.status, await resp.text() return requests, _mock_make_request def mock_check_authorization(*args): # Bypass authentication pass def test_task_delivery(client: TestClient, monkeypatch: MonkeyPatch): requests, request_mock = get_request_mock() monkeypatch.setattr(ScheduledTask, "_make_request", request_mock) monkeypatch.setattr( request_dependencies, "_check_scheduler_authorization", mock_check_authorization ) task = ScheduledTask(url="https://example.com/1", when=now_utc()) task.save() task2 = ScheduledTask(url="https://example.com/2", when=now_utc()) task2.save() print("----- SCHEDULER TEST -----") print(f"Task 1: {task.id}") print(f"Task 2: {task2.id}") print("") response = client.post("/scheduler/run-tasks", headers=SCHEDULER_HEADERS) assert response.status_code == 204 assert len(requests["https://example.com/1"]) == 0 assert len(requests["https://example.com/2"]) == 0 print("----- SCHEDULED TASKS -----") for r in ScheduledTask.find({}): print(f" - {r.id}: {r.when} -> {r.url}") print("") print("----- DELIVERED TASKS -----") for r in DeliveredTask.find({}): print(f" - {r.id}: {r.when} -> {r.url}") print("") print("----- TASK ERRORS -----") for r in TaskError.find({}): print(f" - {r.task_id}: {r.attempted_delivery} -> {r.status}") print("") # First task should've been delivered delivered = DeliveredTask.get_by_id(task.id) # These timestamps should be pretty close to each other assert abs((delivered.delivered - delivered.when).total_seconds()) < 2 with pytest.raises(ModelNotFoundError): ScheduledTask.get_by_id(task.id) # Second task should've received an error ScheduledTask.get_by_id(task2.id) errors = TaskError.find({"task_id": task2.id}) assert len(errors) == 1 error: TaskError = errors[0] assert error.task_id == task2.id assert abs((error.attempted_delivery - task2.when).total_seconds()) < 2 assert error.status == 500 assert error.response == "Oops."
python
from typing import Union from discordmovies.attributes import DiscordMoviesAttributes from typing import List from discordmovies.outputmodules.filehelper import FileHelper from discordmovies.inputmodules.input import Input class DiscordMovies: """ A class for going through a discord movie recommendations channel and extracting all links. These links can then be uploaded to Google Sheets or exported to a CSV. """ def __init__(self, discord_auth_token: Union[str, int], bot: bool = True, doc_name: str = "discordmovies", attributes: List[str] = None, exclude_attributes: List[str] = None): self.auth_token = discord_auth_token self.bot = bot self.attributes = DiscordMoviesAttributes( name=doc_name, attributes=attributes, exclude_attributes=exclude_attributes ) def discord_to_file(self, filetype: str, channel_id: Union[str, int], watched_channel_id: Union[str, int] = None, sheet_id: Union[str, int] = None, max_messages: int = 100, tmdb_api_key: str = None, remove_watched: bool = False, reformat_sheet: bool = False, source: str = "discord"): """ Extract all movies from a Discord channel and save them to a Google Sheet or CSV. """ file = FileHelper(filetype=filetype, attributes=self.attributes, sheet_id=sheet_id, reformat_sheet=reformat_sheet) current_content = file.get_values() # These next few if statements are checking the formatting of the # file. Basically if the header is not what's expected, the whole # sheet is overwritten. if current_content: if current_content[0] != self.attributes.movie_list. \ get_categories(): print("File formatting does not match current formatting " "settings. Sheet will be completely rewritten.") current_content = [] overwrite = True else: overwrite = False else: overwrite = False inputs = Input( source_type=source, current_content=current_content, attributes=self.attributes, auth=self.auth_token, bot=self.bot, tmdb_api_key=tmdb_api_key, watched_channel_id=watched_channel_id, remove_watched=remove_watched, movie_channel_id=channel_id, max_messages=max_messages ) if file.exists(): if not self.attributes.movie_list: inputs.setup_movie_list() file.write_existing(overwrite=overwrite) else: if not self.attributes.movie_list: inputs.setup_movie_list() file.write_new()
python
import os import shutil import datetime from ebooklib import epub from toolbox.tools import Now from compiler import epub_html now = Now() css = """body{padding:0;margin:0;line-height:1.2;text-align:justify} p{text-indent:2em;display:block;line-height:1.3;margin-top:0.6em;margin-bottom:0.6em} div{margin:0;padding:0;line-height:1.2;text-align:justify} h1{font-size:1.4em;line-height:1.2;margin-top:1em;margin-bottom:1.2em;font-weight:bold;text-align:center !important} .notetag{font-size:0.8em;vertical-align:super;font-weight:bold;color:#960014;text-decoration:none} """ def build_page(book: epub.EpubBook, file, filename): tex = open(file, "rb").read() title, content = epub_html.compile(tex) page = epub.EpubHtml(title=title, file_name=filename + ".xhtml", content=content, lang='zh') page.add_link(href='./style/style.css', rel='stylesheet', type='text/css') link = epub.Link(filename + ".xhtml", title, "chap_" + filename) book.add_item(page) book.spine.append(page) return link def work(project, _vars): book = epub.EpubBook() book.set_identifier(_vars.nid) book.set_title(_vars.title) book.set_language('zh') book.add_author(_vars.author) book.add_item(epub.EpubNav()) book.add_item(epub.EpubNcx()) book.add_item(epub.EpubItem(uid="style_nav", file_name="style/style.css", media_type="text/css", content=css)) book.spine = ['nav'] book.add_metadata('DC', 'description', _vars.description) book.toc = tuple((epub.Section(title), tuple(build_page(book, f'./{project}/{file}', file.replace(".tex", "")) for file in files)) for title, files in _vars.menu.items()) epub.write_epub(f"./artifacts/{project}/epub/{project}_latest.epub", book, {'epub3_pages': False}) shutil.copy(f"./artifacts/{project}/epub/{project}_latest.epub", f"./artifacts/{project}/epub/history/{project}_{datetime.datetime.now().strftime('%y%m%d')}.epub") _abspath = os.path.abspath(f"./artifacts/{project}/epub/{project}_latest.epub") print(f'[{now}] Epub file saved at {_abspath}.')
python
# loop3
userinput = input("Enter a letter in the range A - C : ")
while userinput not in ("A", "a", "B", "b", "C", "c"):
    userinput = input("Enter a letter in the range A-C : ")
python
year = input("Enter the year to be checked : ")


def check_leap(year):
    print(type(year))
    year = int(year)
    # Gregorian rule: divisible by 400 -> leap; by 100 (but not 400) -> not leap; by 4 -> leap.
    if year % 400 == 0:
        print("Leap Year")
    elif year % 100 == 0:
        print("Not a Leap year")
    elif year % 4 == 0:
        print("Leap Year")
    else:
        print("Not a Leap year")
    print(year)


check_leap(year)
python
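A few spot checks for the corrected leap-year rule above; this is a sketch that assumes check_leap is in scope (the function also echoes the type and the year, so only the classification is noted here).

# Expected classification under the Gregorian rule:
# 2000 -> Leap Year (divisible by 400), 1900 -> Not a Leap year (divisible by 100 but not 400),
# 2024 -> Leap Year (divisible by 4), 2023 -> Not a Leap year.
for y in ("2000", "1900", "2024", "2023"):
    check_leap(y)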
# -*- coding: utf-8 -*-
__author__ = 'mariosky'

import json
import os
import time

print(os.environ['REDIS_HOST'])

from redis_cola import Cola, Task

server = Cola("perl6")

code = """
sub add($a, $b) {
    say "Hi";
    return $a+$b;
}
"""

test = """
# .... tests
is add(6,1), 9, 'Suma dos enteros';
is add(6,-1), 2, 'Suma dos enteros error';
"""


def put():
    task = {"id": None, "method": "exec", "params": {"code": code, "test": test}}
    print(task)
    task_id = server.enqueue(**task)
    return task_id


def get(t_id):
    t = Task(id=t_id)
    t.get_result('perl6')
    if t.result:
        return t.result
        # return json.loads(t.result[0])
    else:
        return "Snif"


tid = put()
print(tid)
time.sleep(2)
print(get(tid))
python
from .di import DI
from .standard_dependencies import StandardDependencies
from .additional_config import AdditionalConfig
python
from spyd.registry_manager import register


@register('client_message_handler')
class SayteamHandler(object):
    message_type = 'N_SAYTEAM'

    @staticmethod
    def handle(client, room, message):
        player = client.get_player()
        room.handle_player_event('team_chat', player, message['text'])
python
# note:
from __future__ import absolute_import

from .click_models import *
from .data_utils import *
from .hparams import *
from .metric_utils import *
from .metrics import *
from .propensity_estimator import *
from .sys_tools import *
from .team_draft_interleave import *
from .RAdamOptimizer import *
python
import numpy as np import joblib from matplotlib import pyplot import pandas as pd import matplotlib.pyplot as plt import math from sklearn.preprocessing import StandardScaler from sklearn.model_selection import GridSearchCV, cross_val_score from sklearn.svm import SVC from sklearn.metrics import accuracy_score, f1_score, recall_score,precision_score, classification_report, confusion_matrix import collections from sklearn.model_selection import cross_val_predict from sklearn.metrics import precision_recall_curve, roc_curve from sklearn.multiclass import OneVsRestClassifier from sklearn.preprocessing import label_binarize np.random.seed(1337) # for reproducibility import xgboost as xgb # import packages for hyperparameters tuning from hyperopt import STATUS_OK, Trials, fmin, hp, tpe X_train = np.load('../data/train/X_train.npy') Y_train = np.load('../data/train/Y_train.npy') X_test = np.load('../data/test/set1/X_test.npy') Y_test = np.load('../data/test/set1/Y_test.npy') X_test2 = np.load('../data/test/set2/X_test2.npy') Y_test2 = np.load('../data/test/set2/Y_test2.npy') scaler = StandardScaler() X_train = scaler.fit_transform(X_train) X_test = scaler.fit_transform(X_test) X_test2 = scaler.fit_transform(X_test2) print(X_train.shape) print(Y_train.shape) print(X_test.shape) print(Y_test.shape) print(X_test2.shape) print(Y_test2.shape) xgb_clf = xgb.XGBClassifier(learning_rate=0.1, n_estimators=1000, max_depth=5, min_child_weight=1, gamma=0, reg_alpha=0.01, n_jobs=-1, subsample=0.5, colsample_bytree=0.5, objective='multi:softmax', num_class=8)
python
from random import choice

n = str(input('nome do 1° aluno: '))
n2 = str(input('nome do 2° aluno: '))
n3 = str(input('nome do 3° aluno: '))
n4 = str(input('nome do 4° aluno: '))
lista = (n, n2, n3, n4)
print(f'O aluno escolhido é: {choice(lista)}')
python
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-01-18 15:19
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='SCIMPlatformSettings',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('update', models.DateTimeField(auto_now=True)),
                ('bearer_token', models.CharField(max_length=32, verbose_name='Bearer Token')),
            ],
            options={
                'verbose_name': 'scim platform settings',
                'verbose_name_plural': 'scim platform settings',
            },
        ),
    ]
python
# -*- coding: utf-8 -*- # Builtin Modules import time import traceback import functools # 3rd-party Modules import redis import six # Project Modules from worker.utils import toolkit, yaml_resources from worker.utils.log_helper import LogHelper CONFIG = yaml_resources.get('CONFIG') def get_config(c): config = { 'host' : c.get('host') or '127.0.0.1', 'port' : c.get('port') or 6379, 'db' : c.get('db') or c.get('database'), 'password': c.get('password') or None, 'ssl' : c.get('useSSL') or c.get('useTLS'), } if config['ssl'] is True: config['ssl_cert_reqs'] = None return config LIMIT_ARGS_DUMP = 200 # LUA LUA_UNLOCK_KEY_KEY_NUMBER = 1; LUA_UNLOCK_KEY = 'if redis.call("get", KEYS[1]) == ARGV[1] then return redis.call("del", KEYS[1]) else return 0 end '; CLIENT_CONFIG = None CLIENT = None class RedisHelper(object): def __init__(self, logger, config=None, database=None, *args, **kwargs): self.logger = logger self.skip_log = False self.checked_keys = set() if config: if database: config['db'] = database self.config = config self.config['tsMaxAge'] = config.get('tsMaxAge') or 3600 * 24 self.config['tsMaxPeriod'] = config.get('tsMaxPeriod') or 3600 * 24 * 3 self.config['tsMinInterval'] = config.get('tsMinInterval') or 60 self.client = redis.Redis(**get_config(config)) else: global CLIENT_CONFIG global CLIENT if not CLIENT: CLIENT_CONFIG = { 'host' : CONFIG['REDIS_HOST'], 'port' : CONFIG['REDIS_PORT'], 'database': CONFIG['REDIS_DATABASE'], 'password': CONFIG['REDIS_PASSWORD'], 'useTLS' : CONFIG['REDIS_USE_TLS'], } CLIENT_CONFIG['tsMaxAge'] = CONFIG.get('REDIS_TS_MAX_AGE') CLIENT_CONFIG['tsMaxPeriod'] = CONFIG.get('REDIS_TS_MAX_PERIOD') CLIENT_CONFIG['tsMinInterval'] = CONFIG.get('REDIS_TS_MIN_INTERVAL') CLIENT = redis.Redis(**get_config(CLIENT_CONFIG)) self.config = CLIENT_CONFIG self.client = CLIENT def __del__(self): if self.client and self.client is not CLIENT: self.client.close() def check(self): try: self.client.info() except Exception as e: for line in traceback.format_exc().splitlines(): self.logger.error(line) raise Exception(str(e)) def query(self, *args, **options): command = args[0] command_args = args[1:] if not self.skip_log: key = '' if len(command_args) > 1: key = command_args[0] + ' ...' 
elif len(command_args) > 0: key = command_args[0] options_dump = '' if options: options_dump = 'options=' + toolkit.json_dumps(options) self.logger.debug('[REDIS] Query `{} {}` {}'.format(command.upper(), key, options_dump)) return self.client.execute_command(*args, **options); def run(self, *args, **kwargs): command = args[0] command_args = args[1:] if not self.skip_log: key = '' if len(command_args) > 0: key = command_args[0] if isinstance(key, (list, tuple)): key = ', '.join([str(k) for k in key]) elif isinstance(key, dict): key = ', '.join(key.keys()) kwargs_dump = '' if kwargs: kwargs_dump = 'kwargs=' + toolkit.json_dumps(kwargs) self.logger.debug('[REDIS] Run `{} {}` {}'.format(command.upper(), key, kwargs_dump)) return getattr(self.client, command)(*command_args, **kwargs) def keys(self, pattern='*'): found_keys = [] COUNT_LIMIT = 1000 next_cursor = 0 while True: next_cursor, keys = self.run('scan', cursor=next_cursor, match=pattern, count=COUNT_LIMIT) if isinstance(keys, list) and len(keys) > 0: for k in keys: found_keys.append(six.ensure_str(k)) if next_cursor == 0: break found_keys = list(set(found_keys)) return found_keys def exists(self, key): return self.run('exists', key) def get(self, key): return self.run('get', key) def getset(self, key, value): return self.run('getset', key, value) def set(self, key, value): return self.run('set', key, value) def setnx(self, key, value): return self.run('setnx', key, value) def setex(self, key, max_age, value): if max_age <= 0: max_age = 1; return self.run('setex', key, max_age, value) def setexnx(self, key, max_age, value): if max_age <= 0: max_age = 1; return self.run('set', key, value, ex=max_age, nx=True) def mget(self, keys, *args): return self.run('mget', keys, *args) def mset(self, key_values, **kwargs): return self.run('mset', key_values, **kwargs) def incr(self, key): return self.run('incr', key) def incrby(self, key, increment): return self.run('incrby', key, amount=increment) def delete(self, keys): if not isinstance(keys, list): keys = [keys] return self.run('delete', *keys) def del_by_pattern(self, pattern): if not self.skip_log: self.logger.debug('[REDIS] DEL by pattern `{}`'.format(pattern)) keys = self.keys(pattern) if len(keys) <= 0: return None else: return self.delete(keys) def expire(self, key, expires): if expires <= 0: expires = 1 return self.run('expire', key, expires) def expireat(self, key, timestamp): return self.run('expireat', key, timestamp) def hkeys(self, key, pattern='*'): found_keys = [] COUNT_LIMIT = 1000 next_cursor = 0 while True: next_cursor, keys = self.run('hscan', key, cursor=next_cursor, match=pattern, count=COUNT_LIMIT) if len(keys) > 0: if isinstance(keys, dict): keys = list(keys.keys()) if isinstance(keys, list): for k in keys: found_keys.append(six.ensure_str(k)) if next_cursor == 0: break found_keys = list(set(found_keys)) return found_keys def hget(self, key, field): return self.run('hget', key, field) def hmget(self, key, fields): return self.run('hmget', key, fields) def hgetall(self, key): result = self.run('hgetall', key) result = dict([(six.ensure_str(k), v) for k, v in result.items()]) return result def hset(self, key, field, value): return self.run('hset', key, field, value) def hsetnx(self, key, field, value): return self.run('hsetnx', key, field, value) def hmset(self, key, obj): return self.run('hmset', key, obj) def hincr(self, key, field): return self.run('hincrby', key, field, amount=1) def hincrby(self, key, field, increment): return self.run('hincrby', key, field, 
amount=increment) def hdel(self, key, fields): return self.run('hdel', key, *fields) def lpush(self, key, value): return self.run('lpush', key, value) def rpush(self, key, value): return self.run('rpush', key, value) def lpop(self, key): return self.run('lpop', key) def rpop(self, key): return self.run('rpop', key) def llen(self, key): return self.run('llen', key) def lrange(self, key, start, stop): return self.run('lrange', key, start, stop); def ltrim(self, key, start, stop): return self.run('ltrim', key, start, stop); def rpoplpush(self, key, dest_key=None, dest_scope=None): if dest_key is None: dest_key = key if dest_scope is None: dest_scope = scope return self.run('rpoplpush', key, dest_key) def ttl(self, key): return self.run('ttl', key) def type(self, key): return self.run('type', key) def dbsize(self): return self.run('dbsize') def info(self): return self.run('info') def lock(self, lock_key, lock_value, max_lock_time): if max_lock_time <= 0: max_lock_time = 1 return self.run('set', lock_key, lock_value, ex=max_lock_time, nx=True) def extend_lock_time(self, lock_key, lock_value, max_lock_time): if max_lock_time <= 0: max_lock_time = 1 expected_lock_value = self.run('get', lock_key) expected_lock_value = six.ensure_str(expected_lock_value) if expected_lock_value != lock_value: raise Error('Not lock owner') self.run('expire', lock_key, max_lock_time) def unlock(self, lock_key, lock_value): return self.run('eval', LUA_UNLOCK_KEY, LUA_UNLOCK_KEY_KEY_NUMBER, lock_key, lock_value) def ts_parse_point(self, point): timestamp, value = six.ensure_str(point).split(',', 1) timestamp = int(timestamp.split('.')[0]) value = toolkit.json_loads(value) return [timestamp, value] def ts_add(self, key, value, timestamp=None, mode=None): mode = mode or 'update' if not self.skip_log: self.logger.debug('[REDIS] TS Add `{}`'.format(key)) if key not in self.checked_keys: cache_res = self.client.type(key) if six.ensure_str(cache_res) != 'zset': self.client.delete(key) self.checked_keys.add(key) timestamp = timestamp or int(time.time()) # 时间戳自动根据最小间隔对齐 timestamp = int(timestamp / self.config['tsMinInterval']) * self.config['tsMinInterval'] if mode.lower() == 'addup': prev_points = self.client.zrangebyscore(key, timestamp, timestamp) if prev_points: _, prev_value = self.ts_parse_point(prev_points[0]) value += float(prev_value) self.client.zremrangebyscore(key, timestamp, timestamp) value = toolkit.json_dumps(value) data = ','.join([str(timestamp), value]) self.client.zadd(key, {data: timestamp}) self.client.expire(key, self.config['tsMaxAge']) if self.config['tsMaxPeriod']: min_timestamp = int(time.time()) - self.config['tsMaxPeriod'] self.client.zremrangebyscore(key, '-inf', min_timestamp) def ts_get(self, key, start='-inf', stop='+inf', group_time=1, agg='avg', scale=1, ndigits=2, time_unit='s', dict_output=False, limit=None, fill_zero=False): if not self.skip_log: self.logger.debug('[REDIS] TS Get `{}`'.format(key)) if key not in self.checked_keys: cache_res = self.client.type(key) if six.ensure_str(cache_res) != 'zset': self.client.delete(key) self.checked_keys.add(key) ts_data = self.client.zrangebyscore(key, start, stop) ts_data = list(map(self.ts_parse_point, ts_data)) if ts_data and group_time and group_time >= 1: temp = [] # latest_timestamp = ts_data[-1][0] for d in ts_data: grouped_timestamp = int(d[0] / group_time) * group_time # grouped_timestamp = latest_timestamp - int((latest_timestamp - d[0]) / group_time) * group_time if len(temp) <= 0 or temp[-1][0] != grouped_timestamp: 
temp.append([grouped_timestamp, [d[1]]]) else: temp[-1][1].append(d[1]) for d in temp: if agg == 'count': d[1] = len(d[1]) elif agg == 'avg': count = len(d[1]) d[1] = functools.reduce(lambda acc, x: acc + x, d[1]) / count elif agg == 'sum': d[1] = functools.reduce(lambda acc, x: acc + x, d[1]) elif agg == 'min': d[1] = min(d[1]) elif agg == 'max': d[1] = max(d[1]) if fill_zero: zero_fill_map = dict([(d[0], d[1]) for d in temp]) _next_temp = [] for ts in range(int(temp[0][0]), int(temp[-1][0]) + group_time, group_time): _next_temp.append([ts, zero_fill_map.get(ts) or 0]) temp = _next_temp ts_data = temp if limit: ts_data = ts_data[-1 * limit:] for d in ts_data: if isinstance(d[1], (int, float)): if scale and scale != 1: d[1] = d[1] / scale if ndigits > 0: d[1] = round(d[1], ndigits) else: d[1] = int(d[1]) if time_unit == 'ms': d[0] = d[0] * 1000 if dict_output: ts_data = list(map(lambda x: { 't': x[0], 'v': x[1] }, ts_data)) return ts_data
python
from typing import Dict, Generator, Optional import numpy as np from netqasm.lang import instr as ins from netqasm.lang.instr import core, nv from netqasm.lang.instr.flavour import Flavour from netsquid.components import Instruction as NetSquidInstruction from netsquid.components.instructions import ( INSTR_CXDIR, INSTR_CYDIR, INSTR_INIT, INSTR_ROT_X, INSTR_ROT_Y, INSTR_ROT_Z, ) from netsquid.nodes.node import Node as NetSquidNode from pydynaa import EventExpression from squidasm.nqasm.executor.base import NetSquidExecutor T_InstrMap = Dict[ins.NetQASMInstruction, NetSquidInstruction] NV_NS_INSTR_MAPPING: T_InstrMap = { core.InitInstruction: INSTR_INIT, nv.RotXInstruction: INSTR_ROT_X, nv.RotYInstruction: INSTR_ROT_Y, nv.RotZInstruction: INSTR_ROT_Z, nv.ControlledRotXInstruction: INSTR_CXDIR, nv.ControlledRotYInstruction: INSTR_CYDIR, } class NVNetSquidExecutor(NetSquidExecutor): def __init__( self, node: NetSquidNode, name: Optional[str] = None, instr_log_dir: Optional[str] = None, flavour: Optional[Flavour] = None, instr_proc_time: int = 0, host_latency: int = 0, ) -> None: """Represents a QNodeOS processor that communicates with a QDevice that supports NV instructions""" super().__init__( node, name, instr_log_dir, instr_mapping=NV_NS_INSTR_MAPPING, instr_proc_time=instr_proc_time, host_latency=host_latency, ) def _do_meas( self, subroutine_id: int, q_address: int ) -> Generator[EventExpression, None, int]: position = self._get_position(subroutine_id=subroutine_id, address=q_address) if position != 0: # a carbon # Move the state to the electron (position=0) first and then measure the electron. # See https://gitlab.tudelft.nl/qinc-wehner/netqasm/netqasm-docs/-/blob/master/nv-gates-docs.md # for the circuit. self._logger.debug( f"Moving qubit from carbon (position {position}) to electron before measuring" ) yield from self._execute_qdevice_instruction( ns_instr=INSTR_INIT, qubit_mapping=[0] ) yield from self._execute_qdevice_instruction( ns_instr=INSTR_ROT_Y, qubit_mapping=[0], angle=np.pi / 2 ) yield from self._execute_qdevice_instruction( ns_instr=INSTR_CYDIR, qubit_mapping=[0, position], angle=-np.pi / 2 ) yield from self._execute_qdevice_instruction( ns_instr=INSTR_ROT_X, qubit_mapping=[0], angle=-np.pi / 2 ) yield from self._execute_qdevice_instruction( ns_instr=INSTR_CXDIR, qubit_mapping=[0, position], angle=np.pi / 2 ) yield from self._execute_qdevice_instruction( ns_instr=INSTR_ROT_Y, qubit_mapping=[0], angle=-np.pi / 2 ) # Explicitly free physical qubit 0, since the Executor will # only free the original qubit. self._clear_phys_qubit_in_memory(0) # Measure the electron. outcome = yield from super()._meas_physical_qubit(0) return outcome
python
import math

from hurry.filesize import size


def convert_web_speed_size(size_bytes):
    """
    Convert byte to other Units of information and show in xbit vs xbyte
    :param size_bytes:
    :return:
    """
    if size_bytes == 0:
        return "0B"
    size_name = ("B", "Kbit/s", "Mbit/s", "Gbit/s", "Tbit/s", "Pbit/s", "Ebit/s", " Zbit/s", "Ybit/s")
    i = int(math.floor(math.log(size_bytes, 1024)))
    p = math.pow(1024, i)
    s = round(size_bytes / p, 2)
    return f"{s} {size_name[i]}"


def convert_byte_to(size_bytes):
    """
    Convert byte to other Units of information
    :param size_bytes:
    :return:
    """
    convert_size_bytes = size(size_bytes)
    return convert_size_bytes


def convert_size_by_unit(size_bytes, to='GB'):
    """
    Convert byte to other unit by select unit
    :param size_bytes:
    :param to:
    :return:
    """
    if size_bytes == 0:
        return 0, ''
    power = {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3, 'TB': 4, 'EB': 5, 'ZB': 6, 'YB': 7}
    i = power[to]
    p = math.pow(1024, i)
    float_size = round(size_bytes / p, 2)
    return float_size
python
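A minimal usage sketch for the conversion helpers above; it assumes the functions are in scope, and the expected values follow from the arithmetic in the code as written.

# Spot checks for the byte-conversion helpers (assumed to be in scope).
print(convert_web_speed_size(1500000))         # "1.43 Mbit/s"  (1500000 / 1024**2, rounded)
print(convert_size_by_unit(1073741824, 'GB'))  # 1.0            (1024**3 bytes)
print(convert_size_by_unit(0))                 # (0, '')        (zero is special-cased)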
# Prime Number Sieve
# author: A1p5a

import math


def is_prime(num):
    # Returns True if num is a prime number, otherwise False.
    # Note: Generally, isPrime() is slower than primeSieve().

    # all numbers less than 2 are not prime
    if num < 2:
        return False

    # see if num is divisible by any number up to the square root of num
    for i in range(2, int(math.sqrt(num)) + 1):
        if num % i == 0:
            return False
    return True


def prime_sieve(sieve_size):
    # Returns a list of prime numbers calculated using
    # the Sieve of Eratosthenes algorithm.

    sieve = [True] * sieve_size
    sieve[0] = False  # zero and one are not prime numbers
    sieve[1] = False

    # create the sieve
    for i in range(2, int(math.sqrt(sieve_size)) + 1):
        pointer = i * 2
        while pointer < sieve_size:
            sieve[pointer] = False
            pointer += i

    # compile the list of primes
    primes = []
    for i in range(sieve_size):
        if sieve[i]:
            primes.append(i)

    return primes
python
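A few spot checks for the sieve code above; this is a sketch that assumes is_prime and prime_sieve are in scope, and the expected outputs are standard facts about small primes.

# Spot checks for the prime helpers (assumed to be in scope).
print(is_prime(17))     # True
print(is_prime(21))     # False (3 * 7)
print(prime_sieve(20))  # [2, 3, 5, 7, 11, 13, 17, 19]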
from NewDouban import NewDouban

if __name__ == "__main__":
    douban = NewDouban()
    result = douban.search("知识考古学")
    for book in result:
        print(book)
python
#!/usr/bin/env python import rospy import actionlib import tf from math import radians, atan2, cos, sin from fetch_manipulation_pipeline.msg import GrabBagAction, GrabBagGoal import py_trees import py_trees_ros from geometry_msgs.msg import Pose from copy import deepcopy class GrabBagBehavior(py_trees_ros.actions.ActionClient): def __init__(self, name, *argv, **kwargs): super(GrabBagBehavior, self).__init__( name=name, action_spec=fetch_manipulation_pipeline.msg.GrabBagAction, action_goal=fetch_manipulation_pipeline.msg.GrabBagGoal(), action_namespace='grab_bag', *argv, **kwargs ) def initialise(self): super(GrabBagBehavior, self).initialise() # Get two points from blackboard blackboard = py_trees.blackboard.Blackboard() front_point = blackboard.get('front_handle') back_point = blackboard.get('back_handle') if front_point is None: rospy.logerr('handle points was not defined for grab bag behavior') self.action_goal = fetch_manipulation_pipeline.msg.GrabBagGoal() return # x1, y1 is the back handle. (x2, y2) is the front handle x1, y1, z1 = back_point x2, y2, z2 = front_point pose_ = Pose() # Calculate angles from the two points of the handles. x = 90 y = atan2(x1-x2, y1-y2) z = 0 # Calculate quaternions from the angles. quaternion = tf.transformations.quaternion_from_euler(radians(x), y, 0, 'rxyz') pose_.orientation.x = quaternion[0] pose_.orientation.y = quaternion[1] pose_.orientation.z = quaternion[2] pose_.orientation.w = quaternion[3] # Set grasp pose grasp_pose = deepcopy(pose_) # Set pre-grasp pose pre_grasp_pose = deepcopy(pose_); # Offset grasp_pose.position.x = x1 - 0.08 * cos(y) grasp_pose.position.y = y1 - 0.08 * cos(y) grasp_pose.position.z = (z1 + z2) / 2.0 # Offset pre_grasp_pose.position.x = x2 - 0.25 * cos(y) pre_grasp_pose.position.y = y2 - 0.25 * sin(y) pre_grasp_pose.position.z = (z1 + z2) / 2.0 # Set the action goal self.action_goal.grasp_pose = grasp_pose self.action_goal.pre_grasp_pose = pre_grasp_pose self.action_goal.cartesian_max_try = 3 rospy.loginfo('Pick Goal Constructed.')
python
import logging

from pyradios.utils import setup_log_file

LOG_FILENAME = "pyradios.log"

logger = logging.getLogger(__name__)

formatter = logging.Formatter(
    "[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s"
)

file_handler = logging.FileHandler(setup_log_file(LOG_FILENAME))
file_handler.setFormatter(formatter)

logger.setLevel(logging.INFO)
logger.addHandler(file_handler)
python
import os import argparse from terminaltables import AsciiTable def _format(number): return '{:.4f}'.format(float(number)) parser = argparse.ArgumentParser(description='Display kitti results') parser.add_argument('--results', type=str, required=True, help='path to a kitti result folder') parser.add_argument('--noc', action='store_true') args = parser.parse_args() results = ['stats_flow_occ.txt', 'stats_disp_occ_0.txt', 'stats_disp_occ_1.txt', 'stats_scene_flow_occ.txt'] metrics = ['background', 'foreground', 'all', 'density'] table_data = [['FILE','BACKGROUND', 'FOREGROUND', 'ALL', 'DENSITY']] if args.noc: results = [x.replace('occ','noc') for x in results] for r in results: with open(os.path.join(args.results, r),'r') as result_file: lines = result_file.readlines() background, _, foreground, _, all, _, density = lines[0].strip().split(' ') values = [r, _format(background), _format(foreground), _format(all), _format(density)] table_data.append(values) table = AsciiTable(table_data) print('\nEvaluation results of {}:'.format(args.results)) print(table.table) with open(os.path.join(args.results,'report.txt'),'w') as f: for data in table_data: for i,value in enumerate(data): if i > 0: value = value.replace('.',',') f.write('{};'.format(value)) f.write('\n') print('report.txt has been written in {}'.format(args.results, 'KITTI'))
python
import asyncio import rlp import ethereum.transactions from ethereum import utils from ethereum.utils import normalize_key, ecsign from ethereum.transactions import unsigned_tx_from_tx, UnsignedTransaction # NOTE: this is to hotfix a bug in pyethereum's signing functions # fixed in https://github.com/ethereum/pyethereum/commit/d962694be03686a8e5c1d7459ae272b70a5c9f77 # but not yet included in a release class Transaction(ethereum.transactions.Transaction): def sign(self, key, network_id=None): """Sign this transaction with a private key. A potentially already existing signature would be overridden. """ if network_id is None: rawhash = utils.sha3(rlp.encode(unsigned_tx_from_tx(self), UnsignedTransaction)) else: assert 1 <= network_id < 2**63 - 18 rlpdata = rlp.encode(rlp.infer_sedes(self).serialize(self)[ :-3] + [network_id, b'', b'']) rawhash = utils.sha3(rlpdata) key = normalize_key(key) v, r, s = ecsign(rawhash, key) if network_id is not None: v += 8 + network_id * 2 ret = self.copy( v=v, r=r, s=s ) ret._sender = utils.privtoaddr(key) return ret class TransactionResponse: def __init__(self, jsonrpc, hash, nonce=None): self.jsonrpc = jsonrpc self.hash = hash self.nonce = nonce self._receipt = None async def status(self): receipt = await self.receipt() if receipt is None or receipt['blockNumber'] is None: return 'unconfirmed' return 'confirmed' async def receipt(self): if self._receipt: return self._receipt receipt = await self.jsonrpc.eth_getTransactionReceipt(self.hash) # cache result if the transaction is included in a block if receipt is not None and receipt['blockNumber'] is not None: self._receipt = receipt return receipt async def wait_for_confirmation(self): while (await self.status()) != 'confirmed': await asyncio.sleep(1) return await self.receipt() def __await__(self): return self.wait_for_confirmation().__await__()
python
#!/usr/bin/env python # -*- encoding: utf-8 -*- ''' @File : spider.py @Time : 2020-8-1 22:00:44 @Author : Recluse Xu @Version : 1.0 @Contact : [email protected] @Desc : 用Selenium处理SliderCaptcha ''' # here put the import lib from selenium.common.exceptions import TimeoutException from selenium.webdriver.common.action_chains import ActionChains from selenium.webdriver.common.by import By from selenium.webdriver.support import expected_conditions as EC from selenium.webdriver.support.wait import WebDriverWait import cv2 from browsermobproxy import Server import sys sys.path.append(sys.path[0][:sys.path[0].find('example')-1]) from my_util.selenium.selenium_chrome import get_selenium_chrome_web_driver class BrowsermobProxy(object): def __init__(self, browsermob_proxy_bat_location: str): self.server = Server(browsermob_proxy_bat_location, {'port': 9394}) def get_proxy(self): return self.server.create_proxy() def start_server(self): self.server.start() def stop_server(self): self.server.stop() def count_start_end_location(button_size, button_location, bar_size, bar_location): ''' 计算鼠标大致的起始坐标与终点坐标 ''' return [ {'x': button_location['x']+button_size['width']/2, 'y': button_location['y']+button_size['height']/2}, {'x': bar_location['x']+bar_size['width']-button_size['width']/2, 'y': bar_location['y']} ] def get_track(start_locationn, end_location): ''' 计算偏移 这里使用最简单的方法来获取偏移数组 计算好距离以后,让鼠标每次移动2像素 匀速移动 ''' distance = int(end_location['x'] - start_locationn['x']) + 1 return range(0, distance, 2) def move_mouse(chrome_driver, ver_button, track): # 移动鼠标 try: # 让鼠标点击并不放开滑块 ActionChains(chrome_driver).click_and_hold(ver_button).perform() for x in track: # 让鼠标根据偏移数组,一点一点的移动鼠标 ActionChains(chrome_driver).move_by_offset(xoffset=x, yoffset=0).perform() ActionChains(chrome_driver).release().perform() # 释放鼠标 except Exception: ActionChains(chrome_driver).release().perform() def get_distance(): img = cv2.imread('img.png') img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # ret, thresh = cv2.threshold(img, 230, 255, cv2.THRESH_BINARY_INV) img = cv2.cornerHarris(img, 2, 3, 0.04) cv2.imshow('img', img) cv2.waitKey(100000) def process(): bp = BrowsermobProxy(browsermob_proxy_bat_location) _proxy = bp.get_proxy() _proxy.new_har(".picsum.photos", options={'captureHeaders': True, 'captureContent': True}) chrome_driver = get_selenium_chrome_web_driver(proxy_server=str(_proxy.proxy)) chrome_driver.get('http://127.0.0.1:5000/LearningSpider#!') wait = WebDriverWait(chrome_driver, 15) try: item_button = wait.until( EC.element_to_be_clickable((By.ID, "the_slidewrcaptcha"))) item_button.click() except TimeoutException as e: print(e) exit(1) # 等待Ajax加载完毕 wait.until( lambda x: EC.text_to_be_present_in_element((By.XPATH, "//*[@id=\"captcha\"]/div/span"), "滑动填充")(chrome_driver) ) ver_button = wait.until( EC.element_to_be_clickable((By.XPATH, "//*[@id=\"captcha\"]/div/div[2]/div"))) the_img = wait.until( EC.presence_of_element_located((By.XPATH, "//*[@id=\"captcha\"]/canvas[1]"))) result = _proxy.har for entry in result['log']['entries']: _url = entry['request']['url'] # 根据URL找到数据接口 if "/api/v2/aweme/post" in _url: _response = entry['response'] _content = _response['content']['text'] # 获取接口返回内容 print(_content) bp.stop_server() # the_img.screenshot("img.png") # get_distance() # start_location, end_location = count_start_end_location(ver_button.size, ver_button.location, slide_bar.size, slide_bar.location) # track = get_track(start_location, end_location) # move_mouse(chrome_driver, ver_button, track) if __name__ == "__main__": process()
python
from __future__ import absolute_import, print_function import tensorflow as tf from tensorflow.keras import regularizers from niftynet.network.highres3dnet import HighResBlock from tests.niftynet_testcase import NiftyNetTestCase class HighResBlockTest(NiftyNetTestCase): def test_3d_increase_shape(self): input_shape = (2, 16, 16, 16, 8) x = tf.ones(input_shape) highres_layer = HighResBlock(n_output_chns=16, kernels=(3, 3), with_res=True) out = highres_layer(x, is_training=True) print(highres_layer) with self.cached_session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) out = sess.run(out) self.assertAllClose((2, 16, 16, 16, 16), out.shape) def test_3d_same_shape(self): input_shape = (2, 16, 16, 16, 8) x = tf.ones(input_shape) highres_layer = HighResBlock(n_output_chns=8, kernels=(3, 3), with_res=True) out = highres_layer(x, is_training=True) print(highres_layer) with self.cached_session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) out = sess.run(out) self.assertAllClose((2, 16, 16, 16, 8), out.shape) def test_3d_reduce_shape(self): input_shape = (2, 16, 16, 16, 8) x = tf.ones(input_shape) highres_layer = HighResBlock(n_output_chns=4, kernels=(3, 3), with_res=True) out = highres_layer(x, is_training=True) print(highres_layer) with self.cached_session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) out = sess.run(out) self.assertAllClose((2, 16, 16, 16, 4), out.shape) def test_3d_reg_increase_shape(self): input_shape = (2, 16, 16, 16, 8) x = tf.ones(input_shape) highres_layer = HighResBlock( n_output_chns=16, kernels=(3, 3), with_res=True, w_regularizer=regularizers.L2(0.3)) out = highres_layer(x, is_training=True) print(highres_layer) with self.cached_session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) out = sess.run(out) self.assertAllClose((2, 16, 16, 16, 16), out.shape) def test_3d_reg_same_shape(self): input_shape = (2, 16, 16, 16, 8) x = tf.ones(input_shape) highres_layer = HighResBlock( n_output_chns=8, kernels=(3, 3), with_res=True, w_regularizer=regularizers.L2(0.3)) out = highres_layer(x, is_training=True) print(highres_layer) with self.cached_session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) out = sess.run(out) self.assertAllClose((2, 16, 16, 16, 8), out.shape) def test_3d_reg_reduce_shape(self): input_shape = (2, 16, 16, 16, 8) x = tf.ones(input_shape) highres_layer = HighResBlock( n_output_chns=4, kernels=(3, 3), with_res=True, w_regularizer=regularizers.L2(0.3)) out = highres_layer(x, is_training=True) print(highres_layer) with self.cached_session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) out = sess.run(out) self.assertAllClose((2, 16, 16, 16, 4), out.shape) def test_2d_increase_shape(self): input_shape = (2, 16, 16, 8) x = tf.ones(input_shape) highres_layer = HighResBlock(n_output_chns=16, kernels=(3, 3), with_res=True) out = highres_layer(x, is_training=True) print(highres_layer) with self.cached_session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) out = sess.run(out) self.assertAllClose((2, 16, 16, 16), out.shape) def test_2d_same_shape(self): input_shape = (2, 16, 16, 8) x = tf.ones(input_shape) highres_layer = HighResBlock(n_output_chns=8, kernels=(3, 3), with_res=True) out = highres_layer(x, is_training=True) print(highres_layer) with self.cached_session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) out = sess.run(out) self.assertAllClose((2, 16, 16, 8), out.shape) def test_2d_reduce_shape(self): input_shape = (2, 
16, 16, 8) x = tf.ones(input_shape) highres_layer = HighResBlock(n_output_chns=4, kernels=(3, 3), with_res=True) out = highres_layer(x, is_training=True) print(highres_layer) with self.cached_session() as sess: sess.run(tf.compat.v1.global_variables_initializer()) out = sess.run(out) self.assertAllClose((2, 16, 16, 4), out.shape) if __name__ == "__main__": tf.test.main()
python
# -*- coding: utf-8 -*- """ @created on: 4/19/20, @author: Shreesha N, @version: v0.0.1 @system name: badgod Description: ..todo:: """ from torch.utils.tensorboard import SummaryWriter import torch import torch.nn as nn import torch.optim as optim import pandas as pd import numpy as np from torch import tensor import time import json import cv2 import random import torchvision import random from alcoaudio.networks.oneclass_net import OneClassNN, ConvAutoEncoder from alcoaudio.utils import file_utils from alcoaudio.datagen.audio_feature_extractors import preprocess_data from alcoaudio.utils.network_utils import accuracy_fn_ocnn, calc_average_class_score, log_summary_ocnn, normalize_image, \ custom_confusion_matrix, \ log_conf_matrix, write_to_npy from alcoaudio.utils.data_utils import read_h5py, read_npy from alcoaudio.datagen.augmentation_methods import librosaSpectro_to_torchTensor, time_mask, freq_mask, time_warp class OCNNRunner: def __init__(self, args): self.run_name = args.run_name + '_' + str(time.time()).split('.')[0] self.current_run_basepath = args.network_metrics_basepath + '/' + self.run_name + '/' self.learning_rate = args.learning_rate self.epochs = args.epochs self.test_net = args.test_net self.train_net = args.train_net self.batch_size = args.batch_size self.num_classes = args.num_classes self.audio_basepath = args.audio_basepath self.train_data_file = args.train_data_file self.test_data_file = args.test_data_file self.data_read_path = args.data_save_path self.is_cuda_available = torch.cuda.is_available() self.display_interval = args.display_interval self.sampling_rate = args.sampling_rate self.sample_size_in_seconds = args.sample_size_in_seconds self.overlap = args.overlap self.c = tensor(0.0) self.r = tensor(0.0) self.nu = None # Updated in data_reader() self.eps = 0.1 self.network_metrics_basepath = args.network_metrics_basepath self.tensorboard_summary_path = self.current_run_basepath + args.tensorboard_summary_path self.network_save_path = self.current_run_basepath + args.network_save_path self.network_restore_path = args.network_restore_path self.device = torch.device("cuda" if self.is_cuda_available else "cpu") self.network_save_interval = args.network_save_interval self.normalise = args.normalise_while_training self.dropout = args.dropout self.threshold = args.threshold self.debug_filename = self.current_run_basepath + '/' + args.debug_filename paths = [self.network_save_path, self.tensorboard_summary_path] file_utils.create_dirs(paths) self.cae_network = ConvAutoEncoder() self.cae_model_restore_path = args.cae_model_restore_path self.cae_network.load_state_dict(torch.load(self.cae_model_restore_path, map_location=self.device)) self.cae_network.eval() self.network = OneClassNN().to(self.device) self.learning_rate_decay = args.learning_rate_decay self.optimiser = optim.Adam(self.network.parameters(), lr=self.learning_rate) self.scheduler = torch.optim.lr_scheduler.ExponentialLR(self.optimiser, gamma=self.learning_rate_decay) self._min, self._max = float('inf'), -float('inf') if self.train_net: self.network.train() self.log_file = open(self.network_save_path + '/' + self.run_name + '.log', 'w') self.log_file.write(json.dumps(args)) if self.test_net: print('Loading Network') self.network.load_state_dict(torch.load(self.network_restore_path, map_location=self.device)) self.network.eval() self.log_file = open(self.network_restore_path.replace('_40.pt', '.log'), 'a') print('\n\n\n********************************************************', file=self.log_file) print('Testing 
Model - ', self.network_restore_path) print('Testing Model - ', self.network_restore_path, file=self.log_file) print('********************************************************', file=self.log_file) self.writer = SummaryWriter(self.tensorboard_summary_path) print("Network config:\n", self.network) print("Network config:\n", self.network, file=self.log_file) self.batch_loss, self.batch_accuracy, self.uar = [], [], [] print('Configs used:\n', json.dumps(args, indent=4)) print('Configs used:\n', json.dumps(args, indent=4), file=self.log_file) def data_reader(self, data_filepath, label_filepath, train, should_batch=True, shuffle=True): input_data, labels = read_npy(data_filepath), read_npy(label_filepath) if train: # nu declared in init, initialized here based on the number of anomalies. # Here intoxicated samples are considered anomalies self.nu = sum(labels) / len(labels) print('Calculated value of Nu ', self.nu) print('Calculated value of Nu ', self.nu, file=self.log_file) for x in input_data: self._min = min(np.min(x), self._min) self._max = max(np.max(x), self._max) data = [(x, y) for x, y in zip(input_data, labels)] random.shuffle(data) input_data, labels = np.array([x[0] for x in data]), [x[1] for x in data] print('Total data ', len(input_data)) print('Event rate', sum(labels) / len(labels)) print(np.array(input_data).shape, np.array(labels).shape) print('Total data ', len(input_data), file=self.log_file) print('Event rate', sum(labels) / len(labels), file=self.log_file) print(np.array(input_data).shape, np.array(labels).shape, file=self.log_file) print('Min max values used for normalisation ', self._min, self._max) print('Min max values used for normalisation ', self._min, self._max, file=self.log_file) # Normalizing `input data` on train dataset's min and max values if self.normalise: input_data = (input_data - self._min) / (self._max - self._min) if should_batch: batched_input = [input_data[pos:pos + self.batch_size] for pos in range(0, len(input_data), self.batch_size)] batched_labels = [labels[pos:pos + self.batch_size] for pos in range(0, len(labels), self.batch_size)] return batched_input, batched_labels else: return input_data, labels def run_for_epoch(self, epoch, x, y, type): self.test_batch_loss, self.test_batch_accuracy, self.test_batch_uar, self.test_scores_list, audio_for_tensorboard_test = [], [], [], [], None with torch.no_grad(): for i, (audio_data, label) in enumerate(zip(x, y)): label = tensor(label).float() audio_data = tensor(audio_data) latent_vector = self.get_latent_vector(audio_data) test_predictions, w, v = self.network(latent_vector) test_loss = self.loss_function(test_predictions, w, v) test_scores = self.calc_scores(test_predictions) test_accuracy, test_uar = accuracy_fn_ocnn(test_scores, label) self.test_scores_list.extend(test_scores) self.test_batch_loss.append(test_loss.numpy()) self.test_batch_accuracy.append(test_accuracy.numpy()) self.test_batch_uar.append(test_uar) print(f'***** {type} Metrics ***** ') print(f'***** {type} Metrics ***** ', file=self.log_file) print( f"Loss: {np.mean(self.test_batch_loss)} | Accuracy: {np.mean(self.test_batch_accuracy)} | UAR: {np.mean(self.test_batch_uar)}") print( f"Loss: {np.mean(self.test_batch_loss)} | Accuracy: {np.mean(self.test_batch_accuracy)} | UAR: {np.mean(self.test_batch_uar)}", file=self.log_file) y = [item for sublist in y for item in sublist] pos_score, neg_score = calc_average_class_score(tensor(self.test_scores_list), y) log_summary_ocnn(self.writer, epoch, accuracy=np.mean(self.test_batch_accuracy), 
loss=np.mean(self.test_batch_loss), uar=np.mean(self.test_batch_uar), lr=self.optimiser.state_dict()['param_groups'][0]['lr'], r=self.r, positive_class_score=pos_score, negative_class_score=neg_score, type=type) def get_latent_vector(self, audio_data): latent_filter_maps, _, _ = self.cae_network.encoder(audio_data) latent_vector = latent_filter_maps.view(-1, latent_filter_maps.size()[1:].numel()) return latent_vector.detach() def loss_function(self, y_pred, w, v): w = w.state_dict()['weight'] v = v.state_dict()['weight'] term1 = 0.5 * torch.sum(w ** 2) term2 = 0.5 * torch.sum(v ** 2) term3 = 1 / self.nu * torch.mean(torch.max(tensor(0.0), self.r - y_pred)) term4 = -1 * self.r # term3 = self.r ** 2 + torch.sum(torch.max(tensor(0.0), (y_pred - self.c) ** 2 - self.r ** 2), axis=1) # term3 = 1 / self.nu * torch.mean(term3) return term1 + term2 + term3 + term4 def calc_scores(self, outputs): scores = torch.sum((outputs - self.c) ** 2, axis=1) return scores def update_r_and_c(self, outputs): centroids = torch.mean(outputs, axis=0) centroids[(abs(centroids) < self.eps) & (centroids < 0)] = -self.eps centroids[(abs(centroids) < self.eps) & (centroids > 0)] = self.eps scores = torch.sum((outputs - centroids) ** 2, axis=1) sorted_scores, _ = torch.sort(scores) self.r = np.percentile(sorted_scores, self.nu * 100) # Updating the value of self.r self.c = centroids def initalize_c_and_r(self, train_x): predictions_list = [] for batch in train_x: batch = tensor(batch) latent_vec = self.get_latent_vector(batch) preds, _, _ = self.network(latent_vec) predictions_list.extend(preds.detach().numpy()) self.update_r_and_c(tensor(predictions_list)) def train(self): # For purposes of calculating normalized values, call this method with train data followed by test train_data, train_labels = self.data_reader(self.data_read_path + 'train_challenge_with_d1_data.npy', self.data_read_path + 'train_challenge_with_d1_labels.npy', shuffle=True, train=True) dev_data, dev_labels = self.data_reader(self.data_read_path + 'dev_challenge_with_d1_data.npy', self.data_read_path + 'dev_challenge_with_d1_labels.npy', shuffle=False, train=False) test_data, test_labels = self.data_reader(self.data_read_path + 'test_challenge_data.npy', self.data_read_path + 'test_challenge_labels.npy', shuffle=False, train=False) total_step = len(train_data) train_labels_flattened = [item for sublist in train_labels for item in sublist] self.w, self.v = None, None # Initialize c and r which is declared in init, on entire train data self.initalize_c_and_r(train_data) for epoch in range(1, self.epochs): self.batch_loss, self.batch_accuracy, self.batch_uar, self.total_predictions, self.total_scores, audio_for_tensorboard_train = [], [], [], [], [], None for i, (audio_data, label) in enumerate(zip(train_data, train_labels)): self.optimiser.zero_grad() label = tensor(label).float() audio_data = tensor(audio_data) latent_vector = self.get_latent_vector(audio_data) # if i == 0 and epoch == 1: # self.writer.add_graph(self.network, tensor(sample_data)) predictions, w, v = self.network(latent_vector) loss = self.loss_function(predictions, w, v) loss.backward() self.optimiser.step() self.total_predictions.extend(predictions.detach().numpy()) scores = self.calc_scores(predictions) self.total_scores.extend(scores) accuracy, uar = accuracy_fn_ocnn(scores, label) self.batch_loss.append(loss.detach().numpy()) self.batch_accuracy.append(accuracy) self.batch_uar.append(uar) if i % self.display_interval == 0: print( f"Epoch: {epoch}/{self.epochs} | Step: 
{i}/{total_step} | Loss: {loss} | Accuracy: {accuracy} | UAR: {uar}") print( f"Epoch: {epoch}/{self.epochs} | Step: {i}/{total_step} | Loss: {loss} | Accuracy: {accuracy} | UAR: {uar}", file=self.log_file) pos_class_score, neg_class_score = calc_average_class_score(tensor(self.total_scores), train_labels_flattened) self.update_r_and_c(tensor(self.total_predictions)) # Update value of r and c after every epoch # Decay learning rate self.scheduler.step(epoch=epoch) log_summary_ocnn(self.writer, epoch, accuracy=np.mean(self.batch_accuracy), loss=np.mean(self.batch_loss), uar=np.mean(self.batch_uar), lr=self.optimiser.state_dict()['param_groups'][0]['lr'], r=self.r, positive_class_score=pos_class_score, negative_class_score=neg_class_score, type='Train') print('***** Overall Train Metrics ***** ') print('***** Overall Train Metrics ***** ', file=self.log_file) print( f"Loss: {np.mean(self.batch_loss)} | Accuracy: {np.mean(self.batch_accuracy)} | UAR: {np.mean(self.batch_uar)} ") print( f"Loss: {np.mean(self.batch_loss)} | Accuracy: {np.mean(self.batch_accuracy)} | UAR: {np.mean(self.batch_uar)} ", file=self.log_file) print('Learning rate ', self.optimiser.state_dict()['param_groups'][0]['lr']) print('Learning rate ', self.optimiser.state_dict()['param_groups'][0]['lr'], file=self.log_file) # dev data self.run_for_epoch(epoch, dev_data, dev_labels, type='Dev') # test data self.run_for_epoch(epoch, test_data, test_labels, type='Test') if epoch % self.network_save_interval == 0: save_path = self.network_save_path + '/' + self.run_name + '_' + str(epoch) + '.pt' torch.save(self.network.state_dict(), save_path) print('Network successfully saved: ' + save_path) def test(self): test_data, test_labels = self.data_reader(self.data_read_path + 'test_data.npy', shuffle=False, should_batch=False) test_data, test_labels = test_data, test_labels test_predictions = self.network(test_data).detach() print(test_predictions) test_predictions = nn.Sigmoid()(test_predictions).squeeze(1) print(test_predictions) test_accuracy = accuracy_fn_ocnn(test_predictions, test_labels, self.threshold) print(f"Accuracy: {test_accuracy}") print(f"Accuracy: {test_accuracy}", file=self.log_file)
python
from django.db import models from .Newsletterapi import * # Create your models here. """class Summary_Art(models.Model): url = models.TextField() summary = get_summary(url) text = summary[0] summary = summary[1] #user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE) #Option to add Users to Model def get_summary(): return summary def get_text(): return text"""
python
"""empty message Revision ID: dc0c3839e0c4 Revises: 962314b7ff85 Create Date: 2021-12-07 08:58:26.839235 """ # revision identifiers, used by Alembic. revision = 'dc0c3839e0c4' down_revision = '962314b7ff85' from alembic import op import sqlalchemy as sa def upgrade(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ###
python
import requests from django.conf import settings from django.test import TestCase, RequestFactory from django.utils.six import text_type from dps.transactions import make_payment from dps.models import Transaction from .models import Payment class DpsTestCase(TestCase): def setUp(self): self.factory = RequestFactory() def test_interactive(self): if not settings.PXPAY_USERID: # can't test the actual dps integration without test credentials return amount = 112.45 payment = Payment.objects.create(amount=amount) request = self.factory.get('/', HTTP_HOST='localhost:8000') response = make_payment(payment, request=request) self.assertEqual(response.status_code, 302) response2 = requests.get(response['Location']) # check the dps page looks approximately correct self.assertIn('Payment Checkout', response2.text) self.assertIn(text_type(amount), response2.text) def test_recurring(self): pass def test_status_update(self): payment = Payment.objects.create(amount=1) trans = Transaction.objects.create(content_object=payment, status=Transaction.PROCESSING) self.assertEqual(trans.complete_transaction(True), True) self.assertEqual(trans.status, Transaction.SUCCESSFUL) # complete_transaction should only return True once self.assertEqual(trans.complete_transaction(True), False) # and shouldn't change once done self.assertEqual(trans.complete_transaction(False), False) self.assertEqual(trans.status, Transaction.SUCCESSFUL)
python
import torch.nn as nn from n3 import ExternNode class Linear(ExternNode): input_channels: int output_channels: int bias: bool def __init__(self, **kwargs): super().__init__(**kwargs) self._inner = nn.Linear(self.input_channels, self.output_channels, self.bias) def forward(self, x): return self._inner(x)
python
#! /usr/bin/env python3 from scripts.fileReadWriteOperations import * import copy import math import os import sys import pandas as pd def mergeTwoTranscripts( whole_annotations, transcript_id_i, transcript_id_j, chromosome ): """ """ # print("Merging",transcript_id_i,transcript_id_j) chromosome = transcript_id_i.split( "." )[0] transcript_id_i_info = whole_annotations[transcript_id_i] transcript_id_j_info = whole_annotations[transcript_id_j] new_transcript_id = ".".join( transcript_id_i.split( "." )[:-1] ) + "_" + transcript_id_i.split( "." )[-1] + "_merged_" + "_".join( transcript_id_j.split( "." )[:-1] ) + "." + transcript_id_j.split( "." )[-1] # print(transcript_id_i,transcript_id_j,new_transcript_id) sys.stdout.flush() whole_annotations[new_transcript_id] = {"exons":copy.deepcopy( whole_annotations[transcript_id_i]["exons"] ), "introns":[], "cov":whole_annotations[transcript_id_i]["cov"], "TPM":whole_annotations[transcript_id_i]["TPM"], "FPKM":whole_annotations[transcript_id_i]["FPKM"], "direction":whole_annotations[transcript_id_i]["direction"], "chromosome":chromosome, "annotator":"FINDER" } whole_annotations[new_transcript_id]["exons"][-1] = [whole_annotations[transcript_id_i]["exons"][-1][0], whole_annotations[transcript_id_j]["exons"][0][1]] if len( whole_annotations[transcript_id_j]["exons"] ) > 1: whole_annotations[new_transcript_id]["exons"].extend( whole_annotations[transcript_id_j]["exons"][1:] ) i = 1 while i < len( whole_annotations[new_transcript_id]["exons"] ): whole_annotations[new_transcript_id]["introns"].append( [whole_annotations[new_transcript_id]["exons"][i - 1][1] + 1, whole_annotations[new_transcript_id]["exons"][i][0] - 1] ) i += 1 return whole_annotations def mergeCloselySpacedTranscripts( options ): """ """ input_gtf_filename = options.output_assemblies_psiclass_terminal_exon_length_modified + "/combined/combined_cov_opp_split_redundancy_removed.gtf" output_gtf_filename = options.output_assemblies_psiclass_terminal_exon_length_modified + "/combined/combined_merged_transcripts.gtf" if os.path.exists( output_gtf_filename ) == True:return whole_annotations, useless1, useless2 = readAllTranscriptsFromGTFFileInParallel( [input_gtf_filename, "dummy", "dummy"] ) all_transcript_info = [] for transcript_id in whole_annotations: chromosome = whole_annotations[transcript_id]["chromosome"] transcript_start = whole_annotations[transcript_id]["transcript_start"] transcript_end = whole_annotations[transcript_id]["transcript_end"] cov = whole_annotations[transcript_id]["cov"] fpkm = whole_annotations[transcript_id]["FPKM"] tpm = whole_annotations[transcript_id]["TPM"] direction = whole_annotations[transcript_id]["direction"] all_transcript_info.append( [chromosome, transcript_id, transcript_start, transcript_end, cov, fpkm, tpm, direction] ) all_transcript_info_pd = pd.DataFrame( all_transcript_info, columns = ["chromosome", "transcript_id", "transcript_start", "transcript_end", "cov", "fpkm", "tpm", "direction"] ) all_transcript_info_pd = all_transcript_info_pd.sort_values( by = ["chromosome", "transcript_start"] ) remove_these_transcripts = [] for row_num, row in all_transcript_info_pd.iterrows(): chromosome, transcript_id, transcript_start, transcript_end, cov, fpkm, tpm, direction = row if direction == ".":continue potential_merger_transcript = all_transcript_info_pd[( all_transcript_info_pd["chromosome"] == chromosome ) & ( all_transcript_info_pd["transcript_id"] != transcript_id ) & ( all_transcript_info_pd["transcript_start"] >= transcript_end ) & ( 
all_transcript_info_pd["direction"] == direction ) & ( all_transcript_info_pd["transcript_start"] - transcript_end <= 5 ) ] if potential_merger_transcript.shape[0] > 0: for row_num_i, row_i in potential_merger_transcript.iterrows(): chromosome_i, transcript_id_i, transcript_start_i, transcript_end_i, cov_i, fpkm_i, tpm_i, direction_i = row_i if math.fabs( tpm - tpm_i ) < 2 and max( tpm, tpm_i ) < 5 and "cov" not in transcript_id and "cov" not in transcript_id_i: # print(transcript_id,transcript_id_i,tpm,tpm_i) remove_these_transcripts.append( transcript_id ) remove_these_transcripts.append( transcript_id_i ) whole_annotations = mergeTwoTranscripts( whole_annotations, transcript_id, transcript_id_i, chromosome_i ) sys.stdout.flush() for transcript_id in list( set( remove_these_transcripts ) ): chromosome = transcript_id.split( "." )[0] del whole_annotations[transcript_id] writeTranscriptsToFile( [whole_annotations, output_gtf_filename, 0] )
python
""" 85 maximal rectangle hard Given a rows x cols binary matrix filled with 0's and 1's, find the largest rectangle containing only 1's and return its area. """ from typing import List class Solution: def maximalRectangle(self, matrix: List[List[str]]) -> int:
python
from src import chck_res
import pytest


@pytest.fixture(scope="module")
def base_chck():
    data = "sandwich"
    return chck_res(data)
python
import gym import numpy as np import threading class FakeMultiThread(threading.Thread): def __init__(self, func, args=()): super().__init__() self.func = func self.args = args def run(self): self.result = self.func(*self.args) def get_result(self): try: return self.result except Exception: return None class gym_envs(object): def __init__(self, gym_env_name, n, render_mode='first'): ''' Input: gym_env_name: gym training environment id, i.e. CartPole-v0 n: environment number render_mode: mode of rendering, optional: first, last, all, random_[num] -> i.e. random_2, [list] -> i.e. [0, 2, 4] ''' self.n = n # environments number self.envs = [gym.make(gym_env_name) for _ in range(self.n)] # process observation self.obs_space = self.envs[0].observation_space if isinstance(self.obs_space, gym.spaces.box.Box): self.obs_high = self.obs_space.high self.obs_low = self.obs_space.low self.obs_type = 'visual' if len(self.obs_space.shape) == 3 else 'vector' self.reward_threshold = self.envs[0].env.spec.reward_threshold # reward threshold refer to solved # process action self.action_space = self.envs[0].action_space if isinstance(self.action_space, gym.spaces.box.Box): self.action_type = 'continuous' self.action_high = self.action_space.high self.action_low = self.action_space.low elif isinstance(self.action_space, gym.spaces.tuple.Tuple): self.action_type = 'Tuple(Discrete)' else: self.action_type = 'discrete' self.action_mu, self.action_sigma = self._get_action_normalize_factor() self._get_render_index(render_mode) def _get_render_index(self, render_mode): ''' get render windows list, i.e. [0, 1] when there are 4 training enviornment. ''' assert isinstance(render_mode, (list, str)), 'render_mode must have type of str or list.' if isinstance(render_mode, list): assert all([isinstance(i, int) for i in render_mode]), 'items in render list must have type of int' assert min(index) >= 0, 'index must larger than zero' assert max(index) <= self.n, 'render index cannot larger than environment number.' self.render_index = render_mode elif isinstance(render_mode, str): if render_mode == 'first': self.render_index = [0] elif render_mode == 'last': self.render_index = [-1] elif render_mode == 'all': self.render_index = [i for i in range(self.n)] else: a, b = render_mode.split('_') if a == 'random' and 0 < int(b) <= self.n: import random self.render_index = random.sample([i for i in range(self.n)], int(b)) else: raise Exception('render_mode must be first, last, all, [list] or random_[num]') def render(self): ''' render game windows. ''' [self.envs[i].render() for i in self.render_index] def close(self): ''' close all environments. ''' [env.close() for env in self.envs] def sample_action(self): ''' generate ramdom actions for all training environment. 
''' return np.array([env.action_space.sample() for env in self.envs]) def reset(self): self.dones_index = [] threadpool = [] for i in range(self.n): th = FakeMultiThread(self.envs[i].reset, args=()) threadpool.append(th) for th in threadpool: th.start() for th in threadpool: threading.Thread.join(th) obs = np.array([threadpool[i].get_result() for i in range(self.n)]) obs = self._maybe_one_hot(obs) return obs # if self.obs_type == 'visual': # return np.array([threadpool[i].get_result()[np.newaxis, :] for i in range(self.n)]) # else: # return np.array([threadpool[i].get_result() for i in range(self.n)]) def step(self, actions, scale=True): if scale == True: actions = self.action_sigma * actions + self.action_mu if self.action_type == 'discrete': actions = actions.reshape(-1,) elif self.action_type == 'Tuple(Discrete)': actions = actions.reshape(self.n, -1).tolist() threadpool = [] for i in range(self.n): th = FakeMultiThread(self.envs[i].step, args=(actions[i], )) threadpool.append(th) for th in threadpool: th.start() for th in threadpool: threading.Thread.join(th) results = [threadpool[i].get_result() for i in range(self.n)] # if self.obs_type == 'visual': # results = [ # [threadpool[i].get_result()[0][np.newaxis, :], *threadpool[i].get_result()[1:]] # for i in range(self.n)] # else: # results = [threadpool[i].get_result() for i in range(self.n)] obs, reward, done, info = [np.array(e) for e in zip(*results)] obs = self._maybe_one_hot(obs) self.dones_index = np.where(done)[0] return obs, reward, done, info def partial_reset(self): threadpool = [] for i in self.dones_index: th = FakeMultiThread(self.envs[i].reset, args=()) threadpool.append(th) for th in threadpool: th.start() for th in threadpool: threading.Thread.join(th) obs = np.array([threadpool[i].get_result() for i in range(self.dones_index.shape[0])]) obs = self._maybe_one_hot(obs, is_partial=True) return obs # if self.obs_type == 'visual': # return np.array([threadpool[i].get_result()[np.newaxis, :] for i in range(self.dones_index.shape[0])]) # else: # return np.array([threadpool[i].get_result() for i in range(self.dones_index.shape[0])]) def _get_action_normalize_factor(self): ''' get action mu and sigma. mu: action bias. sigma: action scale input: self.action_low: [-2, -3], self.action_high: [2, 6] return: mu: [0, 1.5], sigma: [2, 4.5] ''' if self.action_type == 'continuous': return (self.action_high + self.action_low) / 2, (self.action_high - self.action_low) / 2 else: return 0, 1 def _maybe_one_hot(self, obs, is_partial=False): """ Change discrete observation from list(int) to list(one_hot) format. for example: action: [[1, 0], [2, 1]] observation space: [3, 4] environment number: 2 then, output: [[0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0.] [0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]] """ obs_number = len(self.dones_index) if is_partial else self.n if hasattr(self.obs_space, 'n'): obs = obs.reshape(obs_number, -1) if isinstance(self.obs_space.n, (int, np.int32)): dim = [int(self.obs_space.n)] else: dim = list(self.obs_space.n) # 在CliffWalking-v0环境其类型为numpy.int32 multiplication_factor = dim[1:] + [1] n = np.array(dim).prod() ints = obs.dot(multiplication_factor) x = np.zeros([obs.shape[0], n]) for i, j in enumerate(ints): x[i, j] = 1 return x else: return obs
python
books = [ (1, "Learning Python", "", "Марк Лътз, Дейвид Асър", "O'Reily", 1999, 22.7), (2, "Think Python", "An Introduction to Software Design", "Алън Б. Дауни", "O'Reily", 2002, 9.4), (3, "Python Cookbook", "Recipes for Mastering Python 3", "Браян К. Джоунс и Дейвид М. Баазли", "O'Reily", 2011, 135.9) ] def print_bond(items): result = "" sum = 0 for b in items: # result += "| {:^3d} | {:<15.15s} | {:<15.15s} | {:<20.20s} | {:^12.12s} | {:<4d} | {:>7.2f} |\n"\ # .format(*b) line = f"| {b[0]:^3d} | {b[1]:<15.15s} | {b[2]:<15.15s} | {b[3]:<20.20s} | {b[4]:^12.12s} | {b[5]:<4d} | {b[6]:>7.2f} |\n" result += line sum += b[-1] l = len(line) total_str = f"Total: {sum:8.2f} \n" prefix = " " * (l - len(total_str)) result += prefix + total_str result += prefix + f"VAT : {sum:8.2f} \n" return result if __name__ == '__main__': print(print_bond(books))
python
import asyncio import discord from discord.ext import commands from otherscipts.helpers import create_mute_role class Moderator(commands.Cog): def __init__(self, bot, theme_color): self.bot = bot self.theme_color = theme_color self.warn_count = {} @commands.command(name="warn") @commands.has_guild_permissions(kick_members=True) async def warn(self, ctx, user: discord.Member = None, *, reason=None): if user is None or reason is None: await ctx.send("Insufficient arguments.") elif ctx.author.top_role.position <= user.top_role.position: await ctx.send("You cannot warn this user because their role is higher than or equal to yours.") else: print(f"Warning user {user.name} for {reason}...") if str(user) not in self.warn_count: self.warn_count[str(user)] = 1 else: self.warn_count[str(user)] += 1 embed = discord.Embed( title=f"{user.name} has been warned", color=self.theme_color) embed.add_field(name="Reason", value=reason) embed.add_field(name="This user has been warned", value=f"{self.warn_count[str(user)]} time(s)") await ctx.send(content=None, embed=embed) @commands.command(name="clearwarn", aliases=['cw', 'removewarns', 'rw']) @commands.has_guild_permissions(kick_members=True) async def clearwarn(self, ctx, user: discord.Member = None): if user is None: self.warn_count = {} await ctx.send("Clearing all warns.") elif ctx.author.top_role.position <= user.top_role.position: await ctx.send("You cannot clear this user's warnings because their role is higher than or equal to yours.") else: self.warn_count[str(user)] = 0 await ctx.send(f"Clearing warns for {user.mention}.") @commands.command(name="warncount") async def warncount(self, ctx, user: discord.Member): if str(user) not in self.warn_count: self.warn_count[str(user)] = 0 count = self.warn_count[str(user)] await ctx.send(f"{user.mention} has been warned {count} time(s)") @commands.command(name="mute") @commands.has_guild_permissions(kick_members=True) async def mute(self, ctx, user: discord.Member = None, time: str = None): if user is None: await ctx.send("Insufficient arguments.") elif ctx.author.top_role.position <= user.top_role.position: await ctx.send("You cannot mute this user because their role is higher than or equal to yours.") else: guild = ctx.guild mute_role = None for role in guild.roles: if role.name.lower() == "muted": mute_role = role break if mute_role in user.roles: await ctx.send("This user is already muted.") else: if not mute_role: await ctx.send("This server does not have a `Muted` Role. Creating one right now.") await ctx.send("This may take some time.") mute_role = await create_mute_role(guild) if time is None: await user.add_roles(mute_role) await ctx.send(f"User {user.mention} has been muted! They cannot speak.") else: time_unit = None parsed_time = None if "s" in time: time_unit = "seconds" parsed_time = time[0:(len(time) - 1)] elif "m" in time: time_unit = "minutes" parsed_time = time[0:(len(time) - 1)] elif "h" in time: time_unit = "hours" parsed_time = time[0:(len(time) - 1)] else: time_unit = "minutes" # default to minutes if user doesn't provide a time unit parsed_time = time[0:len(time)] await user.add_roles(mute_role) await ctx.send(f"User {user.mention} has been muted for {parsed_time} {time_unit}! 
They cannot speak.") if time_unit == "seconds": await asyncio.sleep(int(parsed_time)) elif time_unit == "minutes": await asyncio.sleep(int(parsed_time) * 60) elif time_unit == "hours": await asyncio.sleep(int(parsed_time) * 3600) await user.remove_roles(mute_role) await ctx.send(f"User {user.mention} has been unmuted after {parsed_time} {time_unit}! They can speak now.") @commands.command(name="unmute") @commands.has_guild_permissions(kick_members=True) async def unmute(self, ctx, user: discord.Member = None): if user is None: await ctx.send("Insufficient arguments.") elif ctx.author.top_role.position <= user.top_role.position: await ctx.send("You cannot unmute this user because their role is higher than or equal to yours.") else: guild = ctx.guild mute_role = None for role in guild.roles: if role.name.lower() == "muted": mute_role = role break if mute_role in user.roles: if not mute_role: mute_role = await create_mute_role(guild) await user.remove_roles(mute_role) await ctx.send(f"User {user.mention} has been unmuted! They can now speak.") else: await ctx.send("This user was never muted.") @commands.command(name="ban") @commands.has_guild_permissions(ban_members=True) async def ban(self, ctx, user: discord.Member = None, *, reason=None): if user is None: await ctx.send("Insufficient arguments.") elif ctx.author.top_role.position <= user.top_role.position: await ctx.send("You cannot ban this user because their role is higher than or equal to yours.") else: await ctx.guild.ban(user, reason=reason) if reason: await ctx.send(f"User **{user}** has been banned for reason: **{reason}**.") else: await ctx.send(f"User **{user}** has been banned.") await user.send(f"You have been **banned** from **{ctx.guild}** server due to the following reason:\n**{reason}**") @commands.command(name="tempban") @commands.has_guild_permissions(ban_members=True) async def tempban(self, ctx, user: discord.Member = None, days: int = 1): if user is None: await ctx.send("Insufficient arguments.") elif ctx.author.top_role.position <= user.top_role.position: await ctx.send("You cannot temporarily ban this user because their role is higher than or equal to yours.") else: await ctx.guild.ban(user) await ctx.send(f"User **{user}** has been temporarily banned for **{days} day(s)**") await user.send(f"You have been **temporarily banned** from **{ctx.guild}** server for **{days} day(s)**") await asyncio.sleep(days * 86400) # convert days to seconds await ctx.guild.unban(user) await ctx.send(f"**{user}** has been unbanned after a {days} day Temp Ban.") @commands.command(name="unban") @commands.has_guild_permissions(ban_members=True) async def unban(self, ctx, username: str = None, *, reason=None): if username is None: await ctx.send("Insufficient arguments.") else: banned_users = await ctx.guild.bans() member_name, member_discriminator = username.split('#') for ban_entry in banned_users: user = ban_entry.user if (user.name, user.discriminator) == (member_name, member_discriminator): await ctx.guild.unban(user) try: if reason: await ctx.send(f"User **{username}** has been unbanned for reason: **{reason}**.") else: await ctx.send(f"User **{username}** has been unbanned.") await user.send(f"You have been **unbanned** from **{ctx.guild}** server due to the following reason:\n**{reason}**") except NameError: await ctx.send(f"{username} is has not been banned in this server.") @commands.command(name="kick") @commands.has_guild_permissions(kick_members=True) async def kick(self, ctx, user: discord.Member = None, *, reason=None): if 
user is None: await ctx.send("Insufficient arguments.") elif ctx.author.top_role.position <= user.top_role.position: await ctx.send("You cannot kick this user because their role is higher than or equal to yours.") else: await ctx.guild.kick(user, reason=reason) if reason: await ctx.send(f"User **{user}** has been kicked for reason: **{reason}**.") else: await ctx.send(f"User **{user}** has been kicked.") await user.send(f"You have been **kicked** from **{ctx.guild}** server due to the following reason:\n**{reason}**") @commands.command(name="lockchannel", aliases=['lock']) @commands.has_guild_permissions(manage_guild=True) async def lockchannel(self, ctx, channel: discord.TextChannel = None): if channel is None: channel = ctx.channel for role in ctx.guild.roles: if role.permissions.administrator: await channel.set_permissions(role, send_messages=True, read_messages=True) elif role.name == "@everyone": await channel.set_permissions(role, send_messages=False) await ctx.send(f"🔒The channel {channel.mention} has been locked") @commands.command(name="unlockchannel", aliases=['unlock']) @commands.has_guild_permissions(manage_guild=True) async def unlockchannel(self, ctx, channel: discord.TextChannel = None): if channel is None: channel = ctx.channel await channel.set_permissions(ctx.guild.roles[0], send_messages=True) await ctx.send(f"🔓The channel {channel.mention} has been unlocked") @commands.command(name="slowmode", aliases=['sm']) @commands.has_guild_permissions(manage_guild=True) async def setdelay(self, ctx, seconds: int): await ctx.channel.edit(slowmode_delay=seconds) await ctx.send(f"Set the slowmode in this channel to **{seconds}** seconds!")
python
""" Credit to espnet: https://github.com/espnet/espnet/blob/master/espnet2/iterators/multiple_iter_factory.py """ import logging from typing import Callable from typing import Collection from typing import Iterator import numpy as np from typeguard import check_argument_types from muskit.iterators.abs_iter_factory import AbsIterFactory class MultipleIterFactory(AbsIterFactory): def __init__( self, build_funcs: Collection[Callable[[], AbsIterFactory]], seed: int = 0, shuffle: bool = False, ): assert check_argument_types() self.build_funcs = list(build_funcs) self.seed = seed self.shuffle = shuffle def build_iter(self, epoch: int, shuffle: bool = None) -> Iterator: if shuffle is None: shuffle = self.shuffle build_funcs = list(self.build_funcs) if shuffle: np.random.RandomState(epoch + self.seed).shuffle(build_funcs) for i, build_func in enumerate(build_funcs): logging.info(f"Building {i}th iter-factory...") iter_factory = build_func() assert isinstance(iter_factory, AbsIterFactory), type(iter_factory) yield from iter_factory.build_iter(epoch, shuffle)
python
import logging
import random
import time

from .exception import re_raisable

logger = logging.getLogger(__name__)


def retry(action, name, times=5):
    try:
        return action()
    except Exception as e:
        if times < 20:
            # Exponential backoff with jitter, capped at 30 seconds.
            throttle_seconds = min(pow(2, times * random.uniform(0.1, 0.2)), 30)
            logger.warning('Retrying "{0}" in {1} seconds: {2}'.format(name, throttle_seconds, str(e)))
            time.sleep(throttle_seconds)
            return retry(action, name, times + 1)
        re_raisable()
        raise e
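# Usage sketch: `action` is any zero-argument callable and `name` only labels the
# log messages; retry backs off exponentially and re-raises once the attempt
# budget is exhausted. The callable and path below are illustrative assumptions.
def _read_example(path="/tmp/example-input.txt"):
    with open(path) as f:
        return f.read()

# Example call (left commented so importing this module stays side-effect free):
# data = retry(_read_example, name="read example input")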
python
import os import sys import logging from typing import List, Type from intents.language_codes import LanguageCode, LANGUAGE_CODES, FALLBACK_LANGUAGE logger = logging.getLogger(__name__) def agent_language_folder(agent_cls: Type["intents.model.agent.Agent"]) -> str: main_agent_package_name = agent_cls.__module__.split('.')[0] main_agent_package = sys.modules[main_agent_package_name] if '__path__' not in main_agent_package.__dict__: # TODO: try workdir or something... logger.warning("Agent %s doesn't seem to be defined within a package. Language data will not be loaded.", agent_cls) return [], [] agent_folder = main_agent_package.__path__[0] language_folder = os.path.join(agent_folder, 'language') if not os.path.isdir(language_folder): raise ValueError(f"No language folder found for agent {agent_cls} (expected: {language_folder})") return language_folder def agent_supported_languages(agent_cls: Type["intents.model.agent.Agent"]) -> List[LanguageCode]: if agent_cls.languages: return agent_cls.languages result = [] language_folder = agent_language_folder(agent_cls) for f in os.scandir(language_folder): if f.is_dir() and not f.name.startswith('.') and not f.name.startswith('_'): if f.name in LANGUAGE_CODES: result.append(LanguageCode(f.name)) else: logger.warning("Unrecognized language code: '%s' (must be one of %s). Skipping language data.", f.name, LANGUAGE_CODES) return result def match_agent_language(agent_cls: Type["intents.model.agent.Agent"], language: LanguageCode) -> LanguageCode: """ Return a Language Code among the ones supported by Agent that matches `language`. If Agent supports `language` directly, `language` is returned as it is. Otherwise, look for a viable fallback language (e.g. :class:`LanguageCode.ENGLISH` is a viable fallback for :class:`LanguageCode.ENGLISH_US`). Raise `KeyError` if there is no viable language in Agent that matches the input one. Args: agent_cls: An Agent class language: The Language code to match in Agent Returns: A language code that matches `language` and that is supported by Agent Raises: KeyError: If Agent doesn't support `language` or one of its fallbacks """ # TODO: update export procedures to use this agent_languages = agent_supported_languages(agent_cls) if language in agent_languages: return language for fallback in FALLBACK_LANGUAGE[language]: if fallback in agent_languages: return fallback raise KeyError(f"Agent {agent_cls} does not support language {language}")
python
import io
import os

# CHANGE THIS to the path to your TN file, it might be in your downloads directory
filename = "C:/Users/benja/Documents/uwgit/en_tn/en_tn_02-EXO.tsv"

os.rename(filename, filename.replace('.tsv', '.old'))
filename = filename.replace('.tsv', '.old')

with io.open(filename, encoding='utf8') as f:
    with io.open(filename.replace('.old', '.tsv'), 'w', encoding='utf8', newline='\n') as newFile:
        for line in f:
            ls = line.split('\t')
            if "-" in ls[4] and "[[rc:" not in ls[8]:
                newOcNo = ls[8].rstrip() + " (See: [[rc://en/ta/man/translate/" + ls[4].strip() + "]])\n"
                ls[8] = newOcNo
            newLine = "\t".join(ls)
            newFile.write(newLine)
python
""" Provides classes that take protocol requests, send that request to the server, and write a particular genomics file type with the results. """ from __future__ import division from __future__ import print_function from __future__ import unicode_literals import collections import pysam import ga4gh.datamodel.reads as reads import ga4gh.protocol as protocol class AbstractConverter(object): """ Abstract base class for converter classes """ def __init__( self, container, objectIterator, outputFile, binaryOutput): self._container = container self._objectIterator = objectIterator self._outputFile = outputFile self._binaryOutput = binaryOutput ############################################################################## # SAM ############################################################################## class SamException(Exception): """ Something that went wrong during converting a SAM file """ class SamConverter(object): """ Converts a requested range from a GA4GH server into a SAM file. """ def __init__( self, client, readGroupId=None, referenceId=None, start=None, end=None, outputFileName=None, binaryOutput=False): self._client = client self._readGroup = self._client.get_read_group(readGroupId) self._reference = self._client.get_reference(referenceId) self._start = start self._end = end self._outputFileName = outputFileName self._binaryOutput = binaryOutput def convert(self): header = self._getHeader() targetIds = self._getTargetIds(header) # pysam can't write to file streams (except for stdout) # http://pysam.readthedocs.org/en/latest/usage.html#using-streams if self._binaryOutput: flags = "wb" else: flags = "wh" # h for header fileString = "-" if self._outputFileName is not None: fileString = self._outputFileName alignmentFile = pysam.AlignmentFile(fileString, flags, header=header) iterator = self._client.search_reads( [self._readGroup.id], self._reference.id, self._start, self._end) for read in iterator: alignedSegment = SamLine.toAlignedSegment(read, targetIds) alignmentFile.write(alignedSegment) alignmentFile.close() def _getHeader(self): # Create header information using self._reference header = { 'HD': {'VN': '1.0'}, 'SQ': [{ 'LN': self._reference.length, 'SN': self._reference.name }] } return header def _getTargetIds(self, header): # this seems to be how pysam sets the target ids targetIds = collections.defaultdict(int) targetId = 0 if 'SQ' in header: headerLines = header['SQ'] for headerLine in headerLines: refName = headerLine['SN'] targetIds[refName] = targetId targetId += 1 return targetIds class SamLine(object): """ Methods for processing a line in a SAM file """ _encoding = 'utf8' # see tables in SAM spec, section 1.5 _tagReservedFieldPrefixes = set(["X", "Y", "Z", ]) _tagIntegerFields = set([ "AM", "AS", "CM", "CP", "FI", "H0", "H1", "H2", "HI", "IH", "MQ", "NH", "NM", "OP", "PQ", "SM", "TC", "UQ", ]) _tagStringFields = set([ "BC", "BQ", "CC", "CO", "CQ", "CS", "CT", "E2", "FS", "LB", "MC", "MD", "OQ", "OC", "PG", "PT", "PU", "QT", "Q2", "R2", "RG", "RT", "SA", "U2", ]) _tagIntegerArrayFields = set(["FZ", ]) def __init__(self): raise SamException("SamLine can't be instantiated") @classmethod def toAlignedSegment(cls, read, targetIds): ret = pysam.AlignedSegment() # QNAME ret.query_name = read.fragment_name.encode(cls._encoding) # SEQ ret.query_sequence = read.aligned_sequence.encode(cls._encoding) # FLAG ret.flag = cls.toSamFlag(read) # RNAME if read.alignment is not None: refName = read.alignment.position.reference_name ret.reference_id = targetIds[refName] # POS if 
read.alignment is None: ret.reference_start = 0 else: ret.reference_start = int(read.alignment.position.position) # MAPQ if read.alignment is not None: ret.mapping_quality = read.alignment.mapping_quality # CIGAR ret.cigar = cls.toCigar(read) # RNEXT if read.next_mate_position is None: ret.next_reference_id = -1 else: nextRefName = read.next_mate_position.reference_name ret.next_reference_id = targetIds[nextRefName] # PNEXT if read.next_mate_position is None: ret.next_reference_start = -1 else: ret.next_reference_start = int(read.next_mate_position.position) # TLEN ret.template_length = read.fragment_length # QUAL ret.query_qualities = read.aligned_quality ret.tags = cls.toTags(read) return ret @classmethod def toSamFlag(cls, read): # based on algorithm here: # https://github.com/googlegenomics/readthedocs/ # blob/master/docs/source/migrating_tips.rst flag = 0 if read.number_reads == 2: flag = reads.SamFlags.setFlag( flag, reads.SamFlags.READ_PAIRED) if not read.improper_placement: flag = reads.SamFlags.setFlag( flag, reads.SamFlags.READ_PROPER_PAIR) if read.alignment is None: flag = reads.SamFlags.setFlag( flag, reads.SamFlags.READ_UNMAPPED) if read.next_mate_position.ByteSize() == 0: # cleared flag = reads.SamFlags.setFlag( flag, reads.SamFlags.MATE_UNMAPPED) if (read.alignment is not None and read.alignment.position.strand == protocol.NEG_STRAND): flag = reads.SamFlags.setFlag( flag, reads.SamFlags.READ_REVERSE_STRAND) if (read.next_mate_position is not None and read.next_mate_position.strand == protocol.NEG_STRAND): flag = reads.SamFlags.setFlag( flag, reads.SamFlags.MATE_REVERSE_STRAND) if read.read_number == -1: pass elif read.read_number == 0: flag = reads.SamFlags.setFlag( flag, reads.SamFlags.FIRST_IN_PAIR) elif read.read_number == 1: flag = reads.SamFlags.setFlag( flag, reads.SamFlags.SECOND_IN_PAIR) else: flag = reads.SamFlags.setFlag( flag, reads.SamFlags.FIRST_IN_PAIR) flag = reads.SamFlags.setFlag( flag, reads.SamFlags.SECOND_IN_PAIR) if read.secondary_alignment: flag = reads.SamFlags.setFlag( flag, reads.SamFlags.SECONDARY_ALIGNMENT) if read.failed_vendor_quality_checks: flag = reads.SamFlags.setFlag( flag, reads.SamFlags.FAILED_QUALITY_CHECK) if read.duplicate_fragment: flag = reads.SamFlags.setFlag( flag, reads.SamFlags.DUPLICATE_READ) if read.supplementary_alignment: flag = reads.SamFlags.setFlag( flag, reads.SamFlags.SUPPLEMENTARY_ALIGNMENT) return flag @classmethod def toCigar(cls, read): cigarTuples = [] if read.alignment is not None: for gaCigarUnit in read.alignment.cigar: operation = reads.SamCigar.ga2int(gaCigarUnit.operation) length = int(gaCigarUnit.operation_length) cigarTuple = (operation, length) cigarTuples.append(cigarTuple) return tuple(cigarTuples) @classmethod def _parseTagValue(cls, tag, value): if tag[0] in cls._tagReservedFieldPrefixes: # user reserved fields... 
not really sure what to do here return protocol.getValueFromValue(value.values[0]) \ .encode(cls._encoding) elif tag in cls._tagIntegerFields: return int(protocol.getValueFromValue(value.values[0])) elif tag in cls._tagStringFields: return protocol.getValueFromValue(value.values[0]) \ .encode(cls._encoding) elif tag in cls._tagIntegerArrayFields: return [int(integerString) for integerString in value] else: raise SamException("unrecognized tag '{}'".format(tag)) @classmethod def toTags(cls, read): tags = [] for tag, value in read.info.items(): val = cls._parseTagValue(tag, value) tags.append((tag.encode(cls._encoding), val)) retval = tuple(tags) return retval ############################################################################## # VCF ############################################################################## class VcfException(Exception): pass class VcfConverter(AbstractConverter): """ Converts the Variants represented by a SearchVariantsRequest into VCF format using pysam. """ def _writeHeader(self): variantSet = self._container # TODO convert this into pysam types and write to the output file. # For now, just print out some stuff to demonstrate how to get the # attributes we have. print("ID = ", variantSet.id) print("Dataset ID = ", variantSet.datasetId) print("Metadata = ") for metadata in variantSet.metadata: print("\t", metadata) def _writeBody(self): for variant in self._objectIterator: # TODO convert each variant object into pysam objects and write to # the output file. For now, just print the first variant and break. print(variant) break def convert(self): """ Run the conversion process. """ # TODO allocate the pysam VCF object which can be used for the # conversion process. See the convert method for ga2sam above. self._writeHeader() self._writeBody()
python
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 19:43:57 2020

@author: Alok
"""


class Info:
    def __init__(self, id_no, name, mobile, marks):
        self.id_no = id_no
        self.name = name
        self.mobile = mobile
        self.marks = marks


def merge_sort(arr):  # time comp nlogn
    if len(arr) > 1:
        m = len(arr) // 2
        L = arr[:m]
        R = arr[m:]
        print(L)
        print(R)
        merge_sort(L)
        merge_sort(R)
        i = j = k = 0
        while i < len(L) and j < len(R):
            if L[i].marks < R[j].marks:
                arr[k] = L[i]
                i += 1
            else:
                arr[k] = R[j]
                j += 1
            k += 1
        while i < len(L):
            arr[k] = L[i]
            i += 1
            k += 1
        while j < len(R):
            arr[k] = R[j]
            j += 1
            k += 1
    return arr
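# Usage sketch: merge_sort orders Info records by their `marks` attribute,
# sorting in place and also returning the list. The sample records below are
# illustrative assumptions.
if __name__ == "__main__":
    students = [
        Info(1, "Asha", "9990001111", 72),
        Info(2, "Ravi", "9990002222", 58),
        Info(3, "Meera", "9990003333", 91),
    ]
    for s in merge_sort(students):
        print(s.id_no, s.name, s.marks)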
python
from flask import Flask, render_template, request

app = Flask(__name__)


@app.route('/', methods=['GET', 'POST'])
def index():
    name = None
    if request.method == 'POST' and 'name' in request.form:
        name = request.form['name']
    return render_template('index.html', name=name)


if __name__ == '__main__':
    app.run(debug=True)
python
import matplotlib.pyplot as plt import numpy as np # save_zangle_width_file = '/home/ljm/NiuChuang/AuroraObjectData/zangle_width/agw_tr1058_te38044_arc_line (copy 1).txt' save_zangle_width_file = '/home/ljm/NiuChuang/AuroraObjectData/zangle_width/agw_tr1058_te38044_arc_cnd2_line.txt' f = open(save_zangle_width_file, 'r') lines = f.readlines() num_arc = len(lines) zenith_angles = [] arc_widths = [] for a in range(num_arc): line = lines[a] angle = float(line.split()[0]) width = float(line.split()[1][:-1]) zenith_angles.append(angle) arc_widths.append(width) plot_size_h = 6 plot_size_w = 8 fig_id = 1 plt.figure(fig_id, figsize=[plot_size_w, plot_size_h]) fig_id += 1 plt.scatter(zenith_angles, arc_widths, s=2) plt.title("Zenith angle range: -90~90") zenith_angles = np.array(zenith_angles) arc_widths = np.array(arc_widths) thresh_a = 45 thresh_w = 100 index_a = np.abs(zenith_angles) <= thresh_a index_w = arc_widths <= thresh_w index = index_a * index_w zenith_angles_s = zenith_angles[index] arc_widths_s = arc_widths[index] plt.figure(fig_id, figsize=[plot_size_w, plot_size_h]) fig_id += 1 plt.scatter(zenith_angles_s, arc_widths_s, s=4, c='g') # plt.title("Zenith angle range: -{}~{}".format(thresh_a, thresh_a)) plt.ylabel('Width (km)') plt.xlabel('Zenith angle') # mean curve. angle_range = list(range(-thresh_a, thresh_a+1)) # zenith_angles_s_int = np.int(zenith_angles_s) arc_widths_s_mean = np.zeros((len(angle_range))) for a in range(len(angle_range)): angle = angle_range[a] index_l = zenith_angles_s >= angle index_r = zenith_angles_s < angle+1 index = index_l * index_r arc_widths_s_a = arc_widths_s[index] arc_widths_s_mean[a] = arc_widths_s_a.mean() # arc_widths_s_mean[a] = (arc_widths_s_a.max() + arc_widths_s_a.min()) / 2 plt.plot(angle_range, arc_widths_s_mean, c='b') mean_point = -8.9 print("mean zenith angle:", mean_point) plt.plot([mean_point, mean_point], [0, thresh_w], linestyle='--', linewidth=3, color='blue') plt.savefig('width_distribution_cnd2.png', dpi=300, bbox_inches='tight', transparent=True) # Compute the mean and standard deviation. thresh_a = 15 index_ss_r = zenith_angles_s <= mean_point + thresh_a index_ss_l = zenith_angles_s >= mean_point - thresh_a index_ss = index_ss_l*index_ss_r zenith_angles_ss = zenith_angles_s[index_ss] arc_widths_ss = arc_widths_s[index_ss] arc_ss_mean = arc_widths_ss.mean() arc_ss_std = np.std(arc_widths_ss, ddof=1) print("mean:", arc_ss_mean) print("std::", arc_ss_std) plt.show()
python
import scrapy
from scrapy.selector import HtmlXPathSelector

# DmozItem and CraigslistSampleItem are assumed to be defined in this
# project's items module (e.g. items.py) and must be imported from there.


class DmozSpider(scrapy.Spider):
    name = "dmoz"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
    ]

    def parse(self, response):
        for sel in response.xpath('//ul/li'):
            item = DmozItem()
            item['title'] = sel.xpath('a/text()').extract()
            item['link'] = sel.xpath('a/@href').extract()
            item['desc'] = sel.xpath('text()').extract()
            yield item

    def parse_items(self, response):
        hxs = HtmlXPathSelector(response)
        titles = hxs.select('//span[@class="pl"]')
        items = []
        for title in titles:
            item = CraigslistSampleItem()
            item["title"] = title.select("a/text()").extract()
            item["link"] = title.select("a/@href").extract()
            items.append(item)
        return items
python
import numpy as np import cv2 import matplotlib.pyplot as plt from sklearn.cluster import DBSCAN from PIL import Image def to_pil(img): ''' Transforms a 3 dimentional matrix into a PIL image ''' return Image.fromarray(img.astype('uint8'), 'RGB') def to_cv2(img): open_cv_image = np.array(img) # Convert RGB to BGR return open_cv_image[:, :, ::-1].copy() def binary_to_rgb(arr): ''' Transforms a binary image into a RGB image ''' arr *= 255 return np.repeat(arr[:, :, np.newaxis], 3, axis=2) def store_images(original,clustered): ''' Converts and Stores the images locally ''' (to_pil(original)).save("Original.png") (to_pil(clustered)).save("Cluster.png") def run_clustering(file_name): ''' Run the clustering algorithm, requires the name of the image to be opened, returns the clustered image ''' img = cv2.imread(file_name) labimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) img_grey = cv2.cvtColor(labimg, cv2.COLOR_GRAY2BGR) labimg = cv2.cvtColor(img_grey, cv2.COLOR_BGR2LAB) n = 0 while(n<0): # change to other values for less downscale labimg = cv2.pyrDown(labimg) n = n+1 rows, cols, chs = labimg.shape # A higher eps means more changes are detected. db = DBSCAN(eps=1, min_samples=4, metric = 'euclidean',algorithm ='auto') indices = np.dstack(np.indices(labimg.shape[:2])) xycolors = np.concatenate((labimg, indices), axis=-1) feature_image = np.reshape(xycolors, [-1,5]) db.fit(feature_image) labels = db.labels_ labels[labels < 0.5] = 0 # set pixels with value < threshold to 0 labels[labels >= 0.5] = 1 # set pixels with value >= threshold to 1 img_cluster = np.reshape(labels, [rows, cols]) img_cluster = binary_to_rgb(img_cluster) #fig, ax = plt.subplots(1, 2, figsize=(20, 20)) #ax[0].imshow(img) #ax[1].imshow(img_cluster) #Store the images #store_images(img,img_cluster) return img_cluster def run_clustering_image_cv2(cv2_image): ''' Run the clustering algorithm, requires a cv2 image, returns the clustered image ''' img = cv2_image labimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) img_grey = cv2.cvtColor(labimg, cv2.COLOR_GRAY2BGR) labimg = cv2.cvtColor(img_grey, cv2.COLOR_BGR2LAB) n = 0 while(n<0): # change to other values for less downscale labimg = cv2.pyrDown(labimg) n = n+1 rows, cols, chs = labimg.shape # A higher eps means more changes are detected. db = DBSCAN(eps=1, min_samples=4, metric = 'euclidean',algorithm ='auto') indices = np.dstack(np.indices(labimg.shape[:2])) xycolors = np.concatenate((labimg, indices), axis=-1) feature_image = np.reshape(xycolors, [-1,5]) db.fit(feature_image) labels = db.labels_ labels[labels < 0.5] = 0 # set pixels with value < threshold to 0 labels[labels >= 0.5] = 1 # set pixels with value >= threshold to 1 img_cluster = np.reshape(labels, [rows, cols]) img_cluster = binary_to_rgb(img_cluster) #fig, ax = plt.subplots(1, 2, figsize=(20, 20)) #ax[0].imshow(img) #ax[1].imshow(img_cluster) #Store the images #store_images(img,img_cluster) return img_cluster def run_clustering_image_pil(pil_image): ''' Run the clustering algorithm, requires a PIL image, returns the clustered image ''' img = to_cv2(pil_image) labimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) img_grey = cv2.cvtColor(labimg, cv2.COLOR_GRAY2BGR) labimg = cv2.cvtColor(img_grey, cv2.COLOR_BGR2LAB) n = 0 while(n<0): # change to other values for less downscale labimg = cv2.pyrDown(labimg) n = n+1 rows, cols, chs = labimg.shape # A higher eps means more changes are detected. 
db = DBSCAN(eps=1, min_samples=4, metric = 'euclidean',algorithm ='auto') indices = np.dstack(np.indices(labimg.shape[:2])) xycolors = np.concatenate((labimg, indices), axis=-1) feature_image = np.reshape(xycolors, [-1,5]) db.fit(feature_image) labels = db.labels_ labels[labels < 0.5] = 0 # set pixels with value < threshold to 0 labels[labels >= 0.5] = 1 # set pixels with value >= threshold to 1 img_cluster = np.reshape(labels, [rows, cols]) img_cluster = binary_to_rgb(img_cluster) #fig, ax = plt.subplots(1, 2, figsize=(20, 20)) #ax[0].imshow(img) #ax[1].imshow(img_cluster) #Store the images #store_images(img,img_cluster) return img_cluster
python
#!/usr/bin/env python3 from setuptools import setup from setuptools import find_packages from codecs import open from os import path import sys import shutil import os from ly_bar_incr import __version__ here = path.abspath(path.dirname(__file__)) with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() setup( name='ly-bar-incr', version=__version__, description=('Increment bar numbers in comments and bar number checks of a' ' lilypond file.'), long_description=long_description, url='https://github.com/rickh94/ly-bar-incr', author='Rick Henry', author_email='[email protected]', license='MIT', python_requires='>=3.4', install_requires=['click'], py_modules=['ly_bar_incr'], entry_points={ 'console_scripts': [ 'ly-bar-incr=ly_bar_incr:cli', ], }, ) # # # install man pages # if 'install' in sys.argv: # man_path = '/usr/share/man/man1/' # if os.path.exists(man_path): # print("Installing man pages") # man_page = "doc/ly-bar-incr.1.gz" # shutil.copy2(man_page, man_path) # os.chmod(man_path + 'ly-bar-incr.1.gz', int('444', 8))
python
#!/usr/bin/env python3

import pathfinder as pf
import math

if __name__ == "__main__":
    points = [
        pf.Waypoint(-4, -1, math.radians(-45.0)),
        pf.Waypoint(-2, -2, 0),
        pf.Waypoint(0, 0, 0),
    ]

    info, trajectory = pf.generate(
        points,
        pf.FIT_HERMITE_CUBIC,
        pf.SAMPLES_HIGH,
        dt=0.05,  # 50ms
        max_velocity=1.7,
        max_acceleration=2.0,
        max_jerk=60.0,
    )

    # Do something with the new Trajectory...
python
import os import subprocess import yaml def run_command( command, shell=True, env=None, execute="/bin/sh", return_codes=None, ): """Run a shell command. The options available: * ``shell`` to be enabled or disabled, which provides the ability to execute arbitrary stings or not. if disabled commands must be in the format of a ``list`` * ``env`` is an environment override and or manipulation setting which sets environment variables within the locally executed shell. * ``execute`` changes the interpreter which is executing the command(s). * ``return_codes`` defines the return code that the command must have in order to ensure success. This can be a list of return codes if multiple return codes are acceptable. :param command: String :param shell: Boolean :param env: Dictionary :param execute: String :param return_codes: Integer :returns: Truple """ if env is None: env = os.environ stdout = subprocess.PIPE if return_codes is None: return_codes = [0] stderr = subprocess.PIPE process = subprocess.Popen( command, stdout=stdout, stderr=stderr, executable=execute, env=env, shell=shell, ) output, error = process.communicate() if process.returncode not in return_codes: return error, False else: return output, True def dump_yaml(file_path, data): """Dump data to a file. :param file_path: File path to dump data to :type file_path: String :param data: Dictionary|List data to dump :type data: Dictionary|List """ with open(os.path.abspath(os.path.expanduser(file_path)), "w") as f: yaml.safe_dump(data, f, default_flow_style=False) return file_path class ClientStatus(object): """Context manager for transmitting client status.""" def __init__(self, socket, job_id, ctx): """Initialize the UNIX socket connect context manager.""" self.ctx = ctx self.job_id = job_id self.job_state = ctx.nullbyte self.info = ctx.nullbyte self.socket = socket def start_processing(self): self.ctx.socket_multipart_send( zsocket=self.socket, msg_id=bytes(self.encode_string(item=self.job_id)), control=self.ctx.job_processing, ) @staticmethod def encode_string(item): """Inspect a given item and if it is a string type, encode it. :param item: Item to inspect, assumes item may be string type :type item: <ANY> :returns: String|<ANY> """ if isinstance(item, str): return item.encode() else: return item def __enter__(self): """Upon enter, return the context manager object for future updates. :returns: Object """ return self def __exit__(self, *args, **kwargs): """Upon exit, send a final status message.""" self.ctx.socket_multipart_send( zsocket=self.socket, msg_id=self.job_id, control=self.job_state, info=self.info, )
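# Usage sketch for the helpers above: run_command returns an (output, success)
# pair, where success reflects whether the return code was in return_codes, and
# dump_yaml writes a structure to disk and returns the path. The command and
# file name below are illustrative assumptions.
if __name__ == "__main__":
    out, ok = run_command("uname -a")
    if ok:
        print(out.decode())
    dump_yaml("/tmp/example-settings.yaml", {"retries": 3, "targets": ["web01", "web02"]})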
python
from random import random, randrange def ranksb ( N, K ) : if N < K : raise Exception, "N must be no less than K" if K == 0 : return [ ] L2 = K + 1 R = L2 A = K * [ 0 ] while 1 : M = 1 + int ( random ( ) * N ) I = 1 + ( M - 1 ) % K breakthencontinue = 0 if A [ I - 1 ] != 0 : while M != A [ I - 1 ] / L2 : LINK = A [ I - 1 ] % L2 if LINK == 0 : while 1 : R -= 1 if R == 0 : return map ( lambda a : a / L2, A ) if A [ R - 1 ] <= 0 : A [ I - 1 ] += R I = R A [ I - 1 ] = L2 * M break breakthencontinue = 1 break I = LINK else : continue if breakthencontinue : continue A [ I - 1 ] = L2 * M if __name__ == "__main__" : from fpformat import fix from time import time counts = { } n , k = 105, 90 sampleSize = 1000 timeStart = time ( ) for s in xrange ( sampleSize ) : a = ranksb ( n, k ) for i in a : if i in counts : counts [ i ] += 1 else : counts [ i ] = 1 print "Time to generate %i %i-subsets from set of size %i: %s seconds" \ % ( sampleSize, k, n, fix ( time ( ) - timeStart, 3 ) ) keys = counts . keys ( ) keys . sort ( ) totalCount = 0 idealCount = sampleSize * k / n ChiSquare = 0 print "Counts of occurrences of each sample element, " print "and difference between 'ideal' count and actual" for key in keys : print key, counts [ key ], abs ( counts [ key ] - idealCount ) totalCount += counts [ key ] ChiSquare +=float ( pow ( counts [ key ] - idealCount, 2 ) ) / idealCount print "Chi-squared test of uniformity: %s on %i d.f." % ( fix ( ChiSquare, 3), n - 1 )
python
import machine
import utime
import ntptime

from . import config as cfg

rtc = machine.RTC()


def set_rtc_from_ntp(config):
    try:
        mytime = utime.localtime(ntptime.time() + int(config['tz_offset']))
    except:
        mytime = utime.localtime()

    year, month, day, hour, minute, second, weekday, yearday = mytime
    rtc.datetime((year, month, day, weekday, hour, minute, second, 0))
    print("INFO: Set RTC to {}-{}-{} {:02}:{:02}:{:02}"
          .format(year, month, day, hour, minute, second))
python
""" Objetivo: Resolver questão 2 do segundo laboratorio. """ def fibonachi(n): #n é o ordem do elemento, por exemplo se n=1 retorna o primeiro termo da serie if n == 1 or n == 0: return 0 # primeiro elemento é 0 elif n == 2: return 1 # segundo elemento é 1 else: f_anterior = 0 f_atual = 1 f_aux = 0 for c in range(0, n - 2): # (n-2) para compensar o fato da serie iniciar com 0 e 1 f_aux = f_atual f_atual = f_atual + f_anterior f_anterior = f_aux return f_atual # terceiro ou mais elemento é calculado resultado = fibonachi(9) print(resultado)
python
''' CIS 122 Fall 2019 Assignment 7 Author: Zoe Turnbull Partner: Description: List manager program. ''' # VARIABLES list_var = [] list_cmd = ["Add", "Delete", "List", "Clear"] list_cmd_desc = ["Add to list.", "Delete Information.", "List information.", "Clear list."] left = True right = False # FUNCTIONS def cmd_help(): print("*** Available Commands ***") for item in list_cmd: item_num = list_cmd.index(item) print(pad_right(item, (10 - get_max_list_item_size(item))) + list_cmd_desc[item_num]) print("Empty to exit.") def cmd_add(t): while True: add_data = input("Enter information (empty to stop): ").strip() if add_data == '': break else: list_var.append(add_data) print("Added, item count = " + str(len(list_var))) return list_var def cmd_delete(t): while True: for item in list_var: item_num = list_var.index(item) print(pad_right(str(item_num), 2) + str(item)) print() del_data = input("Enter number to delete (empty to stop): ").strip() if del_data == '': break elif del_data.isdigit() == False: print("Must be digit.") print() else: del_data = int(del_data) if (len(list_var) - 1) < del_data: print("Invalid input") print() elif len(list_var) >= del_data: if len(list_var) > 0: del list_var[del_data] elif len(list_var) == 0: print("All items deleted.") break def cmd_list(t): print("List contains " + str(len(list_var)) + " item(s).") for item in list_var: print(item) def cmd_clear(t): print(str(len(list_var)) + " item(s) removed, list empty.") list_var.clear() def get_max_list_item_size(t): max_size = len(t) return max_size def pad_string(data, size, direction = left, character = " "): data = str(data.strip()) if direction == left: padded_string = str(character * size) + str(data) return padded_string elif direction == right: padded_string = str(data) + str(character * size) return padded_string def pad_left(data, size, character = " "): direction = left padded_string = (pad_string(data, size, direction, character)) return padded_string def pad_right(data, size, character = " "): direction = right padded_string = (pad_string(data, size, direction, character)) return padded_string # CODE while True: cmd = str(input("Enter a command (? for help): ").strip().lower()) if cmd == '': print("Goodbye!") break elif cmd == '?': cmd_help() print() elif cmd == 'add': cmd_add(list_var) print() elif cmd == 'delete' or cmd == 'del': cmd_delete(list_var) print() elif cmd == 'list': cmd_list(list_var) print() elif cmd == 'clear': cmd_clear(list_var) print() else: print("Unknown command.") print()
python
from jellylib.error import Error EOF = object() Newlines = frozenset("\n\r") LineEnd = frozenset(['\n', '\r', EOF]) Whitespaces = frozenset(" \t") Spaces = frozenset("\n\r\t ") LowerLetter = frozenset("abcdefghijklmnopqrstuvwxyz") UpperLetter = frozenset("ABCDEFGHIJKLMNOPQRSTUVWXYZ") Digit = frozenset("0123456789") Printables = frozenset(map(chr, range(32, 127))) Graphicals = frozenset(map(chr, range(33, 127))) Punctuation = Graphicals.difference(LowerLetter | UpperLetter | Digit) class SourceOpts: def __init__(self, tab_size): self.tab_size = tab_size class SourceFile: def __init__(self, filename, opts): self.filename = filename self.data = [] self.lines = None self.opts = opts def feed(self, chr_seq): self.data.extend(chr_seq) def compare_pos(self, pos1, pos2): return pos1 == pos2 def loc(self, pos): return SourceLoc(self, pos, pos) def advance_pos(self, pos): return pos + 1 def at_pos(self, idx): return self.data[idx] def get_span(self): return (self, 0, len(self.data)) def get_line_col_info(self, pos): if not self.lines: self._fill_line_info() line = self._bin_search_line(pos) p = self.lines[line] col = 0 while p < pos: ch = self.data[p] if ch == '\t': col = (col + self.opts.tab_size) // self.opts.tab_size * self.opts.tab_size elif ch in '\n\r': pass else: col += 1 p += 1 return line + 1, col + 1 def _bin_search_line(self, pos): begin = 0 end = len(self.lines) while end - begin > 1: mid = (end + begin) // 2 if self.lines[mid] > pos: end = mid else: begin = mid return begin def _fill_line_info(self): self.lines = [0] state = 0 for i, ch in enumerate(self.data): if (state == 1) or (state == 2 and ch != '\n'): self.lines.append(i) state = 0 if ch == '\n': state = 1 elif ch == '\r': state = 2 class SourceLoc: def __init__(self, file, begin:int, end:int): self.file = file self.begin = begin self.end = end def to(self, end): return SourceLoc(self.file, self.begin, end.end) def line(self): line, col = self.file.get_line_col_info(self.begin) return line def filename(self): return self.file.filename def __str__(self): cl_info = None if self.begin == self.end: line, col = self.file.get_line_col_info(self.begin) cl_info = "line {line}, col {col}".format(line=line, col=col) else: line1, col1 = self.file.get_line_col_info(self.begin) line2, col2 = self.file.get_line_col_info(self.end) cl_info = "{line1},{col1}:{line2},{col2}".format(line1=line1, col1=col1, line2=line2, col2=col2) if self.file.filename: return "{file}({loc})".format(file=self.file.filename, loc=cl_info) else: return cl_info class ArtificialSource: def __init__(self, loc): self.myloc = loc self.data = [] def feed(self, chr_seq): self.data.extend(chr_seq) def compare_pos(self, pos1, pos2): return pos1 == pos2 def loc(self, pos): return self.myloc def advance_pos(self, pos): return pos + 1 def at_pos(self, idx): return self.data[idx] def get_span(self): return (self, 0, len(self.data)) class SourceSpans: def __init__(self): self.spans = [] def add_span(self, provider, begin, end): self.spans.append((provider, begin, end)) def add_seq(self, loc, seq): src = ArtificialSource(loc) src.feed(seq) self.spans.append(src.get_span()) def loc(self, pos): return self.spans[pos[0]][0].loc(pos[1]) def compare_pos(self, pos1, pos2): pos1 = self.skip_empty(pos1) pos2 = self.skip_empty(pos2) return pos1 == pos2 def at_pos(self, pos): pos = self.skip_empty(pos) return self.spans[pos[0]][0].at_pos(pos[1]) def advance_pos(self, pos): span = self.spans[pos[0]] if span[0].compare_pos(pos[1], span[2]): pos = (pos[0] + 1, self.spans[pos[0] + 1][1]) else: pos 
= (pos[0], span[0].advance_pos(pos[1])) return self.skip_empty(pos) def skip_empty(self, pos): while True: span = self.spans[pos[0]] if span[0].compare_pos(pos[1], span[2]) and pos[0] < len(self.spans) - 1: pos = (pos[0] + 1, self.spans[pos[0] + 1][1]) else: return pos def begin_pos(self): return (0, self.spans[0][1]) def end_pos(self): return (len(self.spans) - 1, self.spans[-1][2]) def get_span(self): return self, self.begin_pos(), self.end_pos() class InputStream: def __init__(self, provider, begin:int, end:int): self.provider = provider self.begin = begin self.end = end self.cur = begin def get_span(self, begin, end): return (self.provider, begin, end) def tell(self): return self.cur def rewind(self, pos): self.cur = pos def reset(self): self.cur = self.begin def loc(self): return self.provider.loc(self.cur) def peek(self): if self.provider.compare_pos(self.cur, self.end): return EOF return self.provider.at_pos(self.cur) def advance(self): if self.is_eof(): return self.cur = self.provider.advance_pos(self.cur) def is_eof(self): return self.peek() is EOF class ParseError(Error): def __init__(self, *args): super().__init__(*args) class Parser: def __init__(self): self.stream = None def set_source(self, source): self.stream = InputStream(*source.get_span()) def set_stream(self, stream): self.stream = stream def peek(self): return self.stream.peek() def is_eof(self): return self.stream.is_eof() def loc(self): return self.stream.loc() def advance(self): self.stream.advance() def take(self): ch = self.stream.peek() if ch is EOF: return EOF self.stream.advance() return ch def tell(self): return self.stream.tell() def rewind(self, pos): self.stream.rewind(pos) def get_span(self, begin, end): return self.stream.get_span(begin, end) def expect(self, ch): if self.peek() != ch: if ch == EOF: self.report("unexpected character") else: self.report("expected '{char}'".format(char=ch)) self.take() def report(self, message, loc=None): if not loc: loc = self.loc() raise ParseError(loc, message) def parse_string(source): p = Parser() p.set_source(source) s = [] while not p.is_eof(): s.append(p.take()) return ''.join(s)
python
from typing import Callable

import pytest
from django.db import connection

from ..models import (
    AuditLogEntry,
    MyAuditLoggedModel,
    MyConvertedToAuditLoggedModel,
    MyManuallyAuditLoggedModel,
    MyNoLongerAuditLoggedModel,
    MyNoLongerManuallyAuditLoggedModel,
)


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_insert_is_audit_logged() -> None:
    """
    Test that the audit logging context manager works and that we can insert
    data, and that the insert is audit logged.
    """

    model = MyAuditLoggedModel.objects.create(some_text="Some text")

    assert model.audit_logs.count() == 1

    log_entry = model.audit_logs.get()
    assert log_entry.changes == {"id": model.id, "some_text": "Some text"}
    assert log_entry.log_object == model


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_insert_is_audit_logged_on_converted_model() -> None:
    """
    Test that the audit logging context manager works and that we can insert
    data, and that the insert is audit logged.
    """

    model = MyConvertedToAuditLoggedModel.objects.create(some_text="Some text")

    assert model.audit_logs.count() == 1

    log_entry = model.audit_logs.get()
    assert log_entry.changes == {"id": model.id, "some_text": "Some text"}
    assert log_entry.log_object == model


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_insert_is_not_audit_logged_on_removed_model() -> None:
    """
    Test that inserts are not audit logged for a model that is no longer
    audit logged.
    """

    assert AuditLogEntry.objects.count() == 0
    MyNoLongerAuditLoggedModel.objects.create(some_text="Some text")
    assert AuditLogEntry.objects.count() == 0


@pytest.mark.usefixtures("db", "audit_logging_context", "require_migrations")
def test_insert_is_audit_logged_on_manual_model() -> None:
    """
    Test that the audit logging context manager works and that we can insert
    data, and that the insert is audit logged.
    """

    assert AuditLogEntry.objects.count() == 0
    MyManuallyAuditLoggedModel.objects.create(some_text="Some text")
    assert AuditLogEntry.objects.count() == 1


@pytest.mark.usefixtures("db", "audit_logging_context", "require_migrations")
def test_insert_is_not_audit_logged_on_removed_manual_model() -> None:
    """
    Test that inserts are not audit logged for a model that is no longer
    manually audit logged.
    """

    assert AuditLogEntry.objects.count() == 0
    MyNoLongerManuallyAuditLoggedModel.objects.create(some_text="Some text")
    assert AuditLogEntry.objects.count() == 0


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_single_model_update_is_audit_logged() -> None:
    """
    Test that the audit logging context manager works and that we can update
    data, and that the update is audit logged.
    """

    model = MyAuditLoggedModel.objects.create(some_text="Some text")

    model.some_text = "Updated text"
    model.save(update_fields=["some_text"])

    assert model.audit_logs.count() == 2

    log_entry = model.audit_logs.latest("id")
    assert log_entry.action == "UPDATE"
    assert log_entry.changes == {"some_text": ["Some text", "Updated text"]}
    assert log_entry.log_object == model


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_bulk_update_is_audit_logged() -> None:
    """
    Test that the audit logging context manager works and that we can update
    data, and that the update is audit logged.
    """

    model = MyAuditLoggedModel.objects.create(some_text="Some text")

    MyAuditLoggedModel.objects.filter(id=model.id).update(some_text="Updated text")

    assert model.audit_logs.count() == 2

    log_entry = model.audit_logs.latest("id")
    assert log_entry.action == "UPDATE"
    assert log_entry.changes == {"some_text": ["Some text", "Updated text"]}
    assert log_entry.log_object == model


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_sql_update_is_audit_logged() -> None:
    """
    Test that the audit logging context manager works and that we can update
    data, and that the update is audit logged.
    """

    model = MyAuditLoggedModel.objects.create(some_text="Some text")

    with connection.cursor() as cursor:
        cursor.execute(
            f"UPDATE {MyAuditLoggedModel._meta.db_table} SET some_text=%s WHERE id=%s",
            ["Updated text", model.id],
        )

    assert model.audit_logs.count() == 2

    log_entry = model.audit_logs.latest("id")
    assert log_entry.action == "UPDATE"
    assert log_entry.changes == {"some_text": ["Some text", "Updated text"]}
    assert log_entry.log_object == model


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_delete_is_audit_logged() -> None:
    """
    Test that the audit logging context manager works and that we can delete
    data, and that the delete is audit logged.
    """

    assert AuditLogEntry.objects.count() == 0

    model = MyAuditLoggedModel.objects.create(some_text="Some text")
    model_id = model.id

    assert model.audit_logs.count() == 1

    model.delete()

    assert AuditLogEntry.objects.count() == 2

    log_entry = AuditLogEntry.objects.latest("id")
    assert log_entry.action == "DELETE"
    assert log_entry.changes == {"id": model_id, "some_text": "Some text"}
    assert log_entry.log_object is None


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_bulk_delete_is_audit_logged() -> None:
    """
    Test that the audit logging context manager works and that we can delete
    data, and that the delete is audit logged.
    """

    model = MyAuditLoggedModel.objects.create(some_text="Some text")
    model_id = model.id

    MyAuditLoggedModel.objects.filter(id=model_id).delete()

    assert model.audit_logs.count() == 2

    log_entry = model.audit_logs.latest("id")
    assert log_entry.action == "DELETE"
    assert log_entry.changes == {"id": model_id, "some_text": "Some text"}
    assert log_entry.log_object is None


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_sql_delete_is_audit_logged() -> None:
    """
    Test that the audit logging context manager works and that we can delete
    data, and that the delete is audit logged.
    """

    model = MyAuditLoggedModel.objects.create(some_text="Some text")
    model_id = model.id

    with connection.cursor() as cursor:
        cursor.execute(
            f"DELETE FROM {MyAuditLoggedModel._meta.db_table} WHERE id=%s",
            [model.id],
        )

    assert model.audit_logs.count() == 2

    log_entry = model.audit_logs.latest("id")
    assert log_entry.action == "DELETE"
    assert log_entry.changes == {"id": model_id, "some_text": "Some text"}
    assert log_entry.log_object is None


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_prefetch_audit_logged_object(django_assert_num_queries: Callable) -> None:
    """
    Test that the audit logged object can be prefetched on the log entry
    queryset, reducing the number of queries.
    """

    first_model = MyAuditLoggedModel.objects.create(some_text="Some text")
    second_model = MyAuditLoggedModel.objects.create(some_text="Some other text")

    audit_logs = AuditLogEntry.objects.order_by("id")

    # Test without prefetch, should generate 3 queries
    with django_assert_num_queries(3):
        assert len(audit_logs) == 2
        first, second = audit_logs
        assert first.log_object == first_model
        assert second.log_object == second_model

    # Update queryset to include prefetch
    audit_logs = audit_logs.prefetch_related("log_object")

    with django_assert_num_queries(2):
        assert len(audit_logs) == 2
        first, second = audit_logs
        assert first.log_object == first_model
        assert second.log_object == second_model


@pytest.mark.usefixtures("db", "audit_logging_context")
def test_prefetch_log_entries(django_assert_num_queries: Callable) -> None:
    """
    Test that audit log entries can be prefetched on the model queryset,
    reducing the number of queries.
    """

    # Create two objects and then update both, generating 4 log entries
    MyAuditLoggedModel.objects.create(some_text="Some text")
    MyAuditLoggedModel.objects.create(some_text="Some text")
    MyAuditLoggedModel.objects.update(some_text="Some other text")

    assert AuditLogEntry.objects.count() == 4

    models = MyAuditLoggedModel.objects.order_by("id")

    # Test without prefetch, should generate 3 queries
    with django_assert_num_queries(3):
        for model in models:
            audit_logs = model.audit_logs.all()
            assert len(audit_logs) == 2

    # Update queryset to include prefetch
    models = models.prefetch_related("audit_logs")

    with django_assert_num_queries(2):
        for model in models:
            audit_logs = model.audit_logs.all()
            assert len(audit_logs) == 2
python
s = 0
for x in range(1000):
    if x % 5 != 0 and x % 7 != 0:
        s += 1

print(s)
python
# Change into the folder where this file lives; otherwise everything would happen in the main folder
import os

diretorio_geral = os.path.dirname(__file__)
diretorio_local = 'texto01.txt'  # Location and name of the file I want to create

juntando_os_caminhos_do_diretorio_e_nome_do_arquivo_que_sera_criado = os.path.join(diretorio_geral, diretorio_local)

# Create, read, write and delete files
arquivo = open(juntando_os_caminhos_do_diretorio_e_nome_do_arquivo_que_sera_criado, 'w+')  # Write + read

# Everything could be sent with a single write
arquivo.write('Primeira Linha\n')
arquivo.write('Segunda Linha\n')
arquivo.write('Terceira Linha\n')

# Return the cursor to the top of the file
arquivo.seek(0, 0)
print('#################\n')
print(arquivo.read())  # Reads the whole file
print('#################\n')

# Return the cursor to the top of the file again
arquivo.seek(0, 0)
print('*****************\n')
print(arquivo.readline())  # Reads line by line
print(arquivo.readline())
print(arquivo.readline())
print('*****************\n')

arquivo.seek(0, 0)
print('-----------------\n')
print(arquivo.readlines(), '\n')  # Stores every line inside a list
print('-----------------\n')

arquivo.seek(0, 0)

# It is also possible to iterate over this list with a for loop
print('=================\n')
lista_arquivo = arquivo.readlines()
for linha in lista_arquivo:
    print(linha)
print('=================\n')

arquivo.close()  # Closes the file
python
import pygame import random import sys from pygame.locals import * class TimedWordsTeamGame(object): BLACK = (0, 0, 0) WHITE = (255, 255, 255) RED = (255, 0, 0) YELLOW = (230, 230, 0) GREEN = (0, 128, 0) BLUE = (0, 0, 255) INV_PLAY_TIME = 0.5 NUM_TEAM_MEMBERS = 30 def __init__(self): pygame.init() pygame.mixer.init() self.sound_right = pygame.mixer.Sound('audio\\beep.ogg') self.sound_wrong = pygame.mixer.Sound('audio\\buzzer.ogg') self.sound_win = pygame.mixer.Sound('audio\\win.ogg') self.xRes = 1024 self.yRes = 768 self.DISPLAYSURF = pygame.display.set_mode((self.xRes, self.yRes), 0, 32) pygame.display.set_caption('Timed Words Team Game') self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) self.font = pygame.font.SysFont(None, 72) self.team_font = pygame.font.SysFont(None, 32) self.team_1_name = "Team 1" self.team_2_name = "Team 2" self.active_team = random.sample([1, 2], 1)[0] self.team_1_score = 0 self.team_2_score = 0 self.words = [[["q11","q"],["q12","q"],["q13","q"],["a14","a"]],# add desired content here [["q21","q"],["q22","q"],["q23","q"],["a24","a"]], [["q31","q"],["q32","q"],["q33","q"],["a34","a"]], [["q41","q"],["q42","q"],["q43","q"],["a44","a"]]] def refresh_display(self): self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) active_team_label = self.font.render("Team {0}".format(self.active_team), True, TimedWordsTeamGame.BLACK) team_1_label = self.team_font.render(self.team_1_name, True, TimedWordsTeamGame.BLACK) team_2_label = self.team_font.render(self.team_2_name, True, TimedWordsTeamGame.BLACK) active_team_label_rect = active_team_label.get_rect() team_1_label_rect = team_1_label.get_rect() team_2_label_rect = team_2_label.get_rect() active_team_label_rect.center = (self.xRes / 2, self.yRes / 2) team_1_label_rect.left = 10 team_2_label_rect.right = self.xRes - 10 team_1_label_rect.bottom = self.yRes - 10 team_2_label_rect.bottom = self.yRes - 10 self.DISPLAYSURF.blit(active_team_label, active_team_label_rect) self.DISPLAYSURF.blit(team_1_label, team_1_label_rect) self.DISPLAYSURF.blit(team_2_label, team_2_label_rect) team_1_rect = pygame.Rect(10, ((self.yRes - 40) - ((self.team_1_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_1_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) team_2_rect = pygame.Rect(self.xRes - 50, ((self.yRes - 40) - ((self.team_2_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_2_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.RED, team_1_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.BLUE, team_2_rect) pygame.draw.line(self.DISPLAYSURF, TimedWordsTeamGame.BLACK, (0, 40), (self.xRes, 40), 4) pygame.display.update() while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() if event.type == pygame.KEYUP and event.key == pygame.K_SPACE: self.new_word() def game_score(self, key): self.end_ticks = pygame.time.get_ticks() team_scores = [self.team_1_score, self.team_2_score] points = 1000 / (self.end_ticks - self.start_ticks) if key == 'a': if self.word_list[0][1] == 'a': team_scores[self.active_team - 1] += points self.team_1_score, self.team_2_score = team_scores[0], team_scores[1] self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) team_1_label = self.team_font.render(self.team_1_name, True, 
TimedWordsTeamGame.BLACK) team_2_label = self.team_font.render(self.team_2_name, True, TimedWordsTeamGame.BLACK) team_1_label_rect = team_1_label.get_rect() team_2_label_rect = team_2_label.get_rect() team_1_label_rect.left = 10 team_2_label_rect.right = self.xRes - 10 team_1_label_rect.bottom = self.yRes - 10 team_2_label_rect.bottom = self.yRes - 10 team_1_rect = pygame.Rect(10, ((self.yRes - 40) - ((self.team_1_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_1_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) team_2_rect = pygame.Rect(self.xRes - 50, ((self.yRes - 40) - ((self.team_2_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_2_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) frag_1_text = self.team_font.render(self.word_list[0][0], True, TimedWordsTeamGame.GREEN) frag_2_text = self.team_font.render(self.word_list[1][0], True, TimedWordsTeamGame.BLACK) frag_3_text = self.team_font.render(self.word_list[2][0], True, TimedWordsTeamGame.BLACK) frag_4_text = self.team_font.render(self.word_list[3][0], True, TimedWordsTeamGame.BLACK) frag_1_text_rect = frag_1_text.get_rect() frag_2_text_rect = frag_1_text.get_rect() frag_3_text_rect = frag_1_text.get_rect() frag_4_text_rect = frag_1_text.get_rect() frag_1_text_rect.center = (self.xRes / 2, (1 / 5) * self.yRes) frag_2_text_rect.center = (self.xRes / 2, (2 / 5) * self.yRes) frag_3_text_rect.center = (self.xRes / 2, (3 / 5) * self.yRes) frag_4_text_rect.center = (self.xRes / 2, (4 / 5) * self.yRes) self.DISPLAYSURF.blit(team_1_label, team_1_label_rect) self.DISPLAYSURF.blit(team_2_label, team_2_label_rect) self.DISPLAYSURF.blit(frag_1_text, frag_1_text_rect) self.DISPLAYSURF.blit(frag_2_text, frag_2_text_rect) self.DISPLAYSURF.blit(frag_3_text, frag_3_text_rect) self.DISPLAYSURF.blit(frag_4_text, frag_4_text_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.RED, team_1_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.BLUE, team_2_rect) pygame.draw.line(self.DISPLAYSURF, TimedWordsTeamGame.BLACK, (0, 40), (self.xRes, 40), 4) pygame.display.update() self.sound_right.play() else: self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) team_1_label = self.team_font.render(self.team_1_name, True, TimedWordsTeamGame.BLACK) team_2_label = self.team_font.render(self.team_2_name, True, TimedWordsTeamGame.BLACK) team_1_label_rect = team_1_label.get_rect() team_2_label_rect = team_2_label.get_rect() team_1_label_rect.left = 10 team_2_label_rect.right = self.xRes - 10 team_1_label_rect.bottom = self.yRes - 10 team_2_label_rect.bottom = self.yRes - 10 team_1_rect = pygame.Rect(10, ((self.yRes - 40) - ((self.team_1_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_1_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) team_2_rect = pygame.Rect(self.xRes - 50, ((self.yRes - 40) - ((self.team_2_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_2_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) frag_1_text = self.team_font.render(self.word_list[0][0], True, TimedWordsTeamGame.RED) frag_2_text = self.team_font.render(self.word_list[1][0], True, 
TimedWordsTeamGame.BLACK) frag_3_text = self.team_font.render(self.word_list[2][0], True, TimedWordsTeamGame.BLACK) frag_4_text = self.team_font.render(self.word_list[3][0], True, TimedWordsTeamGame.BLACK) frag_1_text_rect = frag_1_text.get_rect() frag_2_text_rect = frag_1_text.get_rect() frag_3_text_rect = frag_1_text.get_rect() frag_4_text_rect = frag_1_text.get_rect() frag_1_text_rect.center = (self.xRes / 2, (1 / 5) * self.yRes) frag_2_text_rect.center = (self.xRes / 2, (2 / 5) * self.yRes) frag_3_text_rect.center = (self.xRes / 2, (3 / 5) * self.yRes) frag_4_text_rect.center = (self.xRes / 2, (4 / 5) * self.yRes) self.DISPLAYSURF.blit(team_1_label, team_1_label_rect) self.DISPLAYSURF.blit(team_2_label, team_2_label_rect) self.DISPLAYSURF.blit(frag_1_text, frag_1_text_rect) self.DISPLAYSURF.blit(frag_2_text, frag_2_text_rect) self.DISPLAYSURF.blit(frag_3_text, frag_3_text_rect) self.DISPLAYSURF.blit(frag_4_text, frag_4_text_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.RED, team_1_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.BLUE, team_2_rect) pygame.draw.line(self.DISPLAYSURF, TimedWordsTeamGame.BLACK, (0, 40), (self.xRes, 40), 4) pygame.display.update() self.sound_wrong.play() if key == 'b': if self.word_list[1][1] == 'a': team_scores[self.active_team - 1] += points self.team_1_score, self.team_2_score = team_scores[0], team_scores[1] self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) team_1_label = self.team_font.render(self.team_1_name, True, TimedWordsTeamGame.BLACK) team_2_label = self.team_font.render(self.team_2_name, True, TimedWordsTeamGame.BLACK) team_1_label_rect = team_1_label.get_rect() team_2_label_rect = team_2_label.get_rect() team_1_label_rect.left = 10 team_2_label_rect.right = self.xRes - 10 team_1_label_rect.bottom = self.yRes - 10 team_2_label_rect.bottom = self.yRes - 10 team_1_rect = pygame.Rect(10, ((self.yRes - 40) - ((self.team_1_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_1_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) team_2_rect = pygame.Rect(self.xRes - 50, ((self.yRes - 40) - ((self.team_2_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_2_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) frag_1_text = self.team_font.render(self.word_list[0][0], True, TimedWordsTeamGame.BLACK) frag_2_text = self.team_font.render(self.word_list[1][0], True, TimedWordsTeamGame.GREEN) frag_3_text = self.team_font.render(self.word_list[2][0], True, TimedWordsTeamGame.BLACK) frag_4_text = self.team_font.render(self.word_list[3][0], True, TimedWordsTeamGame.BLACK) frag_1_text_rect = frag_1_text.get_rect() frag_2_text_rect = frag_1_text.get_rect() frag_3_text_rect = frag_1_text.get_rect() frag_4_text_rect = frag_1_text.get_rect() frag_1_text_rect.center = (self.xRes / 2, (1 / 5) * self.yRes) frag_2_text_rect.center = (self.xRes / 2, (2 / 5) * self.yRes) frag_3_text_rect.center = (self.xRes / 2, (3 / 5) * self.yRes) frag_4_text_rect.center = (self.xRes / 2, (4 / 5) * self.yRes) self.DISPLAYSURF.blit(team_1_label, team_1_label_rect) self.DISPLAYSURF.blit(team_2_label, team_2_label_rect) self.DISPLAYSURF.blit(frag_1_text, frag_1_text_rect) self.DISPLAYSURF.blit(frag_2_text, frag_2_text_rect) self.DISPLAYSURF.blit(frag_3_text, frag_3_text_rect) self.DISPLAYSURF.blit(frag_4_text, frag_4_text_rect) 
pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.RED, team_1_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.BLUE, team_2_rect) pygame.draw.line(self.DISPLAYSURF, TimedWordsTeamGame.BLACK, (0, 40), (self.xRes, 40), 4) pygame.display.update() self.sound_right.play() else: self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) team_1_label = self.team_font.render(self.team_1_name, True, TimedWordsTeamGame.BLACK) team_2_label = self.team_font.render(self.team_2_name, True, TimedWordsTeamGame.BLACK) team_1_label_rect = team_1_label.get_rect() team_2_label_rect = team_2_label.get_rect() team_1_label_rect.left = 10 team_2_label_rect.right = self.xRes - 10 team_1_label_rect.bottom = self.yRes - 10 team_2_label_rect.bottom = self.yRes - 10 team_1_rect = pygame.Rect(10, ((self.yRes - 40) - ((self.team_1_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_1_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) team_2_rect = pygame.Rect(self.xRes - 50, ((self.yRes - 40) - ((self.team_2_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_2_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) frag_1_text = self.team_font.render(self.word_list[0][0], True, TimedWordsTeamGame.BLACK) frag_2_text = self.team_font.render(self.word_list[1][0], True, TimedWordsTeamGame.RED) frag_3_text = self.team_font.render(self.word_list[2][0], True, TimedWordsTeamGame.BLACK) frag_4_text = self.team_font.render(self.word_list[3][0], True, TimedWordsTeamGame.BLACK) frag_1_text_rect = frag_1_text.get_rect() frag_2_text_rect = frag_1_text.get_rect() frag_3_text_rect = frag_1_text.get_rect() frag_4_text_rect = frag_1_text.get_rect() frag_1_text_rect.center = (self.xRes / 2, (1 / 5) * self.yRes) frag_2_text_rect.center = (self.xRes / 2, (2 / 5) * self.yRes) frag_3_text_rect.center = (self.xRes / 2, (3 / 5) * self.yRes) frag_4_text_rect.center = (self.xRes / 2, (4 / 5) * self.yRes) self.DISPLAYSURF.blit(team_1_label, team_1_label_rect) self.DISPLAYSURF.blit(team_2_label, team_2_label_rect) self.DISPLAYSURF.blit(frag_1_text, frag_1_text_rect) self.DISPLAYSURF.blit(frag_2_text, frag_2_text_rect) self.DISPLAYSURF.blit(frag_3_text, frag_3_text_rect) self.DISPLAYSURF.blit(frag_4_text, frag_4_text_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.RED, team_1_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.BLUE, team_2_rect) pygame.draw.line(self.DISPLAYSURF, TimedWordsTeamGame.BLACK, (0, 40), (self.xRes, 40), 4) pygame.display.update() self.sound_wrong.play() if key == 'c': if self.word_list[2][1] == 'a': team_scores[self.active_team - 1] += points self.team_1_score, self.team_2_score = team_scores[0], team_scores[1] self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) team_1_label = self.team_font.render(self.team_1_name, True, TimedWordsTeamGame.BLACK) team_2_label = self.team_font.render(self.team_2_name, True, TimedWordsTeamGame.BLACK) team_1_label_rect = team_1_label.get_rect() team_2_label_rect = team_2_label.get_rect() team_1_label_rect.left = 10 team_2_label_rect.right = self.xRes - 10 team_1_label_rect.bottom = self.yRes - 10 team_2_label_rect.bottom = self.yRes - 10 team_1_rect = pygame.Rect(10, ((self.yRes - 40) - ((self.team_1_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_1_score * 
((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) team_2_rect = pygame.Rect(self.xRes - 50, ((self.yRes - 40) - ((self.team_2_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_2_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) frag_1_text = self.team_font.render(self.word_list[0][0], True, TimedWordsTeamGame.BLACK) frag_2_text = self.team_font.render(self.word_list[1][0], True, TimedWordsTeamGame.BLACK) frag_3_text = self.team_font.render(self.word_list[2][0], True, TimedWordsTeamGame.GREEN) frag_4_text = self.team_font.render(self.word_list[3][0], True, TimedWordsTeamGame.BLACK) frag_1_text_rect = frag_1_text.get_rect() frag_2_text_rect = frag_1_text.get_rect() frag_3_text_rect = frag_1_text.get_rect() frag_4_text_rect = frag_1_text.get_rect() frag_1_text_rect.center = (self.xRes / 2, (1 / 5) * self.yRes) frag_2_text_rect.center = (self.xRes / 2, (2 / 5) * self.yRes) frag_3_text_rect.center = (self.xRes / 2, (3 / 5) * self.yRes) frag_4_text_rect.center = (self.xRes / 2, (4 / 5) * self.yRes) self.DISPLAYSURF.blit(team_1_label, team_1_label_rect) self.DISPLAYSURF.blit(team_2_label, team_2_label_rect) self.DISPLAYSURF.blit(frag_1_text, frag_1_text_rect) self.DISPLAYSURF.blit(frag_2_text, frag_2_text_rect) self.DISPLAYSURF.blit(frag_3_text, frag_3_text_rect) self.DISPLAYSURF.blit(frag_4_text, frag_4_text_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.RED, team_1_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.BLUE, team_2_rect) pygame.draw.line(self.DISPLAYSURF, TimedWordsTeamGame.BLACK, (0, 40), (self.xRes, 40), 4) pygame.display.update() self.sound_right.play() else: self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) team_1_label = self.team_font.render(self.team_1_name, True, TimedWordsTeamGame.BLACK) team_2_label = self.team_font.render(self.team_2_name, True, TimedWordsTeamGame.BLACK) team_1_label_rect = team_1_label.get_rect() team_2_label_rect = team_2_label.get_rect() team_1_label_rect.left = 10 team_2_label_rect.right = self.xRes - 10 team_1_label_rect.bottom = self.yRes - 10 team_2_label_rect.bottom = self.yRes - 10 team_1_rect = pygame.Rect(10, ((self.yRes - 40) - ((self.team_1_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_1_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) team_2_rect = pygame.Rect(self.xRes - 50, ((self.yRes - 40) - ((self.team_2_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_2_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) frag_1_text = self.team_font.render(self.word_list[0][0], True, TimedWordsTeamGame.BLACK) frag_2_text = self.team_font.render(self.word_list[1][0], True, TimedWordsTeamGame.BLACK) frag_3_text = self.team_font.render(self.word_list[2][0], True, TimedWordsTeamGame.RED) frag_4_text = self.team_font.render(self.word_list[3][0], True, TimedWordsTeamGame.BLACK) frag_1_text_rect = frag_1_text.get_rect() frag_2_text_rect = frag_1_text.get_rect() frag_3_text_rect = frag_1_text.get_rect() frag_4_text_rect = frag_1_text.get_rect() frag_1_text_rect.center = (self.xRes / 2, (1 / 5) * self.yRes) frag_2_text_rect.center = (self.xRes / 2, (2 / 5) * self.yRes) frag_3_text_rect.center = (self.xRes / 2, (3 / 5) * 
self.yRes) frag_4_text_rect.center = (self.xRes / 2, (4 / 5) * self.yRes) self.DISPLAYSURF.blit(team_1_label, team_1_label_rect) self.DISPLAYSURF.blit(team_2_label, team_2_label_rect) self.DISPLAYSURF.blit(frag_1_text, frag_1_text_rect) self.DISPLAYSURF.blit(frag_2_text, frag_2_text_rect) self.DISPLAYSURF.blit(frag_3_text, frag_3_text_rect) self.DISPLAYSURF.blit(frag_4_text, frag_4_text_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.RED, team_1_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.BLUE, team_2_rect) pygame.draw.line(self.DISPLAYSURF, TimedWordsTeamGame.BLACK, (0, 40), (self.xRes, 40), 4) pygame.display.update() self.sound_wrong.play() if key == 'd': if self.word_list[3][1] == 'a': team_scores[self.active_team - 1] += points self.team_1_score, self.team_2_score = team_scores[0], team_scores[1] self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) team_1_label = self.team_font.render(self.team_1_name, True, TimedWordsTeamGame.BLACK) team_2_label = self.team_font.render(self.team_2_name, True, TimedWordsTeamGame.BLACK) team_1_label_rect = team_1_label.get_rect() team_2_label_rect = team_2_label.get_rect() team_1_label_rect.left = 10 team_2_label_rect.right = self.xRes - 10 team_1_label_rect.bottom = self.yRes - 10 team_2_label_rect.bottom = self.yRes - 10 team_1_rect = pygame.Rect(10, ((self.yRes - 40) - ((self.team_1_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_1_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) team_2_rect = pygame.Rect(self.xRes - 50, ((self.yRes - 40) - ((self.team_2_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_2_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) frag_1_text = self.team_font.render(self.word_list[0][0], True, TimedWordsTeamGame.BLACK) frag_2_text = self.team_font.render(self.word_list[1][0], True, TimedWordsTeamGame.BLACK) frag_3_text = self.team_font.render(self.word_list[2][0], True, TimedWordsTeamGame.BLACK) frag_4_text = self.team_font.render(self.word_list[3][0], True, TimedWordsTeamGame.GREEN) frag_1_text_rect = frag_1_text.get_rect() frag_2_text_rect = frag_1_text.get_rect() frag_3_text_rect = frag_1_text.get_rect() frag_4_text_rect = frag_1_text.get_rect() frag_1_text_rect.center = (self.xRes / 2, (1 / 5) * self.yRes) frag_2_text_rect.center = (self.xRes / 2, (2 / 5) * self.yRes) frag_3_text_rect.center = (self.xRes / 2, (3 / 5) * self.yRes) frag_4_text_rect.center = (self.xRes / 2, (4 / 5) * self.yRes) self.DISPLAYSURF.blit(team_1_label, team_1_label_rect) self.DISPLAYSURF.blit(team_2_label, team_2_label_rect) self.DISPLAYSURF.blit(frag_1_text, frag_1_text_rect) self.DISPLAYSURF.blit(frag_2_text, frag_2_text_rect) self.DISPLAYSURF.blit(frag_3_text, frag_3_text_rect) self.DISPLAYSURF.blit(frag_4_text, frag_4_text_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.RED, team_1_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.BLUE, team_2_rect) pygame.draw.line(self.DISPLAYSURF, TimedWordsTeamGame.BLACK, (0, 40), (self.xRes, 40), 4) pygame.display.update() self.sound_right.play() else: self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) team_1_label = self.team_font.render(self.team_1_name, True, TimedWordsTeamGame.BLACK) team_2_label = self.team_font.render(self.team_2_name, True, TimedWordsTeamGame.BLACK) team_1_label_rect = team_1_label.get_rect() 
team_2_label_rect = team_2_label.get_rect() team_1_label_rect.left = 10 team_2_label_rect.right = self.xRes - 10 team_1_label_rect.bottom = self.yRes - 10 team_2_label_rect.bottom = self.yRes - 10 team_1_rect = pygame.Rect(10, ((self.yRes - 40) - ((self.team_1_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_1_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) team_2_rect = pygame.Rect(self.xRes - 50, ((self.yRes - 40) - ((self.team_2_score) * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))), 40, (self.team_2_score * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)))) frag_1_text = self.team_font.render(self.word_list[0][0], True, TimedWordsTeamGame.BLACK) frag_2_text = self.team_font.render(self.word_list[1][0], True, TimedWordsTeamGame.BLACK) frag_3_text = self.team_font.render(self.word_list[2][0], True, TimedWordsTeamGame.BLACK) frag_4_text = self.team_font.render(self.word_list[3][0], True, TimedWordsTeamGame.RED) frag_1_text_rect = frag_1_text.get_rect() frag_2_text_rect = frag_1_text.get_rect() frag_3_text_rect = frag_1_text.get_rect() frag_4_text_rect = frag_1_text.get_rect() frag_1_text_rect.center = (self.xRes / 2, (1 / 5) * self.yRes) frag_2_text_rect.center = (self.xRes / 2, (2 / 5) * self.yRes) frag_3_text_rect.center = (self.xRes / 2, (3 / 5) * self.yRes) frag_4_text_rect.center = (self.xRes / 2, (4 / 5) * self.yRes) self.DISPLAYSURF.blit(team_1_label, team_1_label_rect) self.DISPLAYSURF.blit(team_2_label, team_2_label_rect) self.DISPLAYSURF.blit(frag_1_text, frag_1_text_rect) self.DISPLAYSURF.blit(frag_2_text, frag_2_text_rect) self.DISPLAYSURF.blit(frag_3_text, frag_3_text_rect) self.DISPLAYSURF.blit(frag_4_text, frag_4_text_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.RED, team_1_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.BLUE, team_2_rect) pygame.draw.line(self.DISPLAYSURF, TimedWordsTeamGame.BLACK, (0, 40), (self.xRes, 40), 4) pygame.display.update() self.sound_wrong.play() if (team_scores[self.active_team - 1] * ((self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS))) >= (self.yRes - 80): self.game_over() pygame.time.delay(3000)# modify according to needs self.active_team = 1 if self.active_team == 2 else 2 self.refresh_display() def game_over(self): self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) text = self.font.render("Team {0} wins!".format(self.active_team), True, TimedWordsTeamGame.GREEN) textpos = text.get_rect() textpos.center = (self.xRes / 2, self.yRes / 2) self.DISPLAYSURF.blit(text,textpos) self.team_1_score = 0 self.team_2_score = 0 pygame.display.update() self.sound_win.play() while True: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() if event.type == pygame.KEYUP and event.key == pygame.K_SPACE: self.run() def new_word(self): self.word_list = random.sample(self.words, 1)[0] random.shuffle(self.word_list) self.DISPLAYSURF.fill(TimedWordsTeamGame.WHITE) team_1_label = self.team_font.render(self.team_1_name, True, TimedWordsTeamGame.BLACK) team_2_label = self.team_font.render(self.team_2_name, True, TimedWordsTeamGame.BLACK) team_1_label_rect = team_1_label.get_rect() team_2_label_rect = team_2_label.get_rect() team_1_label_rect.left = 10 team_2_label_rect.right = self.xRes - 10 team_1_label_rect.bottom = self.yRes - 10 
team_2_label_rect.bottom = self.yRes - 10 team_1_rect = pygame.Rect(10, ((self.yRes - 40) - (self.team_1_score) * (self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)), 40, (self.team_1_score) * (self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)) team_2_rect = pygame.Rect(self.xRes - 50, ((self.yRes - 40) - (self.team_2_score) * (self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)), 40, (self.team_2_score) * (self.yRes - 80) / (TimedWordsTeamGame.INV_PLAY_TIME * TimedWordsTeamGame.NUM_TEAM_MEMBERS)) frag_1_text = self.team_font.render(self.word_list[0][0], True, TimedWordsTeamGame.RED) frag_2_text = self.team_font.render(self.word_list[1][0], True, TimedWordsTeamGame.YELLOW) frag_3_text = self.team_font.render(self.word_list[2][0], True, TimedWordsTeamGame.GREEN) frag_4_text = self.team_font.render(self.word_list[3][0], True, TimedWordsTeamGame.BLUE) frag_1_text_rect = frag_1_text.get_rect() frag_2_text_rect = frag_1_text.get_rect() frag_3_text_rect = frag_1_text.get_rect() frag_4_text_rect = frag_1_text.get_rect() frag_1_text_rect.center = (self.xRes / 2, (1 / 5) * self.yRes) frag_2_text_rect.center = (self.xRes / 2, (2 / 5) * self.yRes) frag_3_text_rect.center = (self.xRes / 2, (3 / 5) * self.yRes) frag_4_text_rect.center = (self.xRes / 2, (4 / 5) * self.yRes) self.DISPLAYSURF.blit(team_1_label, team_1_label_rect) self.DISPLAYSURF.blit(team_2_label, team_2_label_rect) self.DISPLAYSURF.blit(frag_1_text, frag_1_text_rect) self.DISPLAYSURF.blit(frag_2_text, frag_2_text_rect) self.DISPLAYSURF.blit(frag_3_text, frag_3_text_rect) self.DISPLAYSURF.blit(frag_4_text, frag_4_text_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.RED, team_1_rect) pygame.draw.rect(self.DISPLAYSURF, TimedWordsTeamGame.BLUE, team_2_rect) pygame.draw.line(self.DISPLAYSURF, TimedWordsTeamGame.BLACK, (0, 40), (self.xRes, 40), 4) pygame.display.update() self.start_ticks = pygame.time.get_ticks() self.run() def run(self): while True: for event in pygame.event.get(): if event.type == QUIT: pygame.quit() sys.exit() if event.type == pygame.KEYUP: if event.key == pygame.K_ESCAPE: pygame.quit() sys.exit() if event.key == pygame.K_SPACE: self.refresh_display() if event.key == pygame.K_d:#these pygame keys (a, e, i, m) will depend on your hardware setup self.game_score('a') if event.key == pygame.K_h: self.game_score('b') if event.key == pygame.K_l: self.game_score('c') if event.key == pygame.K_p: self.game_score('d') pygame.display.update() if __name__ == '__main__': new_instance = TimedWordsTeamGame() new_instance.run()
python
import numpy as np

# TODO: convert these to params files

# params used for the inverted pendulum system
m = 1.4     # mass of quadrotor (kg)
L = 0.3     # length from center of mass to point of thrust (meters)
gr = 9.81   # gravity (m/s^2)
I = m * L ** 2
b = 0.

max_torque = 1.0
max_speed = 8

states = 2           # theta and thetadot
num_controllers = 1

total_time = 1       # total time duration (s)
dt = 0.01            # discretization timestep
timesteps = int(total_time / dt)  # total timesteps

# goal state
xf = np.zeros([states, 1])
xf[0, 0] = np.pi
xf[1, 0] = 0

# ddp parameters
num_iter = 50        # optimization iterations

# TODO: fix this so learned_pendulum doesn't have to use this I guess
Q_f_ddp = np.diag([100, 1])
Q_r_ddp = np.zeros([states, states])
R_ddp = 0.1 * np.eye(num_controllers)
gamma = 0.5          # how much we account for du in updating the control during optimization
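
# --- Illustrative sketch (assumption; not part of the original params file) ---
# Shows how the goal state xf and the terminal weight Q_f_ddp defined above
# would typically enter a quadratic terminal cost in a DDP-style optimizer.
# The function name `terminal_cost` is hypothetical.
def terminal_cost(x):
    err = x - xf                                 # state error, shape (states, 1)
    return (0.5 * err.T @ Q_f_ddp @ err).item()  # scalar 0.5 * err' * Q_f * err

# e.g. cost of the initial hanging-down state [0, 0]:
# terminal_cost(np.zeros([states, 1]))  ->  0.5 * 100 * pi**2 ≈ 493.5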
python
import os from RouterConfiguration.Cisco.cisco_config_features import * from utils import * from network_features import * def route_map_deny(rm, seq): rm.perm[seq] = 'deny' return f'{rm} {rm.perm[seq]} {seq}' def route_map_permit(rm, seq): rm.perm[seq] = 'permit' return f'{rm} {rm.perm[seq]} {seq}' feature_config = { RouterFeatures.STATIC_ROUTE: lambda network, interface: f'ip route {int_to_ip(network.address)} {int_to_upper_mask(network.prefix)} {interface}', OSPFFeatures.INTERFACE_OSPF_COST: lambda interface, cost: f' ip ospf cost {cost}', OSPFFeatures.INTERFACE_OSPF_PRIORITY: lambda interface, priority: f' ip ospf priority {priority}', OSPFFeatures.AUTO_COST: lambda bandwidth: f' auto-cost reference-bandwidth {bandwidth}', OSPFFeatures.NO_COMPATIBLE_RFC1583: lambda: ' no compatible rfc1583', OSPFFeatures.DEFAULT_INFORMATION_ORIGINATE: lambda always, metric, metric_type: f' default-information originate {always}{metric}{metric_type}', OSPFFeatures.DEFAULT_METRIC: lambda metric: f' default-metric {metric}', OSPFFeatures.DISTANCE: lambda dist: f' distance {dist}', OSPFFeatures.REDISTRIBUTE_CONNECTED: lambda subnets: f' redistribute connected {subnets}', OSPFFeatures.REDISTRIBUTE_STATIC: lambda subnets: f' redistribute static {subnets}', OSPFFeatures.REDISTRIBUTE_BGP: lambda asn, subnets: f' redistribute bgp {asn}{subnets}', OSPFFeatures.MAX_METRIC: lambda external, stub, summary: f' max-metric router-lsa {external}{stub}{summary}', OSPFFeatures.AREA_FILTER_LIST: lambda area, filter_list, dir: f' area {area} filter-list prefix {filter_list}{dir}', OSPFFeatures.AREA_RANGE: lambda area, network, advertise, cost: f' area {area} range {int_to_ip(network.address)} {int_to_upper_mask(network.prefix)}{advertise}{cost}', OSPFFeatures.NSSA_STUB_DEFAULT_COST: lambda area, cost: f' area {area} default-cost {cost}', OSPFFeatures.NSSA_NO_REDISTRIBUTION: lambda area: f' area {area} nssa no-redistribution', OSPFFeatures.NSSA_DEFAULT_INFORMATION_ORIGINATE: lambda area, metric, metric_type: f' area {area} nssa default-information-originate{metric}{metric_type}', OSPFFeatures.NSSA_NO_SUMMARY: lambda area: f' area {area} nssa no-summary', OSPFFeatures.NSSA_ONLY: lambda area: f' area {area} nssa nssa-only', OSPFFeatures.STUB_NO_SUMMARY: lambda area: f' area {area} stub no-summary', BGPFeatures.ALWAYS_COMPARE_MED: lambda: ' bgp always-compare-med', BGPFeatures.BESTPATH_COMPARE_ROUTERID: lambda: ' bgp bestpath compare-routerid', BGPFeatures.BESTPATH_MED_CONFED: lambda missing_as_worst: f' bgp bestpath med confed {missing_as_worst}', BGPFeatures.BESTPATH_MED_MISSING: lambda: ' bgp bestpath med missing-as-worst', BGPFeatures.NO_CLIENT_TO_CLIENT_REFLECTION: lambda: ' no bgp client-to-client reflection', BGPFeatures.DEFAULT_LOCAL_PREFERENCE: lambda preference: f' bgp default local-preference {preference}', BGPFeatures.DETERMINISTIC_MED: lambda: ' bgp deterministic-med', BGPFeatures.MAXAS_LIMIT: lambda limit: f' bgp maxas-limit {limit}', BGPFeatures.DEFAULT_INFORMATION_ORIGINATE: lambda: ' default-information originate', BGPFeatures.ADDITIONAL_PATHS_INSTALL: lambda: ' bgp additional-paths install', BGPFeatures.AUTO_SUMMARY: lambda: ' auto-summary', BGPFeatures.BGP_DAMPENING: lambda route_map: f' bgp dampening {route_map or ""}', BGPFeatures.DISTANCE_BGP: lambda external, internal, local: f' distance bgp {external} {internal} {local}', BGPFeatures.REDISTRIBUTE_CONNECTED: lambda route_map: f' redistribute connected {route_map or ""}', BGPFeatures.REDISTRIBUTE_STATIC: lambda route_map: f' redistribute static {route_map 
or ""}', BGPFeatures.REDISTRIBUTE_OSPF: lambda route_map: f' redistribute ospf {route_map or ""}', BGPFeatures.SYNCHRONIZATION: lambda: ' synchronization', BGPFeatures.TABLE_MAP: lambda use_filter, route_map: f' table-map {route_map.name}{use_filter}', BGPFeatures.AGGREGATE_ADDRESS: lambda network, as_set, summary: f' aggregate-address {int_to_ip(network.address)} {int_to_upper_mask(network.prefix)}{as_set}{summary}', BGPFeatures.ADDITIONAL_PATHS: lambda options: f' bgp additional-paths {options}', BGPFeatures.NEIGHBOUR_MAXIMUM_PREFIX: lambda neighbour, max_prefix: f' neighbor {int_to_ip(neighbour)} maximum-prefix {max_prefix}', BGPFeatures.NEIGHBOUR_ROUTE_MAP_IN: lambda neighbour, route_map: f' neighbor {int_to_ip(neighbour)}{route_map} in', BGPFeatures.NEIGHBOUR_ROUTE_MAP_OUT: lambda neighbour, route_map: f' neighbor {int_to_ip(neighbour)}{route_map} out', BGPFeatures.NEIGHBOUR_NEXT_HOP_SELF: lambda neighbour: f' neighbor {int_to_ip(neighbour)} next-hop-self', BGPFeatures.NEIGHBOUR_CAPABILITY_ORF_PREFIX_LIST: lambda neighbour, options: f' neighbor {int_to_ip(neighbour)} capability orf prefix-list {options}', BGPFeatures.NEIGHBOUR_DEFAULT_ORIGINATE: lambda neighbour, route_map: f' neighbor {int_to_ip(neighbour)} default-originate {route_map or ""}', BGPFeatures.NEIGHBOUR_ROUTE_REFLECTOR_CLIENT: lambda neighbour: f' neighbor {int_to_ip(neighbour)} route-reflector-client', BGPFeatures.NEIGHBOUR_WEIGHT: lambda neighbour, weight: f' neighbor {int_to_ip(neighbour)} weight {weight}', RouteMapFeatures.MATCH_INTERFACE: lambda rm, seq, interface: f' match interface {interface}', RouteMapFeatures.MATCH_IP_PREFIX_LIST: lambda rm, seq, prefix_list: f' match ip address prefix-list {prefix_list}', RouteMapFeatures.MATCH_IP_NEXT_HOP: lambda rm, seq, access_list: f' match ip next-hop {access_list}', RouteMapFeatures.SET_INTERFACE: lambda rm, seq, interface: f' set interface {interface}', RouteMapFeatures.SET_IP_DEFAULT_NEXT_HOP: lambda rm, seq, ip: f' set ip default next-hop {int_to_ip(ip)}', RouteMapFeatures.SET_IP_NEXT_HOP: lambda rm, seq, ip: f' set ip next-hop {int_to_ip(ip)}', RouteMapFeatures.SET_METRIC: lambda rm, seq, metric: f' set metric {metric}', RouteMapFeatures.CONTINUE: lambda rm, seq: f' continue', RouteMapFeatures.MATCH_AS_PATH_ACCESS_LIST: lambda rm, seq, as_path: f' match as-path {as_path}', RouteMapFeatures.MATCH_COMMUNITY_LIST: lambda rm, seq, community, exact: f' match community {community} {exact}', RouteMapFeatures.SET_LOCAL_PREFERENCE: lambda rm, seq, preference: f' set local-preference {preference}', RouteMapFeatures.SET_AS_PATH_PREPEND: lambda rm, seq, AS: f' set as-path prepend {AS}', RouteMapFeatures.SET_COMM_LIST_DELETE: lambda rm, seq, community: f' set comm-list {community} delete', RouteMapFeatures.SET_COMMUNITY: lambda rm, seq, community, additive: f' set community {community} {additive}', RouteMapFeatures.SET_ORIGIN: lambda rm, seq, origin: f' set origin {origin}', RouteMapFeatures.SET_WEIGHT: lambda rm, seq, weight: f' set weight {weight}', RouteMapFeatures.SET_METRIC_TYPE_INTERNAL: lambda rm, seq: f' set metric-type internal', RouteMapFeatures.MATCH_FEATURE_BGP_OUT: lambda rm, seq, feature, *args: feature_config[feature](rm, seq, *args), RouteMapFeatures.MATCH_FEATURE_BGP_IN: lambda rm, seq, feature, *args: feature_config[feature](rm, seq, *args), RouteMapFeatures.SET_FEATURE_BGP_OUT: lambda rm, seq, feature, *args: feature_config[feature](rm, seq, *args), RouteMapFeatures.SET_FEATURE_BGP_IN: lambda rm, seq, feature, *args: feature_config[feature](rm, seq, *args), 
RouteMapFeatures.ROUTE_MAP_DENY: lambda rm, seq: route_map_deny(rm, seq), } feature_disable = { RouterFeatures.STATIC_ROUTE: lambda network, interface: f'no ip route {int_to_ip(network.address)} {int_to_upper_mask(network.prefix)} {interface}', OSPFFeatures.INTERFACE_OSPF_COST: lambda interface, cost: f'no ip ospf cost', OSPFFeatures.INTERFACE_OSPF_PRIORITY: lambda interface, priority: f'no ip ospf priority', OSPFFeatures.AUTO_COST: lambda bandwidth: f'no auto-cost reference-bandwidth {bandwidth}', OSPFFeatures.NO_COMPATIBLE_RFC1583: lambda: 'compatible rfc1583', OSPFFeatures.DEFAULT_INFORMATION_ORIGINATE: lambda always, metric, metric_type: f'no default-information originate', OSPFFeatures.DEFAULT_METRIC: lambda metric: f'no default-metric {metric}', OSPFFeatures.DISTANCE: lambda dist: f'no distance {dist}', OSPFFeatures.REDISTRIBUTE_CONNECTED: lambda subnets: f'no redistribute connected', OSPFFeatures.REDISTRIBUTE_STATIC: lambda subnets: f'no redistribute static', OSPFFeatures.REDISTRIBUTE_BGP: lambda asn, subnets: f'no redistribute bgp {asn}', OSPFFeatures.MAX_METRIC: lambda external, stub, summary: f'no max-metric router-lsa', OSPFFeatures.AREA_FILTER_LIST: lambda area, filter_list, dir: f'no area {area} filter-list prefix {filter_list}{dir}', OSPFFeatures.AREA_RANGE: lambda area, network, advertise, cost: f'no area {area} range {int_to_ip(network.address)} {int_to_upper_mask(network.prefix)}', OSPFFeatures.NSSA_STUB_DEFAULT_COST: lambda area, cost: f'no area {area} default-cost', OSPFFeatures.NSSA_NO_REDISTRIBUTION: lambda area: f'no area {area} nssa no-redistribution', OSPFFeatures.NSSA_DEFAULT_INFORMATION_ORIGINATE: lambda area, metric, metric_type: f'no area {area} nssa default-information-originate', OSPFFeatures.NSSA_NO_SUMMARY: lambda area: f'no area {area} nssa no-summary', OSPFFeatures.NSSA_ONLY: lambda area: f'no area {area} nssa nssa-only', OSPFFeatures.STUB_NO_SUMMARY: lambda area: f'no area {area} stub no-summary', BGPFeatures.ALWAYS_COMPARE_MED: lambda: 'no bgp always-compare-med', BGPFeatures.BESTPATH_COMPARE_ROUTERID: lambda: 'no bgp bestpath compare-routerid', BGPFeatures.BESTPATH_MED_CONFED: lambda missing_as_worst: f'no bgp bestpath med confed {missing_as_worst}\n', BGPFeatures.BESTPATH_MED_MISSING: lambda: 'no bgp bestpath med missing-as-worst', BGPFeatures.NO_CLIENT_TO_CLIENT_REFLECTION: lambda: ' bgp client-to-client reflection', BGPFeatures.DEFAULT_LOCAL_PREFERENCE: lambda preference: f'no bgp default local-preference', BGPFeatures.DETERMINISTIC_MED: lambda: 'no bgp deterministic-med', BGPFeatures.MAXAS_LIMIT: lambda limit: f'no bgp maxas-limit', BGPFeatures.DEFAULT_INFORMATION_ORIGINATE: lambda: 'no default-information originate', BGPFeatures.ADDITIONAL_PATHS_INSTALL: lambda: 'no bgp additional-paths install', BGPFeatures.AUTO_SUMMARY: lambda: 'no auto-summary', BGPFeatures.BGP_DAMPENING: lambda route_map: f'no bgp dampening', BGPFeatures.DISTANCE_BGP: lambda external, internal, local: f'no distance bgp', BGPFeatures.REDISTRIBUTE_CONNECTED: lambda route_map: f'no redistribute connected', BGPFeatures.REDISTRIBUTE_STATIC: lambda route_map: f'no redistribute static', BGPFeatures.REDISTRIBUTE_OSPF: lambda route_map: f'no redistribute ospf', BGPFeatures.SYNCHRONIZATION: lambda: 'no synchronization', BGPFeatures.TABLE_MAP: lambda use_filter, route_map: f'no table-map', BGPFeatures.AGGREGATE_ADDRESS: lambda network, as_set, summary: f'no aggregate-address {int_to_ip(network.address)} {int_to_upper_mask(network.prefix)}', BGPFeatures.ADDITIONAL_PATHS: lambda options: 
f'no bgp additional-paths', BGPFeatures.NEIGHBOUR_MAXIMUM_PREFIX: lambda neighbour, max_prefix: f'no neighbor {int_to_ip(neighbour)} maximum-prefix {max_prefix}', BGPFeatures.NEIGHBOUR_ROUTE_MAP_IN: lambda neighbour, route_map: f'no neighbor {int_to_ip(neighbour)}{route_map} in', BGPFeatures.NEIGHBOUR_ROUTE_MAP_OUT: lambda neighbour, route_map: f'no neighbor {int_to_ip(neighbour)}{route_map} out', BGPFeatures.NEIGHBOUR_NEXT_HOP_SELF: lambda neighbour: f'no neighbor {int_to_ip(neighbour)} next-hop-self', BGPFeatures.NEIGHBOUR_CAPABILITY_ORF_PREFIX_LIST: lambda neighbour, options: f'no neighbor {int_to_ip(neighbour)} capability orf prefix-list {options}', BGPFeatures.NEIGHBOUR_DEFAULT_ORIGINATE: lambda neighbour, route_map: f'no neighbor {int_to_ip(neighbour)} default-originate', BGPFeatures.NEIGHBOUR_ROUTE_REFLECTOR_CLIENT: lambda neighbour: f'no neighbor {int_to_ip(neighbour)} route-reflector-client', BGPFeatures.NEIGHBOUR_WEIGHT: lambda neighbour, weight: f'no neighbor {int_to_ip(neighbour)} weight', RouteMapFeatures.MATCH_INTERFACE: lambda rm, seq, interface: f'no match interface {interface}', RouteMapFeatures.MATCH_IP_PREFIX_LIST: lambda rm, seq, prefix_list: f'no match ip address prefix-list {prefix_list}', RouteMapFeatures.MATCH_IP_NEXT_HOP: lambda rm, seq, access_list: f'no match ip next-hop {access_list}', RouteMapFeatures.SET_INTERFACE: lambda rm, seq, interface: f'no set interface {interface}', RouteMapFeatures.SET_IP_DEFAULT_NEXT_HOP: lambda rm, seq, ip: f'no set ip default next-hop {int_to_ip(ip)}', RouteMapFeatures.SET_IP_NEXT_HOP: lambda rm, seq, ip: f'no set ip next-hop {int_to_ip(ip)}', RouteMapFeatures.SET_METRIC: lambda rm, seq, metric: f'no set metric {metric}', RouteMapFeatures.MATCH_AS_PATH_ACCESS_LIST: lambda rm, seq, as_path: f'no match as-path {as_path}', RouteMapFeatures.MATCH_COMMUNITY_LIST: lambda rm, seq, community, exact: f'no match community {community}', RouteMapFeatures.SET_LOCAL_PREFERENCE: lambda rm, seq, preference: f'no set local-preference {preference}', RouteMapFeatures.SET_AS_PATH_PREPEND: lambda rm, seq, AS: f'no set as-path prepend {AS}', RouteMapFeatures.SET_COMM_LIST_DELETE: lambda rm, seq, community: f'no set comm-list {community} delete', RouteMapFeatures.SET_COMMUNITY: lambda rm, seq, community, additive: f'no set community {community}{additive}', RouteMapFeatures.SET_ORIGIN: lambda rm, seq, origin: f'no set origin {origin}', RouteMapFeatures.SET_WEIGHT: lambda rm, seq, weight: f'no set weight {weight}', RouteMapFeatures.SET_METRIC_TYPE_INTERNAL: lambda rm, seq: f'no set metric-type internal', RouteMapFeatures.MATCH_FEATURE_BGP_OUT: lambda rm, seq, feature, *args: feature_disable[feature](rm, seq, *args), RouteMapFeatures.MATCH_FEATURE_BGP_IN: lambda rm, seq, feature, *args: feature_disable[feature](rm, seq, *args), RouteMapFeatures.SET_FEATURE_BGP_OUT: lambda rm, seq, feature, *args: feature_disable[feature](rm, seq, *args), RouteMapFeatures.SET_FEATURE_BGP_IN: lambda rm, seq, feature, *args: feature_disable[feature](rm, seq, *args), RouteMapFeatures.ROUTE_MAP_DENY: lambda rm, seq: route_map_permit(rm, seq), } filter_config = [RouteMapFeatures.ROUTE_MAP_DENY] bgp_af_features = [BGPFeatures.ADDITIONAL_PATHS] def config_mode(router, feature, arg): mode = { RouterFeatures: lambda router: (), OSPFFeatures: lambda router: (f'router ospf {router.ospf_proc}',), BGPFeatures: lambda router: (f'router bgp {router.AS}',) } if feature in interface_features: return f'interface {arg.name}', elif feature in filter_config: return () elif feature in 
bgp_af_features: return f'router bgp {router.AS}', f'address-family ipv4' elif type(feature) == RouteMapFeatures: rm, seq = arg return f'{rm} {rm.perm[seq]} {seq}', else: return mode[type(feature)](router) def exit_config_mode(feature): mode = { RouterFeatures: [], OSPFFeatures: [f'exit'], BGPFeatures: [f'exit'], RouteMapFeatures: [f'exit'] } if feature in filter_config: return [] elif feature in bgp_af_features: return [f'exit-address-family', f'exit'] else: return mode[type(feature)] def generate_maps_lists_config(router): config = [] for route_map in router.bgp_in_route_maps: for seq in route_map.perm: config.append(f'{route_map} {route_map.perm[seq]} {seq}') if seq in route_map.match_features: feature, *args = route_map.match_features[seq] config.append(feature_config[feature](route_map, seq, *args)) if seq in route_map.set_features: feature, *args = route_map.set_features[seq] config.append(feature_config[feature](route_map, seq, *args)) for route_map in router.bgp_out_route_maps: for seq in route_map.perm: config.append(f'{route_map} {route_map.perm[seq]} {seq}') if seq in route_map.match_features: feature, *args = route_map.match_features[seq] config.append(feature_config[feature](route_map, seq, *args)) if seq in route_map.set_features: feature, *args = route_map.set_features[seq] config.append(feature_config[feature](route_map, seq, *args)) for prefix_list in router.prefix_lists: for seq in prefix_list.perm: config.append(f'ip prefix-list {prefix_list} seq {seq} {prefix_list.perm[seq]} ' f'{int_to_ip(prefix_list.prefix[seq].address)}/' f'{prefix_list.prefix[seq].prefix}{prefix_list.eq[seq]}') for comm_list in router.comm_lists: comms = ' '.join(comm_list.comms) config.append(f'ip community-list {comm_list.name} {comm_list.perm} {comms}') for as_path_list in router.as_path_lists: config.append(f'ip as-path access-list {as_path_list.name} {as_path_list.perm} {as_path_list.regex}') for access_list in router.access_lists: config.append(f'access-list {access_list.num} {access_list.perm} {int_to_ip(access_list.net.address)} {int_to_lower_mask(access_list.net.prefix)}') return config def generate_ospf_config(router): config = ['router ospf ' + str(router.ospf_proc), f' router-id {int_to_ip(router.router_id)}'] for area in router.ospf_areas: for net in area.networks: config.append(f' network {int_to_ip(net.address)} {int_to_lower_mask(net.prefix)} area {area}') if area.type == OSPF_Area_Type.NSSA: config.append(' area ' + str(area) + ' nssa') elif area.type == OSPF_Area_Type.STUB: config.append(' area ' + str(area) + ' stub') return config def generate_bgp_config(router): config = [f'router bgp {router.AS.num}', f' bgp router-id {int_to_ip(router.router_id)}'] for neighbour in router.bgp_neighbours: config.append(f' neighbor {int_to_ip(neighbour.address)} remote-as {neighbour.AS.num}') config.append(f' neighbor {int_to_ip(neighbour.address)} update-source {neighbour.interface.name}') config.append(f' neighbor {int_to_ip(neighbour.address)} advertisement-interval 0') config.append(' address-family ipv4') for net in router.AS.networks: config.append(f' network {int_to_ip(net.address)} mask {int_to_upper_mask(net.prefix)}') for neighbour in router.bgp_neighbours: config.append(f' neighbor {int_to_ip(neighbour.address)} activate') config.append(' exit-address-family') return config def get_base_config(router): config = [f'hostname {router.name}', 'interface loopback 0', f' ip address {int_to_ip(router.router_id)} {int_to_upper_mask(32)}'] for interface in router.interfaces: if 
interface.address is not None: config.append(f'interface {interface.name}') config.append(' ip address ' + int_to_ip(interface.address) + ' ' + int_to_upper_mask(interface.prefix)) if interface.area is not None: config.append(f' ip ospf {router.ospf_proc} area {interface.area}') for network, interface in router.fixed_static_routes: config.append(f'ip route {int_to_ip(network.address)} {int_to_upper_mask(network.prefix)} {interface.name}') if Protocols.OSPF in router.enabled_protocols or Protocols.BGP in router.enabled_protocols: config.extend(generate_ospf_config(router)) if Protocols.BGP in router.enabled_protocols: config.extend(generate_bgp_config(router)) config.extend(generate_maps_lists_config(router)) return config def write_config(router, path): config = get_base_config(router) os.makedirs(os.path.dirname(path), exist_ok=True) with open(f'{path}{router.name}.cfg', 'w') as f: f.write('\n'.join(config))
python
################################################# # Implements a dynamical dense layer that allows # both adding and removing both input and output # features and a simple update step for both. # # Inspired by "Lifelong Learning with Dynamically # Expandable Networks", ICLR, 2017 (arXiv:1708.01547) ################################################# import tensorflow as tf import numpy as np class DynamicMatrix: """The dynamic matrix that allows adding and removing features""" def __init__(self, shape, std=0.1): self.gradient_step = tf.Variable(0.0, trainable=False) if shape is not None: self.mat = tf.Variable(tf.random.normal(shape, stddev=std), trainable=True) self.mom = tf.Variable(np.zeros(shape).astype("float32"), trainable=False) self.mom2 = tf.Variable(np.zeros(shape).astype("float32"), trainable=False) self.dim = len(shape) @classmethod def from_state(cls, state): obj = cls(None) obj.mat = state[0] obj.mom = state[1] obj.mom2 = state[2] return obj def expand_out(self, n, std): """Add a random output feature""" new_row = tf.random.normal(self.mat.shape[:-1] + (n,), stddev=std) self.mat = tf.Variable( tf.concat([self.mat, new_row], self.dim - 1), trainable=True ) # Set momenta for the new row to zero mom_row = tf.Variable(np.zeros((self.mom.shape[:-1] + (n,))).astype("float32")) self.mom = tf.Variable( tf.concat([self.mom, mom_row], self.dim - 1), trainable=False ) mom2_row = tf.Variable( np.zeros((self.mom2.shape[:-1] + (n,))).astype("float32") ) self.mom2 = tf.Variable( tf.concat([self.mom2, mom2_row], self.dim - 1), trainable=False ) def contract_out(self, n, index): """Remove a random output feature""" if self.shape[-1] > 1: start = [0 for x in self.shape] size = list(self.shape) size[-1] = n * index new_mat = tf.slice(self.mat, start, size) new_mom = tf.slice(self.mom, start, size) new_mom2 = tf.slice(self.mom2, start, size) start[-1] = n * (index + 1) size[-1] = self.shape[-1] - n * (index + 1) new_mat = tf.concat( [new_mat, tf.slice(self.mat, start, size)], self.dim - 1 ) new_mom = tf.concat( [new_mom, tf.slice(self.mom, start, size)], self.dim - 1 ) new_mom2 = tf.concat( [new_mom2, tf.slice(self.mom2, start, size)], self.dim - 1 ) self.mat = tf.Variable(new_mat, trainable=True) self.mom = tf.Variable(new_mom, trainable=False) self.mom2 = tf.Variable(new_mom2, trainable=False) def expand_in(self, n, std): """Add a random input feature""" new_column = tf.random.normal( self.mat.shape[:-2] + (n, self.mat.shape[-1]), stddev=std ) self.mat = tf.Variable( tf.concat([self.mat, new_column], self.dim - 2), trainable=True ) # Set momenta for the new row to zero mom_column = tf.Variable( np.zeros(self.mom.shape[:-2] + (n, self.mom.shape[-1])).astype("float32") ) self.mom = tf.Variable( tf.concat([self.mom, mom_column], self.dim - 2), trainable=False ) mom2_column = tf.Variable( np.zeros(self.mom2.shape[:-2] + (n, self.mom2.shape[-1])).astype("float32") ) self.mom2 = tf.Variable( tf.concat([self.mom2, mom2_column], self.dim - 2), trainable=False ) def contract_in(self, n, index): """Remove a random input feature""" if self.mat.shape[-2] > 1: start = [0 for x in self.shape] size = list(self.shape) size[-2] = n * index new_mat = tf.slice(self.mat, start, size) new_mom = tf.slice(self.mom, start, size) new_mom2 = tf.slice(self.mom2, start, size) start[-2] = n * (index + 1) size[-2] = self.shape[-2] - n * (index + 1) new_mat = tf.concat( [new_mat, tf.slice(self.mat, start, size)], self.dim - 2 ) new_mom = tf.concat( [new_mom, tf.slice(self.mom, start, size)], self.dim - 2 ) new_mom2 = 
tf.concat( [new_mom2, tf.slice(self.mom2, start, size)], self.dim - 2 ) self.mat = tf.Variable(new_mat, trainable=True) self.mom = tf.Variable(new_mom, trainable=False) self.mom2 = tf.Variable(new_mom2, trainable=False) def get_state(self): return (self.mat, self.mom, self.mom2) def set_state(self, state): assert not isinstance(state[0], tf.Tensor) assert not isinstance(state[1], tf.Tensor) assert not isinstance(state[2], tf.Tensor) self.mat = state[0] self.mom = state[1] self.mom2 = state[2] def apply_adam(self, gradient, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): """The Adam gradient descent method""" t = self.gradient_step.assign_add(1.0) mom = self.mom.assign(beta1 * self.mom + (1 - beta1) * gradient) mom2 = self.mom2.assign(beta2 * self.mom2 + (1 - beta2) * gradient * gradient) mom_hat = mom / (1 - tf.pow(beta1, t)) mom2_hat = mom2 / (1 - tf.pow(beta2, t)) self.mat.assign_add(-alpha * mom_hat / (tf.sqrt(mom2_hat) + epsilon)) @property def shape(self): return self.mat.get_shape().as_list() class DynamicDenseLayer: """A single dense layer with dynamic input and output size""" def __init__(self, input_size, output_size, new_weight_std=0.1): """Create the layer with a given initial configuration""" if input_size is not None: self.w = DynamicMatrix((input_size, output_size), 0.1) self.b = DynamicMatrix((1, output_size), 0.1) self.dynamic = True self.input_size = input_size self.output_size = output_size self.new_weight_std = new_weight_std @classmethod def from_state(cls, state, new_weight_std=0.1): """Initialize from state tuple (or list)""" obj = cls(None, None) obj.w = DynamicMatrix.from_state(state[0]) obj.b = DynamicMatrix.from_state(state[1]) obj.input_size = state[2] obj.output_size = state[3] obj.new_weight_std = 0.01 return obj def expand_out(self): """Add a random output feature""" self.w.expand_out(1, self.new_weight_std) self.b.expand_out(1, self.new_weight_std) self.output_size = self.output_size + 1 def contract_out(self, index): """Remove a random output feature""" if self.output_size > 1: self.w.contract_out(1, index) self.b.contract_out(1, index) self.output_size = self.output_size - 1 def expand_in(self): """Add a random input feature""" self.w.expand_in(1, self.new_weight_std) self.input_size = self.input_size + 1 def contract_in(self, index): """Remove a random input feature""" if self.input_size > 1: self.w.contract_in(1, index) self.input_size = self.input_size - 1 @property def trainable_variables(self): """Returns a list of trainable variables""" return [self.w.mat, self.b.mat] def get_state(self): """Returns the current state of the layer""" return ( self.w.get_state(), self.b.get_state(), self.input_size, self.output_size, ) # the given state def set_state(self, state): """Overwrite the current state of the layer with with the given state """ assert not isinstance(state[0], tf.Tensor) assert not isinstance(state[1], tf.Tensor) self.w.set_state(state[0]) self.b.set_state(state[1]) self.input_size = state[2] self.output_size = state[3] def weight_count(self): """Return the number of weights in the layer""" return self.input_size * self.output_size + self.output_size def summary_string(self): return "({}, {})".format(self.input_size, self.output_size) def apply_adam(self, gradients, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): self.w.apply_adam(gradients[0], alpha, beta1, beta2, epsilon) self.b.apply_adam(gradients[1], alpha, beta1, beta2, epsilon) def __call__(self, inputs): """Apply the layer""" assert self.w.shape == [self.input_size, 
self.output_size] assert self.b.shape == [1, self.output_size] return tf.matmul(inputs, self.w.mat) + self.b.mat class DynamicConv2DLayer: """A convolution layer with dynamic filter size""" def __init__(self, width, input_size, output_size, new_weight_std=0.1): """Create the layer with a given initial configuration""" if input_size is not None: self.w = DynamicMatrix((width, width, input_size, output_size), 0.1) self.dynamic = True self.width = width self.input_size = input_size self.output_size = output_size self.new_weight_std = new_weight_std @classmethod def from_state(cls, state, new_weight_std=0.1): """Initialize from state tuple (or list)""" obj = cls(None, None) obj.w = DynamicMatrix.from_state(state[0]) obj.width = state[1] obj.input_size = state[2] obj.output_size = state[3] obj.new_weight_std = 0.01 return obj def expand_out(self): """Add a random output feature""" self.w.expand_out(1, self.new_weight_std) self.output_size = self.output_size + 1 def contract_out(self, n): """Remove a random output feature""" if self.output_size > 1: self.w.contract_out(1, n) self.output_size = self.output_size - 1 def contract_in(self, n): """Remove a random input feature""" if self.input_size > 1: self.w.contract_in(1, n) self.input_size = self.input_size - 1 def expand_in(self): """Add a random input feature""" self.w.expand_in(1, self.new_weight_std) self.input_size = self.input_size + 1 @property def trainable_variables(self): """Returns a list of trainable variables""" return [self.w.mat] def get_state(self): """Returns the current state of the layer""" return (self.w.get_state(), self.width, self.input_size, self.output_size) # the given state def set_state(self, state): """Overwrite the current state of the layer with the given state """ assert not isinstance(state[0], tf.Tensor) self.w.set_state(state[0]) self.width = state[1] self.input_size = state[2] self.output_size = state[3] def weight_count(self): """Return the number of weights in the layer""" return self.width * self.width * self.input_size * self.output_size def summary_string(self): return "({}, {}, {}, {})".format( self.width, self.width, self.input_size, self.output_size ) def apply_adam(self, gradients, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): self.w.apply_adam(gradients[0], alpha, beta1, beta2, epsilon) def __call__(self, inputs): """Apply the layer""" assert self.w.shape == [ self.width, self.width, self.input_size, self.output_size, ] return tf.nn.conv2d(inputs, self.w.mat, 2, "SAME") class DynamicConv2DToDenseLayer: """Flattens the output of a conv2d layer and allows adding and removing neurons correctly in between """ def __init__(self, pixels, features, output_size, new_weight_std=0.1): """Create the layer with a given initial configuration""" if pixels is not None: self.w = DynamicMatrix((pixels * features, output_size), 0.1) self.b = DynamicMatrix((1, output_size), 0.1) self.dynamic = True self.pixels = pixels self.features = features self.output_size = output_size self.new_weight_std = new_weight_std @classmethod def from_state(cls, state, new_weight_std=0.1): """Initialize from state tuple (or list)""" obj = cls(None, None) obj.w = DynamicMatrix.from_state(state[0]) obj.b = DynamicMatrix.from_state(state[1]) obj.features = state[2] obj.output_size = state[3] obj.new_weight_std = new_weight_std return obj def expand_out(self): """Add a random output feature""" self.w.expand_out(1, self.new_weight_std) self.b.expand_out(1, self.new_weight_std) self.output_size = self.output_size + 1 def contract_out(self, 
n): """Remove a random output feature""" if self.output_size > 1: self.w.contract_out(1, n) self.b.contract_out(1, n) self.output_size = self.output_size - 1 def expand_in(self): """Add a random input feature""" self.w.expand_in(self.pixels, self.new_weight_std) self.features = self.features + 1 def contract_in(self, n): """Remove a random input feature""" if self.features > 1: self.w.contract_in(self.pixels, n) self.features = self.features - 1 @property def trainable_variables(self): """Returns a list of trainable variables""" return [self.w.mat, self.b.mat] def get_state(self): """Returns the current state of the layer""" return ( self.w.get_state(), self.b.get_state(), self.pixels, self.features, self.output_size, ) def set_state(self, state): """Overwrite the current state of the layer with the given state""" assert not isinstance(state[0], tf.Tensor) assert not isinstance(state[1], tf.Tensor) self.w.set_state(state[0]) self.b.set_state(state[1]) self.pixels = state[2] self.features = state[3] self.output_size = state[4] def weight_count(self): """Return the number of weights in the layer""" return self.pixels * self.features * self.output_size + self.output_size def summary_string(self): return "({}, {}, {})".format(self.pixels, self.features, self.output_size) def apply_adam(self, gradients, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): self.w.apply_adam(gradients[0], alpha, beta1, beta2, epsilon) self.b.apply_adam(gradients[1], alpha, beta1, beta2, epsilon) def __call__(self, inputs): """Apply the layer""" assert self.w.shape == [self.pixels * self.features, self.output_size] assert self.b.shape == [1, self.output_size] # Move pixels to the last columns, so that it is easier to add and remove x = tf.transpose(inputs, perm=[0, 3, 1, 2]) # Now flatten x = tf.reshape(x, [x.shape[0], -1]) x = tf.matmul(x, self.w.mat) + self.b.mat return x class DynamicModel: """A model formed of a number of dynamical dense layers""" def __init__(self, layers, new_weight_std=0.1, activation=tf.nn.relu): """Create the initial model configuration""" # A list of layersr in this model self.layers = layers # Variables related to fully connected part self.new_weight_std = new_weight_std self.input_size = self.layers[0].input_size self.output_size = self.layers[-1].output_size self.activation = activation def weight_count(self): """Returns the number of weights currently in the model""" count = 0 for layer in self.layers: if layer.dynamic: count += layer.weight_count() return count def summary(self): """Print a summary of the layers in this model""" num_weights = 0 for i, l in enumerate(self.layers): if l.dynamic: l_weights = l.weight_count() num_weights += l_weights print( "Layer {}: {}, number of weights {}".format( i, l.summary_string(), l_weights ) ) print("Total: {} weights".format(num_weights)) def expand(self): """Add a feature""" # Pick a layer nl = (int)((len(self.layers) - 1) * np.random.rand()) l1 = self.layers[nl] l2 = self.layers[nl + 1] if not l1.dynamic or not l2.dynamic: return # Expand the number of outputs in the layer # and the number of inputs in the next one l1.expand_out() l2.expand_in() def contract(self): """Remove a random feature""" # Pick a layer nl = (int)((len(self.layers) - 1) * np.random.rand()) l1 = self.layers[nl] l2 = self.layers[nl + 1] if not l1.dynamic or not l2.dynamic: return # Choose a random feature n = (int)(l1.output_size * np.random.rand()) # remove it from both the layer and the next one l1.contract_out(n) l2.contract_in(n) def update_features( self, data, 
loss_function, weight_penalty=1e-9, layer_change_rate=0.1 ): """Stochastic update: add or remove a feature if it decreases the loss function """ # Get the current loss, including the weight penalty initial_loss = loss_function(data) + weight_penalty * self.weight_count() # Make note of the current state initial_state = self.get_state() # Randomly choose wether to add or remove if np.random.rand() > 0.5: self.expand() else: self.contract() # Calculate the loss in the new network new_loss = loss_function(data) + weight_penalty * self.weight_count() # and the change in the loss dloss = new_loss - initial_loss # If the loss increases, return to the original state if dloss > 0: self.set_state(initial_state) accepted = False else: accepted = True # self.summary() return accepted def trainable_variables(self): """Returns a list of trainable variables""" return [var for layer in self.layers for var in layer.trainable_variables] def get_state(self): """Returns the current state of the model""" state = [] for layer in self.layers: if layer.dynamic: state.append(layer.get_state()) return state def set_state(self, state): """Overwrite the current state""" i = 0 for layer in self.layers: if layer.dynamic: layer.set_state(state[i]) i = i + 1 def apply_adam(self, gradients, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8): """Update the weights using the ADAM update method""" var_index = 0 for layer in self.layers: n_vars = len(layer.trainable_variables) layer.apply_adam( gradients[var_index : var_index + n_vars], alpha, beta1, beta2, epsilon ) var_index += n_vars def __call__(self, inputs): """Apply the model""" x = inputs for layer in self.layers[:-1]: x = layer(x) x = self.activation(x) x = self.layers[-1](x) return x # ------------------------------- # Add or remove dense layers # ------------------------------- def add_layer(self): """Add a dense layer. The new layer starts close to an identity operation. """ # Pick a layer nl = (int)((len(self.layers) - 1) * np.random.rand()) l1 = self.layers[nl] # Build an intermediate layer. Start close to one stdiv = self.new_weight_std / (l1.output_size) new_w = tf.Variable( tf.eye(l1.output_size) + tf.random.normal((l1.output_size, l1.output_size), stddev=stdiv), trainable=True, ) new_b = tf.Variable( tf.random.normal((l1.output_size,), stddev=stdiv), trainable=True ) new_layer = DynamicDenseLayer.from_state( (new_w, new_b, l1.output_size, l1.output_size) ) self.layers.insert(nl + 1, new_layer) def remove_layer(self): """Remove a layer. Remove the activation function between two layers and merge the now linear operations. """ if len(self.layers) > 2: # Pick a layer nl = (int)((len(self.layers) - 1) * np.random.rand()) # Just drop the activation between the layer and the next, # reducing them to a single linear operation l1 = self.layers[nl] l2 = self.layers[nl + 1] # Pull the states of the two layers and construct new variables st1 = l1.get_state() st2 = l2.get_state() new_w = tf.Variable(tf.matmul(st1[0], st2[0]), trainable=True) new_b = tf.Variable( tf.matmul(tf.expand_dims(st1[1], 0), st2[0])[0, :] + st2[1], trainable=True, ) assert new_w.shape == (l1.input_size, l2.output_size) # Build the new layer state = [new_w, new_b, l1.input_size, l2.output_size] new_layer = DynamicDenseLayer.from_state(state) del self.layers[nl] del self.layers[nl] self.layers.insert(nl, new_layer)
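# --- Usage sketch (added for illustration) ---------------------------------
# A minimal training loop for the dynamic layers above. This is a hedged
# example: the layer sizes, the synthetic regression data, and the schedule
# for proposing feature additions/removals are assumptions made for this
# sketch, not part of the original module. It relies only on the tf and np
# imports already present at the top of the file.
layers = [
    DynamicDenseLayer(4, 16),
    DynamicDenseLayer(16, 16),
    DynamicDenseLayer(16, 1),
]
model = DynamicModel(layers)

# Synthetic data: learn to sum four inputs
x = np.random.rand(256, 4).astype("float32")
y = np.sum(x, axis=1, keepdims=True)

def loss_function(batch):
    inputs, targets = batch
    return tf.reduce_mean(tf.square(model(inputs) - targets))

for step in range(100):
    with tf.GradientTape() as tape:
        loss = loss_function((x, y))
    gradients = tape.gradient(loss, model.trainable_variables())
    model.apply_adam(gradients)
    # Occasionally propose adding or removing a feature
    if step % 10 == 0:
        model.update_features((x, y), loss_function)

model.summary()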
python
#Import Libraries
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
#----------------------------------------------------

#Applying Lasso Regression Model

'''
#sklearn.linear_model.Lasso(alpha=1.0, fit_intercept=True, normalize=False, precompute=
#                           False, copy_X=True, max_iter=1000, tol=0.0001,
#                           warm_start=False, positive=False, random_state=None, selection='cyclic')
'''

LassoRegressionModel = Lasso(alpha=1.0, random_state=33, normalize=False)
LassoRegressionModel.fit(X_train, y_train)

#Calculating Details
print('Lasso Regression Train Score is : ', LassoRegressionModel.score(X_train, y_train))
print('Lasso Regression Test Score is : ', LassoRegressionModel.score(X_test, y_test))
print('Lasso Regression Coef is : ', LassoRegressionModel.coef_)
print('Lasso Regression intercept is : ', LassoRegressionModel.intercept_)
print('----------------------------------------------------')

#Calculating Prediction
y_pred = LassoRegressionModel.predict(X_test)
print('Predicted Value for Lasso Regression is : ', y_pred[:10])

#----------------------------------------------------
#Calculating Mean Absolute Error
MAEValue = mean_absolute_error(y_test, y_pred, multioutput='uniform_average')  # it can be raw_values
print('Mean Absolute Error Value is : ', MAEValue)

#----------------------------------------------------
#Calculating Mean Squared Error
MSEValue = mean_squared_error(y_test, y_pred, multioutput='uniform_average')  # it can be raw_values
print('Mean Squared Error Value is : ', MSEValue)

#----------------------------------------------------
#Calculating Median Absolute Error
MdSEValue = median_absolute_error(y_test, y_pred)
print('Median Absolute Error Value is : ', MdSEValue)
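# --- Hypothetical data preparation (added for illustration) ----------------
# The script above assumes X_train, X_test, y_train and y_test already exist.
# A minimal, hedged sketch of one way to produce them; the diabetes dataset
# is only an example and is not part of the original script. In practice this
# would run before LassoRegressionModel.fit.
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split

X, y = load_diabetes(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=33)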
python
from django.urls import path

from . import views

urlpatterns = [
    path('', views.home, name='home'),
    path('search', views.tweets_search, name='tweets_search'),
    path('articles', views.articles, name='articles'),
    path('portals', views.portals, name='portals'),
    path('graphics', views.graphics, name='graphics'),
]
python
import torch
from torch import Tensor
from torch import nn
from typing import Union, Tuple, List, Iterable, Dict
import os
import json


class LayerNorm(nn.Module):
    def __init__(self, dimension: int):
        super(LayerNorm, self).__init__()
        self.dimension = dimension
        self.norm = nn.LayerNorm(dimension)

    def forward(self, features: Dict[str, Tensor]):
        features['sentence_embedding'] = self.norm(features['sentence_embedding'])
        return features

    def get_sentence_embedding_dimension(self):
        return self.dimension

    def save(self, output_path):
        with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
            json.dump({'dimension': self.dimension}, fOut, indent=2)
        torch.save(self.state_dict(), os.path.join(output_path, 'pytorch_model.bin'))

    @staticmethod
    def load(input_path):
        with open(os.path.join(input_path, 'config.json')) as fIn:
            config = json.load(fIn)
        model = LayerNorm(**config)
        model.load_state_dict(torch.load(os.path.join(input_path, 'pytorch_model.bin'), map_location=torch.device('cpu')))
        return model
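# --- Usage sketch (added for illustration) ----------------------------------
# A hedged example of plugging this module into a sentence-transformers
# pipeline. The transformer model name and the pooling setup are assumptions
# for illustration, not part of the original file.
from sentence_transformers import SentenceTransformer, models

word_embedding_model = models.Transformer('distilbert-base-uncased')
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
layer_norm = LayerNorm(pooling_model.get_sentence_embedding_dimension())

model = SentenceTransformer(modules=[word_embedding_model, pooling_model, layer_norm])
embeddings = model.encode(['An example sentence.'])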
python
hp = __import__('heap')  # place heap.py (max_heap.py - name changed) in same directory


class HeapSort(object):
    def __init__(self, arr):
        super(HeapSort, self).__init__()
        self.arr = arr

    def printH(self):
        print(self.arr)

    def heapSort(self):
        heap = hp.Heap()
        heap.createHeap(*self.arr)
        i = 0
        while heap.size > 0:
            self.arr[i] = heap.delete()
            i += 1


arr = [4, 13, 6, 2, 87, 21, 65]
heapSort = HeapSort(arr)
heapSort.printH()
heapSort.heapSort()
heapSort.printH()
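# --- Hypothetical heap.py sketch (added for illustration) -------------------
# heap.py is not included in this document. The sketch below only shows the
# interface HeapSort relies on (createHeap, delete, size); it is an assumed
# max-heap implementation, not the original module, so delete() returns the
# largest element first and the resulting sort order is descending.
class Heap(object):
    def __init__(self):
        self.items = []

    @property
    def size(self):
        return len(self.items)

    def createHeap(self, *values):
        for v in values:
            self.insert(v)

    def insert(self, value):
        # sift the new value up until the max-heap property holds
        self.items.append(value)
        i = len(self.items) - 1
        while i > 0 and self.items[(i - 1) // 2] < self.items[i]:
            parent = (i - 1) // 2
            self.items[i], self.items[parent] = self.items[parent], self.items[i]
            i = parent

    def delete(self):
        # pop the maximum, move the last element to the root and sift it down
        top = self.items[0]
        last = self.items.pop()
        if self.items:
            self.items[0] = last
            i, n = 0, len(self.items)
            while True:
                left, right, largest = 2 * i + 1, 2 * i + 2, i
                if left < n and self.items[left] > self.items[largest]:
                    largest = left
                if right < n and self.items[right] > self.items[largest]:
                    largest = right
                if largest == i:
                    break
                self.items[i], self.items[largest] = self.items[largest], self.items[i]
                i = largest
        return top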
python
from setuptools import setup

with open("README.md", "r") as fh:
    long_description = fh.read()

requirements = [
    'DAWG-Python==0.7.2',
    'docopt==0.6.2',
    'psycopg2==2.8.6',
    'pymorphy2==0.9.1',
    'pymorphy2-dicts-ru==2.4.417127.4579844'
]

setup(
    name='search_engine_rishatsadykov',
    version='1.1',
    packages=['lemmatization'],
    url='https://github.com/rishat11/information-retrieval',
    classifiers=[
        'Programming Language :: Python :: 3.8',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
    ],
    author='Rishat Sadykov',
    author_email='[email protected]',
    description='Calculates TF-IDF for data set',
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=requirements,
    python_requires='>=3.6',
)
python
from collections import deque
from random import randint

import settings
from datatypes import Vector, Position, Draw


class Player:
    HEAD_CHAR = "%"
    BODY_CHAR = "@"
    TAIL_CHAR = "*"
    DEAD_HEAD_CHAR = "x"
    DEAD_BODY_CHAR = "@"
    DEAD_TAIL_CHAR = "+"

    UP = Vector(0, -1)
    DOWN = Vector(0, 1)
    LEFT = Vector(-1, 0)
    RIGHT = Vector(1, 0)
    DIRECTIONS = [UP, DOWN, LEFT, RIGHT]

    keymap = {37: LEFT, 38: UP, 39: RIGHT, 40: DOWN}

    def __init__(self, player_id, name, ws):
        self._id = player_id
        self.name = name
        self.ws = ws
        self.alive = False
        self.direction = None

    def new_snake(self, color):
        self.color = color
        self.grow = 0
        self.score = 0
        self.alive = True
        self.snake = deque()

    def render_new_snake(self):
        # try to spawn snake at some distance from world's borders
        distance = settings.INIT_LENGHT + 2
        x = randint(distance, settings.FIELD_SIZE_X - distance)
        y = randint(distance, settings.FIELD_SIZE_Y - distance)
        self.direction = self.DIRECTIONS[randint(0, 3)]
        # create snake from tail to head
        render = []
        pos = Position(x, y)
        for i in range(0, settings.INIT_LENGHT):
            self.snake.appendleft(pos)
            if i == 0:
                char = self.TAIL_CHAR
            elif i == settings.INIT_LENGHT - 1:
                char = self.HEAD_CHAR
            else:
                char = self.BODY_CHAR
            render.append(Draw(pos.x, pos.y, char, self.color))
            pos = self.next_position()
        return render

    def next_position(self):
        # next position of the snake's head
        return Position(self.snake[0].x + self.direction.xdir,
                        self.snake[0].y + self.direction.ydir)

    def render_move(self):
        # moving snake to the next position
        render = []
        new_head = self.next_position()
        self.snake.appendleft(new_head)
        # draw head in the next position
        render.append(Draw(new_head.x, new_head.y, self.HEAD_CHAR, self.color))
        # draw body in the old place of head
        render.append(Draw(self.snake[1].x, self.snake[1].y, self.BODY_CHAR, self.color))
        # if we grow this turn, the tail remains in place
        if self.grow > 0:
            self.grow -= 1
        else:
            # otherwise the tail moves
            old_tail = self.snake.pop()
            render.append(Draw(old_tail.x, old_tail.y, " ", 0))
            new_tail = self.snake[-1]
            render.append(Draw(new_tail.x, new_tail.y, self.TAIL_CHAR, self.color))
        return render

    def render_game_over(self):
        render = []
        # dead snake
        for i, pos in enumerate(self.snake):
            if i == 0:
                render.append(Draw(pos.x, pos.y, self.DEAD_HEAD_CHAR, 0))
            elif i == len(self.snake) - 1:
                render.append(Draw(pos.x, pos.y, self.DEAD_TAIL_CHAR, 0))
            else:
                render.append(Draw(pos.x, pos.y, self.DEAD_BODY_CHAR, 0))
        return render

    def keypress(self, code):
        if not self.alive:
            return
        direction = self.keymap.get(code)
        if direction:
            # do not move in the opposite direction
            if not (self.direction and
                    direction.xdir == -self.direction.xdir and
                    direction.ydir == -self.direction.ydir):
                self.direction = direction
python
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import pytest

from spack.main import SpackCommand, SpackCommandError

info = SpackCommand('env')


@pytest.mark.parametrize('pkg', [
    ('zlib',),
    ('zlib', '--')
])
@pytest.mark.usefixtures('config')
def test_it_just_runs(pkg):
    info(*pkg)


@pytest.mark.parametrize('pkg,error_cls', [
    ('zlib libszip', SpackCommandError),
    ('', IndexError)
])
@pytest.mark.usefixtures('config')
def test_it_just_fails(pkg, error_cls):
    with pytest.raises(error_cls):
        info(pkg)
python
"""django_maps URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url, include from django.contrib import admin from maps import views from maps import api urlpatterns = [ url(r'^admin/', admin.site.urls), url(r'^$',views.landing), url(r'^auth/register', views.registration), url(r'^auth/', include("registration.backends.simple.urls")), #This will be entry point to SPA url(r'^users/(?P<userName>[\w\-]+)/adventures/$',views.profileViewer), #Editor SPA url(r'^editor/$', views.editorViewer), #API URLs url(r'^api/rest/userInfo/(?P<userId>\d+)$', api.userInfo), #get, post url(r'^api/rest/adventures$', api.adventures), #post url(r'^api/rest/advsOverview/(?P<userId>\d+)$', api.advsOverview), #get url(r'^api/rest/adventures/(?P<advId>\d+)$', api.adventures), #get, delete url(r'^api/rest/advMaps/(?P<advId>\d+)$', api.advMaps), url(r'^api/rest/maps/(?P<mapId>\d+)$', api.maps), url(r'^api/rest/mapSegment$', api.mapSegment), #post url(r'^api/rest/profilePhoto$', api.profilePhoto), #post only ]
python
""" Just a process to a centralized basic create user from password and username """ from flask import request, redirect, render_template, session, flash, abort, jsonify, Response, flash import random import json from flask_babel import _ from datetime import datetime, timedelta import uuid from urllib.parse import urlencode import didkit from datetime import timedelta, datetime import logging logging.basicConfig(level=logging.INFO) from factory import createidentity, createcompany from components import sms, directory, ns, company, privatekey from signaturesuite import vc_signature from protocol import Document #PRESENTATION_DELAY = timedelta(seconds= 10*60) DID_WEB = 'did:web:talao.cp' DID_ETHR = 'did:ethr:0xee09654eedaa79429f8d216fa51a129db0f72250' DID_TZ = 'did:tz:tz2NQkPq3FFA3zGAyG8kLcWatGbeXpHMu7yk' DID_KEY = 'did:key:zQ3shWBnQgxUBuQB2WGd8iD22eh7nWC4PTjjTjEgYyoC3tjHk' DID = DID_TZ did_selected = 'did:tz:tz2NQkPq3FFA3zGAyG8kLcWatGbeXpHMu7yk' CREDENTIAL_TOPIC = ['experience', 'training', 'recommendation', 'work', 'salary', 'vacation', 'internship', 'relocation', 'end_of_work', 'hiring'] def init_app(app, red, mode) : app.add_url_rule('/register/identity', view_func= register_identity, methods = ['GET', 'POST'], defaults={'mode': mode}) #app.add_url_rule('/register', view_func=register_user, methods = ['GET', 'POST'], defaults={'mode': mode}) # idem below app.add_url_rule('/register', view_func=register_qrcode, methods = ['GET', 'POST'], defaults={'mode': mode}) # idem below app.add_url_rule('/register/user', view_func=register_user, methods = ['GET', 'POST'], defaults={'mode': mode}) app.add_url_rule('/register/company', view_func=register_company, methods = ['GET', 'POST'], defaults={'mode': mode}) app.add_url_rule('/register/password', view_func=register_password, methods = [ 'GET', 'POST'], defaults={'mode': mode}) app.add_url_rule('/register/qrcode', view_func=register_qrcode, methods = [ 'GET', 'POST'], defaults={'mode': mode}) app.add_url_rule('/register/wallet/user', view_func=register_wallet_user, methods = [ 'GET', 'POST'], defaults={'mode': mode, 'red' : red}) app.add_url_rule('/register/code', view_func=register_code, methods = ['GET', 'POST'], defaults={'mode': mode}) app.add_url_rule('/register/post_code', view_func=register_post_code, methods = ['POST', 'GET'], defaults={'mode': mode}) app.add_url_rule('/register/wallet_endpoint/<id>', view_func=register_wallet_endpoint, methods = ['POST', 'GET'], defaults={'mode': mode, 'red' : red}) app.add_url_rule('/register/stream', view_func=register_stream, defaults={'red' : red}) app.add_url_rule('/register/error', view_func=register_error) app.add_url_rule('/register/create_for_wallet', view_func=register_create_for_wallet, methods = ['POST', 'GET'], defaults={'mode': mode}) return def register_company(mode) : """ create company # route /register/company """ if request.method == 'GET' : return render_template('register/company_register.html') if request.method == 'POST' : credentials_supported = list() for topic in CREDENTIAL_TOPIC : if request.form.get(topic) : credentials_supported.append(request.form[topic]) username = request.form['company_name'].lower() siren = request.form['siren'] if ns.username_exist(username, mode) : username = username + str(random.randint(1, 100)) if request.form['promo'] in ["TEST"] : promo = 50 else : promo = 10 workspace_contract = createcompany.create_company(request.form['contact_email'],username, None, mode, siren=request.form['siren'])[2] if workspace_contract : directory.add_user(mode, 
request.form['company_name'], username, siren) filename = mode.db_path + 'company.json' personal = json.load(open(filename, 'r')) personal['contact_name']['claim_value'] = request.form['contact_name'] personal['name']['claim_value'] = request.form['company_name'] personal['contact_email']['claim_value'] = request.form['contact_email'] personal['contact_phone']['claim_value'] = request.form['contact_phone'] personal['website']['claim_value'] = request.form['website'] personal['siren']['claim_value'] = request.form['siren'] personal['postal_address']['claim_value'] = request.form['postal_address'] personal['credentials_supported'] = credentials_supported personal['picture'] = 'QmXKeAgNZhLibNjYJFHCiXFvGhqsqNV2sJCggzGxnxyhJ5' personal['signature'] = 'QmPZxzrmh29sNcgrT7hyrrP6BWyahLwYUvzbuf5vUFxw91' personal['credential_counter'] = 0 personal['credential_acquired'] = promo ns.update_personal(workspace_contract, json.dumps(personal, ensure_ascii=False), mode) # init first campaign new_campaign = company.Campaign(session['username'], mode) data = {'description' : request.form['description'], 'nb_subject' : 0, 'startDate' : '', 'endDate' : '', 'credentials_supported' : credentials_supported} campaign_code = "camp" + str(random.randint(100, 999)) new_campaign.add(campaign_code , json.dumps(data, ensure_ascii=False)) return render_template('register/company_end_of_registration.html', campaign_code=campaign_code) else : flash(_('Company registration failed'), 'danger') return redirect(mode.server + 'register/company') def register_user(mode) : if request.method == 'GET' : #session.clear() if session.get('code_sent') : del session['code_sent'] session['is_active'] = True return render_template("/register/user_register.html") if request.method == 'POST' : session['email'] = request.form['email'] session['firstname'] = request.form['firstname'] session['lastname'] = request.form['lastname'] session['username'] = ns.build_username(session['firstname'], session['lastname'], mode) session['phone'] = request.form['phone'] session['search_directory'] = request.form.get('CGU') message = "" if not request.form.get('CGU') : message = _('Accept the service conditions to move next step.') phone = session['phone'] if not sms.check_phone(session['phone'], mode) : message = _('Incorrect phone number.') phone = '' if message : flash(message, 'warning') return render_template("/register/user_register.html", firstname=session['firstname'], lastname=session['lastname'], email=session['email'], phone=phone) return redirect (mode.server + 'register/identity') def register_identity(mode) : session['did'] = 'tz' return redirect (mode.server + 'register/password') # route /register/password/ def register_password(mode): if not session.get('is_active') : flash(_('Session expired'), 'warning') return redirect(mode.server + 'register') if request.method == 'GET' : return render_template("/register/register_password.html") if request.method == 'POST' : session['password'] = request.form['password'] if not session.get('code_sent') : session['code'] = str(random.randint(100000, 999999)) session['code_sent'] = True session['code_delay'] = datetime.now() + timedelta(seconds= 180) session['try_number'] = 0 if sms.send_code(session['phone'], session['code'], mode) : logging.info('secret code sent = %s', session['code']) else : logging.error('sms connexion probleme register_password') flash(_('SMS failed.'), 'warning') return render_template("user_register.html" ) return render_template("/register/register_code.html") # route 
/register/code/ def register_code(mode) : if not session.get('is_active') or 'try_number' not in session : flash(_('Session expired'), 'warning') return redirect(mode.server + 'register') session['try_number'] +=1 logging.info('code received = %s', request.form.get('mycode')) if request.form['mycode'] == session['code'] and datetime.now() < session['code_delay'] and session['try_number'] < 4 : if not createidentity.create_user(session['username'], session['email'], mode, did=session['did'], firstname=session['firstname'], lastname=session['lastname'], phone=session['phone'], password=session['password'])[2] : logging.error('createidentity failed') flash(_('Transaction failed.'), 'warning') return render_template("/register/user_register.html" ) directory.add_user(mode, session['username'], session['firstname'] + ' ' + session['lastname'], None) # success exit return render_template("/register/end_of_registration.html", username=session['username']) elif session['try_number'] == 3 : session['is_active'] = False flash(_("Code is incorrect. Too many trials."), 'warning') message = _("Registration failed") return render_template("/register/registration_error.html") elif datetime.now() > session['code_delay'] : session['is_active'] = False flash(_('Code expired'), 'warning') return render_template("/register/registration_error.html") else : if session['try_number'] == 1 : message = _('Code is incorrect, 2 trials left.') if session['try_number'] == 2 : message = _('Code is incorrect, last trial.') flash(message, 'warning') return render_template("/register/register_code.html") # route register/post_code def register_post_code(mode) : if session.get('wallet') == 'ok' : return redirect (mode.server + 'login') try : username = session['username'] session.clear() return redirect (mode.server + 'login?username=' + username) except : return redirect (mode.server + 'login') #########################################Register with wallet ##################################### def register_qrcode(mode) : if request.method == 'GET' : id = str(uuid.uuid1()) url = mode.server + 'register/wallet_endpoint/' + id + '?' + urlencode({"issuer" : DID}) deeplink = mode.deeplink + 'app/download?' 
+ urlencode({'uri' : url}) return render_template("/register/register_wallet_qrcode.html", url=url, deeplink=deeplink, id=id) def register_wallet_endpoint(id,red, mode): if request.method == 'GET': challenge = str(uuid.uuid1()) did_auth_request = { "type": "VerifiablePresentationRequest", "query": [ { "type": "QueryByExample", "credentialQuery": [] } ], "challenge": challenge, "domain" : mode.server} return jsonify(did_auth_request) if request.method == 'POST': presentation = json.loads(request.form['presentation']) logging.info('verify presentation = ' + didkit.verify_presentation(json.dumps(presentation), '{}')) """ if json.loads(didkit.verify_presentation(request.form['presentation'], '{}'))['errors'] : logging.warning('signature failed') data = json.dumps({"id" : id, "data" : "signature_failed."}) red.publish('register_wallet', data) return jsonify("Signature verification failed"), 400 """ try : email = presentation['verifiableCredential']['credentialSubject']['email'] except : data = json.dumps({ "id" : id, "data" : 'wrong_vc'}) red.publish('register_wallet', data) return jsonify('wrong_vc'), 400 if ns.get_workspace_contract_from_did(presentation['holder'], mode) : data = json.dumps({ "id" : id, "data" : 'already_registered'}) red.publish('register_wallet', data) return jsonify('User already_registered'), 400 try : givenName = presentation['verifiableCredential']['credentialSubject']['givenName'] familyName = presentation['verifiableCredential']['credentialSubject']['familyName'] session_data = json.dumps({ "id" : id, "email" : email, "did" : presentation['holder'], "givenName" : givenName, "familyName" : familyName} ) except : session_data = json.dumps({"id" : id, "email" : email , "did" : presentation['holder']}) red.set(id, session_data ) data = json.dumps({ "id" : id, "data" : 'ok'}) red.publish('register_wallet', data) return jsonify('ok') def register_wallet_user(red, mode) : if request.method == 'GET' : id = request.args['id'] session_data = json.loads(red.get(id).decode()) red.delete(id) try : session['firstname'] = session_data['givenName'] session['lastname'] = session_data['familyName'] session['display'] = False except : session['display'] = True session['did'] = session_data['did'] session['email'] = session_data['email'] session['is_active'] = True return render_template("/register/register_wallet_user.html") if request.method == 'POST' : if not session.get('firstname') or not session.get('lastname') : session['firstname'] = request.form['firstname'] session['lastname'] = request.form['lastname'] session['username'] = ns.build_username(session['firstname'], session['lastname'], mode) session['search_directory'] = request.form.get('CGU') message = "" if not request.form.get('CGU') : message = _('Accept the service conditions to move next step.') if message : flash(message, 'warning') return render_template("/register/register_wallet_user.html", firstname=session['firstname'], lastname=session['lastname'], email=session['email']) return redirect (mode.server + 'register/create_for_wallet') # event push to browser def register_stream(red): def event_stream(red): pubsub = red.pubsub() pubsub.subscribe('register_wallet') for message in pubsub.listen(): if message['type']=='message': yield 'data: %s\n\n' % message['data'].decode() headers = { "Content-Type" : "text/event-stream", "Cache-Control" : "no-cache", "X-Accel-Buffering" : "no"} return Response(event_stream(red), headers=headers) def register_create_for_wallet(mode) : address, private_key, workspace_contract = 
createidentity.create_user(session['username'], session['email'], mode, did=session['did'], firstname=session['firstname'], lastname=session['lastname'], password='identity') if not workspace_contract : logging.error('createidentity failed') flash(_('Transaction failed.'), 'warning') return render_template("/register/user_register.html" ) directory.add_user(mode, session['username'], session['firstname'] + ' ' + session['lastname'], None) # create an Identity Pass create_identity_pass(session['did'], session['firstname'], session['lastname'], session['email'], workspace_contract, mode) # success exit session['wallet'] = "ok" return render_template("/register/end_of_registration.html", username=session['username'], wallet="ok") def register_error() : if request.args['message'] == 'already_registered' : message = _("This identity is already registered.") elif request.args['message'] == 'signature_failed' : message = _("This credential was not signed correctly.") elif request.args['message'] == 'wrong_vc' : message = _("This credential is not accepted.") else : message ='Unknown' return render_template("/register/registration_error.html", message=message) def create_identity_pass(did, firstname, lastname, email, workspace_contract, mode) : # load JSON-LD model for registration_IdentityPass unsigned_credential = json.load(open('./verifiable_credentials/registration_IdentityPass.jsonld', 'r')) # update credential with form data unsigned_credential["id"] = "urn:uuid:" + str(uuid.uuid1()) unsigned_credential["credentialSubject"]["id"] = did unsigned_credential["credentialSubject"]['recipient']["email"] = email unsigned_credential["credentialSubject"]['recipient']["familyName"] = firstname unsigned_credential["credentialSubject"]['recipient']["givenName"] = lastname unsigned_credential["issuanceDate"] = datetime.utcnow().replace(microsecond=0).isoformat() + "Z" unsigned_credential['issuer'] = did_selected PVK = privatekey.get_key(mode.owner_talao, 'private_key', mode) signed_credential = vc_signature.sign(unsigned_credential, PVK, did_selected) if not signed_credential : flash(_('Operation failed.'), 'danger') logging.error('credential signature failed') return # upload credential to repository with company key signature my_certificate = Document('certificate') if not my_certificate.relay_add(workspace_contract ,json.loads(signed_credential), mode, privacy='public')[0] : logging.error('Identity pass to repository failed') return False return True
python
import numpy as np
import pandas as pd
import os
import sys

"""
Storey Q-Values  - https://github.com/StoreyLab/qvalue
--------------------
Python Wrapper
Author: Francois Aguet
https://github.com/broadinstitute/tensorqtl/blob/master/tensorqtl/rfunc.py
"""

def qvalue(p, lambda_qvalue=None):
    """Wrapper for qvalue::qvalue"""
    import rpy2
    from rpy2.robjects.packages import importr
    from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10+

    qvalue = importr("qvalue")
    rp = rpy2.robjects.vectors.FloatVector(p)
    if lambda_qvalue is None:
        q = qvalue.qvalue(rp)
    else:
        if not isinstance(lambda_qvalue, Iterable):
            lambda_qvalue = [lambda_qvalue]
        rlambda = rpy2.robjects.vectors.FloatVector(lambda_qvalue)
        q = qvalue.qvalue(rp, **{'lambda': rlambda})
    qval = np.array(q.rx2('qvalues'))
    pi0 = np.array(q.rx2('pi0'))[0]
    return qval, pi0

def t_test(mat: pd.DataFrame, group_s: pd.Series, equal_var: bool = False) -> pd.DataFrame:
    """
    t-test
    ---------------------
    Args:
        * mat: pd.DataFrame (genes x samples)
        * group_s: series of groupings
        * equal_var: wald-ttest (False)
    """
    from scipy import stats
    from statsmodels.stats.multitest import multipletests

    mat = mat[group_s.index]

    def _collapser(x, index, columns, name):
        _df = pd.DataFrame(x, index=index, columns=columns).reset_index()
        _id = _df.columns[0]
        return pd.melt(
            pd.DataFrame(x, index=index, columns=columns).reset_index(),
            id_vars=_id,
        ).set_index(_id).rename(columns={'variable': group_s.name, 'value': name})

    groups = np.array(group_s)
    X = mat.values

    n_groups = np.unique(groups).shape[0]
    n_genes = X.shape[0]

    # Init np.arrays
    t_stat = np.zeros((n_genes, n_groups))
    pval = np.zeros((n_genes, n_groups))
    pval_adj = np.zeros((n_genes, n_groups))
    qval = np.zeros((n_genes, n_groups))
    x_in = np.zeros((n_genes, n_groups))
    x_out = np.zeros((n_genes, n_groups))

    for idx, group in enumerate(np.unique(groups)):
        mask = groups == group

        if sum(mask) > 1:
            X_in = X[:, mask]
            X_out = X[:, ~mask]

            t_stat[:, idx], pval[:, idx] = stats.ttest_ind(X_in, X_out, axis=1, equal_var=equal_var)

            _, pval_adj[:, idx], _, _ = multipletests(
                pval[:, idx],
                alpha=0.05,
                method='fdr_bh',
                is_sorted=False,
                returnsorted=False
            )

            qval[:, idx], _ = qvalue(pval[:, idx])
            x_in[:, idx] = np.mean(X_in, 1)
            x_out[:, idx] = np.mean(X_out, 1)

    # Collapse to dataframe
    de_df = pd.concat([
        _collapser(x_in, mat.index, np.unique(groups), 'x_in'),
        _collapser(x_out, mat.index, np.unique(groups), 'x_out')['x_out'],
        _collapser(t_stat, mat.index, np.unique(groups), 't')['t'],
        _collapser(pval, mat.index, np.unique(groups), 'pval')['pval'],
        _collapser(pval_adj, mat.index, np.unique(groups), 'pval_adj')['pval_adj'],
        _collapser(qval, mat.index, np.unique(groups), 'qval')['qval']
    ], 1)

    # Fold-change
    de_df['diff'] = de_df['x_in'] - de_df['x_out']

    # Signed FC * -log10(qval)
    de_df['gsea_rank'] = de_df['diff'] * -np.log10(de_df['pval_adj'])

    return de_df

def mannwhitneyu(mat: pd.DataFrame, group_s: pd.Series) -> pd.DataFrame:
    """
    mannwhitneyu
    ---------------------
    Args:
        * mat: pd.DataFrame (genes x samples)
        * group_s: series of groupings
    """
    from tqdm import tqdm
    from scipy import stats
    from statsmodels.stats.multitest import multipletests
    from sys import stdout

    mat = mat[group_s.index]

    def _collapser(x, index, columns, name):
        _df = pd.DataFrame(x, index=index, columns=columns).reset_index()
        _id = _df.columns[0]
        return pd.melt(
            pd.DataFrame(x, index=index, columns=columns).reset_index(),
            id_vars=_id,
        ).set_index(_id).rename(columns={'variable': group_s.name, 'value': name})

    groups = np.array(group_s)
    X = mat.values

    n_groups = np.unique(groups).shape[0]
    n_genes = X.shape[0]

    # Init np.arrays
    u_stat = np.zeros((n_genes, n_groups))
    pval = np.zeros((n_genes, n_groups))
    pval_adj = np.zeros((n_genes, n_groups))
    qval = np.zeros((n_genes, n_groups))
    x_in = np.zeros((n_genes, n_groups))
    x_out = np.zeros((n_genes, n_groups))

    for idx, group in enumerate(np.unique(groups)):
        stdout.write("\r{} of {}".format(idx + 1, n_groups))
        mask = groups == group

        if sum(mask) > 1:
            X_in = X[:, mask]
            X_out = X[:, ~mask]

            for gn in range(X_in.shape[0]):
                #u_stat[gn,idx], pval[gn,idx] = stats.mannwhitneyu(X_in[gn], X_out[gn])
                u_stat[gn, idx], pval[gn, idx] = stats.mannwhitneyu(X_in[gn], X_out[gn], alternative='two-sided')

            _, pval_adj[:, idx], _, _ = multipletests(
                pval[:, idx],
                alpha=0.05,
                method='fdr_bh',
                is_sorted=False,
                returnsorted=False
            )

            # q-values are computed from this group's raw p-values
            # (the original referenced an undefined fgsea_df here)
            try:
                qval[:, idx], _ = qvalue(pval[:, idx])
            except:
                try:
                    qval[:, idx], _ = qvalue(pval[:, idx], lambda_qvalue=0.5)
                except:
                    qval[:, idx] = None

            x_in[:, idx] = np.mean(X_in, 1)
            x_out[:, idx] = np.mean(X_out, 1)

    # Collapse to dataframe
    de_df = pd.concat([
        _collapser(x_in, mat.index, np.unique(groups), 'x_in'),
        _collapser(x_out, mat.index, np.unique(groups), 'x_out')['x_out'],
        _collapser(u_stat, mat.index, np.unique(groups), 'u')['u'],
        _collapser(pval, mat.index, np.unique(groups), 'pval')['pval'],
        _collapser(pval_adj, mat.index, np.unique(groups), 'pval_adj')['pval_adj'],
        _collapser(qval, mat.index, np.unique(groups), 'qval')['qval']
    ], 1)

    # Fold-change
    de_df['diff'] = de_df['x_in'] - de_df['x_out']

    # Signed FC * -log10(qval)
    de_df['gsea_rank'] = de_df['diff'] * -np.log10(de_df['pval_adj'])

    return de_df
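# --- Usage sketch (added for illustration) ----------------------------------
# A hedged example of calling t_test on a toy expression matrix. It assumes
# rpy2 and the R 'qvalue' package are installed, since t_test calls the
# qvalue wrapper above; the data below are synthetic and only illustrative.
rng = np.random.default_rng(0)
mat = pd.DataFrame(
    rng.normal(size=(500, 6)),
    index=["gene{}".format(i) for i in range(500)],
    columns=["s{}".format(i) for i in range(6)],
)
group_s = pd.Series(["A", "A", "A", "B", "B", "B"], index=mat.columns, name="group")

de_df = t_test(mat, group_s)
print(de_df.head())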
python
#-*- coding: utf-8 -*- import json import socket import hashlib import base64 import traceback from threading import Thread, Event from Queue import Queue, Empty from defs import * from protocol import parse_frame, make_frame from utils import r_select class _BaseWsSock(object): def _handshake(self, environ, start_response): connection = environ.get('HTTP_CONNECTION', '') or '' connection = connection.lower().split(',') connection = [c.strip() for c in connection if c.strip()] upgrade = environ.get('HTTP_UPGRADE', '') if 'upgrade' not in connection: return False elif upgrade.lower() != 'websocket': return False key = environ.get('HTTP_SEC_WEBSOCKET_KEY', '') if not key: return False protocol = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', '') version = environ.get('HTTP_SEC_WEBSOCKET_VERSION', '') # --- key_hash = '%s%s' % (key, ws_uid) key_hash = base64.b64encode(hashlib.sha1(key_hash).digest()) # --- headers = [('upgrade', 'websocket'), ('connection', 'upgrade'), ('sec-websocket-accept', key_hash), ('x-handshake-by', '_BaseWsSock'), # ('sec-websocket-protocol', 'chat'), ] start_response('101 Switching protocols', headers) return True def html(self, environ, start_response): start_response('400 this is a websocket server.', {}) yield 'BAD REQUEST: this is a websocket server.' class WsSocket(_BaseWsSock): def __init__(self, environ, handler, values): self.environ = environ self.handler = handler self.values = values # print '---------------------' # for k in self.environ: # print k, type(self.environ[k]) f = self.environ.get('wsgi.input', None) # print dir(f) # print type(f) # print f.readable.__doc__ # print f.readable() self.f = f # self.evt_msg = Event() self.q_frame = Queue() self.q_recv = Queue() self.evt_open = Event() self.evt_close = Event() def handshake(self, environ, start_response): return super(WsSocket, self)._handshake(environ, start_response) def _frame(self, fin, op, payload, mask=False): return make_frame(fin, op, payload, mask=mask) def _nextframe(self, interval=0.50): while not self.evt_close.is_set(): try: frame = self.q_frame.get(True, interval) if frame: yield frame except Empty: yield None # def _sending_iter(self): # for frame in self._nextframe(): # if frame: # yield frame def _recv(self, timeout=5.0): if self.evt_close.is_set() or not self.f: raise WsError(u'websocket closed.') # print '----------- _recv ------------' # print self.f # print type(self.f) # print dir(self.f) t0, f = time.time(), None while not self.evt_close.is_set(): if hasattr(self.f, 'readable'): # r = [self.f] if self.f.readable() else [] # if not r: # time.sleep(timeout) r = [self.f] else: r = r_select([self.f], timeout=timeout) if not r: time.sleep(0.02) if time.time() - timeout > t0: raise WsTimeout() else: f = r[0] break try: fin, op, payload = parse_frame(f) if op == OP_CLOSE: self.close() elif op == OP_PING: pong = self._frame(True, OP_PONG, '') self.q_frame.put(pong) return fin, op, payload except (IOError, AttributeError, socket.error): raise except WsClosedByRemote: raise def _recv_to_q(self, timeout=0.02): try: fin, op, data = self._recv(timeout=timeout) if data: self.q_recv.put((fin, op, data)) except WsTimeout: pass except (WsIOError, WsClosedByRemote): self.close() def recv(self, timeout=5.0, allow_fragments=True): '''public recv(timeout=5.0)''' if self.evt_close.is_set(): raise WsError(u'websocket closed.') t0 = time.time() _op, _buff = None, None while t0 + timeout >= time.time(): try: frame = self.q_recv.get(True, 0.05) if frame: if allow_fragments: return frame else: fin, op, msg 
= frame if fin and not _buff: return frame elif not _buff: _op = op _buff = StringIO() _buff.write(msg) if fin: _buff.write(msg) return fin, _op, _buff.getvalue() except Empty: pass def send_json(self, v, fin=True, op=OP_TEXT, mask=False): if isinstance(v, unicode) or isinstance(v, str): return self.send(v) else: return self.send(json.dumps(v)) def send(self, data, fin=True, op=OP_TEXT, mask=False): '''public send(data)''' if not self.evt_close.is_set(): size = len(data) sub_f_size = MAX_FRAME_SIZE if fin and (size > sub_f_size): cur = 0 while True: part = data[cur: cur + sub_f_size] if not part: break _fin = 0 if cur + len(part) >= size: _fin = 1 _op = op if cur > 0: _op = 0 frame = self._frame(_fin, _op, part, mask=mask) self.q_frame.put(frame) cur += len(part) else: frame = self._frame(1 if fin else 0, op, data, mask=mask) self.q_frame.put(frame) else: raise WsError(u'websocket closed.') def ping(self): if not self.evt_close.is_set(): frame = self._frame(1, OP_PING, '') self.q_frame.put(frame) def close(self): '''public close()''' if not self.evt_close.is_set(): frame = self._frame(1, OP_CLOSE, '') self.q_frame.put(frame) time.sleep(0.05) self.evt_close.set() def _loop(self, only_downstream=False): for frame in self._nextframe(): if frame: yield frame elif not only_downstream: self._recv_to_q() def __call__(self): def invoke_handler(handler, sock): try: handler(sock, **sock.values) finally: sock.close() th = Thread(target=invoke_handler, args=(self.handler, self,)) th.setDaemon(True) th.start() try: yield self._frame(True, OP_PING, '') for item in self._loop(): yield item # for frame in self._nextframe(): # yield frame # print 'sending channel closed.' finally: self.close() th.join() # print 'session ended.' def server(self, server): if not server: raise ValueError('server instance required.') def recv(server, sock): while not sock.evt_open.is_set(): time.sleep(0.05) if hasattr(server, 'on_open'): server.on_open(self) while not sock.evt_close.is_set(): frame = sock.recv(timeout=1.0) if frame: server.on_message(sock, frame) def recv_to_q(sock): while not sock.evt_open.is_set(): time.sleep(0.05) while not sock.evt_close.is_set(): sock._recv_to_q() th_list = [] if hasattr(server, 'on_message'): th = Thread(target=recv, args=(server, self,)) th.setDaemon(True) th.start() th_list.append(th) th = Thread(target=recv_to_q, args=(self,)) th.setDaemon(True) th.start() th_list.append(th) yield self._frame(True, OP_PING, '') self.evt_open.set() try: for item in self._loop(only_downstream=True): yield item finally: self.close() if hasattr(server, 'on_close'): server.on_close(self) if th_list: for th in th_list: th.join()
python
# -*- coding: utf-8 -*- # Generated by Django 1.10.6 on 2017-09-21 18:55 from __future__ import unicode_literals from django.conf import settings import django.core.validators from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Department', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='no name', max_length=100)), ('bio', models.TextField(blank=True, null=True)), ('location', models.TextField(blank=True, null=True)), ('rank', models.PositiveIntegerField(default=0)), ('entity_type', models.PositiveIntegerField(choices=[(1, 'عام'), (2, 'خاص'), (3, 'أهلي')], default=1)), ('study_type', models.PositiveIntegerField(choices=[(1, 'فصول دراسية'), (2, 'ساعات معتمدة'), (3, 'غير ذلك')], default=1)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Faculty', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='no name', max_length=100)), ('bio', models.TextField(blank=True, null=True)), ('location', models.TextField(blank=True, null=True)), ('rank', models.PositiveIntegerField(default=0)), ('entity_type', models.PositiveIntegerField(choices=[(1, 'عام'), (2, 'خاص'), (3, 'أهلي')], default=1)), ('study_type', models.PositiveIntegerField(choices=[(1, 'فصول دراسية'), (2, 'ساعات معتمدة'), (3, 'غير ذلك')], default=1)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='SVProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=200, validators=[django.core.validators.RegexValidator('^[\\u0621-\\u064Aa-zA-Z][\\u0621-\\u064Aa-zA-Z0-9]*([ ]?[\\u0621-\\u064Aa-zA-Z0-9]+)+$', 'Name cannot start with number, should consist of characters.')])), ('desc', models.TextField()), ('logo', models.ImageField(default='doctor.jpg', upload_to='')), ], ), migrations.CreateModel( name='University', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(default='no name', max_length=100)), ('bio', models.TextField(blank=True, null=True)), ('location', models.TextField(blank=True, null=True)), ('rank', models.PositiveIntegerField(default=0)), ('entity_type', models.PositiveIntegerField(choices=[(1, 'عام'), (2, 'خاص'), (3, 'أهلي')], default=1)), ('study_type', models.PositiveIntegerField(choices=[(1, 'فصول دراسية'), (2, 'ساعات معتمدة'), (3, 'غير ذلك')], default=1)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='UserProfile', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('level', models.IntegerField(default=1)), ('gender', models.PositiveIntegerField(choices=[(1, 'ذكر'), (2, 'أنثى'), (3, 'غير ذلك')], default=3)), ('count_of_posts', models.IntegerField(default=0)), ('count_of_replies', models.IntegerField(default=0)), ('academic_stats', models.PositiveIntegerField(choices=[(1, 'ناجح'), (2, 'راسب'), (3, 'ناجح بمواد'), (4, 'تحشسن مجموع')], default=1)), ('last_active_device', models.CharField(max_length=200)), ('department', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='depart_users', to='users.Department')), ('faculty', 
models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='fac_users', to='users.Faculty')), ('university', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='uni_users', to='users.University')), ('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)), ], ), migrations.AddField( model_name='faculty', name='university', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='faculties', to='users.University'), ), migrations.AddField( model_name='department', name='faculty', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='departments', to='users.Faculty'), ), migrations.AddField( model_name='department', name='team', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='departments', to='users.SVProfile'), ), ]
python
__author__ = 'Will.Smith'
# -----------------------------------------------------------------------------
# Name: WeightMethod.py
# Purpose: Model for Weight Methods
#
# Author: Will Smith <[email protected]>
#
# Created: Jan 01, 2016
# License: MIT
# ------------------------------------------------------------------------------

from PyQt5.QtCore import QObject, pyqtProperty, QVariant, pyqtSignal
from py.common.FramListModel import FramListModel


class WeightMethodModel(FramListModel):
    def __init__(self, parent=None):
        super().__init__(parent)
        self.add_role_name('text')
        self.add_role_name('method_id')


class WeightMethod(QObject):
    def __init__(self, db):
        super().__init__()
        self._db = db
        self._model = WeightMethodModel()
        self._init_model()

    modelChanged = pyqtSignal()

    @pyqtProperty(QVariant, notify=modelChanged)
    def WeightMethodModel(self):
        return self._model

    def _init_model(self):
        for m in self._db.weight_methods:
            self._model.items.append({'text': m['text'], 'method_id': m['value']})
python
import random
import torch

from sl_cutscenes.constants import SCENARIO_DEFAULTS, PI
from sl_cutscenes.objects.mesh_loader import MeshLoader
from sl_cutscenes.objects.occupancy_matrix import OccupancyMatrix
from sl_cutscenes.utils import utils as utils


class DecoratorLoader:
    """
    Class to add random decorative objects to the scene, which do not participate in the scene dynamics.
    It is based on creating an occupancy matrix of the scene, finding empty locations and placing stuff there
    """

    def __init__(self, scene):
        """ Object initializer """
        self.config = SCENARIO_DEFAULTS["decorator"]
        decorations = self.config["decorations"]
        bounds = self.config["bounds"]
        self.bounds = bounds
        self.pi = torch.acos(torch.zeros(1))

        self.scene = scene
        self.mesh_loader = MeshLoader()
        self.mesh_loader.load_meshes(decorations)
        self.meshes = self.mesh_loader.get_meshes()[0]

        self.x_vect = torch.arange(bounds["min_x"], bounds["max_x"] + bounds["res"], bounds["res"])
        self.y_vect = torch.arange(bounds["min_y"], bounds["max_y"] + bounds["res"], bounds["res"])
        return

    def add_object(self, object_loader, object_id):
        """ Loading an object and adding to the loader """
        obj_info, obj_mesh = self.meshes[object_id]
        pose = torch.eye(4)
        obj_mod = {"mod_pose": pose}
        obj = object_loader.create_object(obj_info, obj_mesh, True, **obj_mod)
        self.scene.add_object(obj)

        # shifting object to a free position and adjusting z-coord to be aligned with the table
        position = self.occ_matrix.find_free_spot(obj=obj)
        pose[:2, -1] = position if position is not None else torch.ones(2)
        pose[2, -1] += obj.mesh.bbox.max[-1]

        # Rotating object in yaw direction
        yaw_angle = random.choice([torch.tensor([i * PI / 2]) for i in range(4)])
        angles = torch.cat([yaw_angle, torch.zeros(2)])
        rot_matrix = utils.get_rot_matrix(angles=angles)
        pose[:3, :3] = pose[:3, :3] @ rot_matrix

        obj.set_pose(pose)
        self.occ_matrix.update_occupancy_matrix(obj)
        self.occ_matrix.add_object_margings()
        return

    def decorate_scene(self, object_loader):
        """ Randomly adding some decoration to a scene """
        # initializing occupancy matrix
        self.occ_matrix = OccupancyMatrix(bounds=self.bounds, objects=self.scene.objects)

        # iteratively placing objects while avoiding collision
        N = torch.randint(low=self.config["min_objs"], high=self.config["max_objs"], size=(1,))
        for i in range(N):
            id = torch.randint(low=0, high=len(self.meshes), size=(1,))
            self.add_object(object_loader, object_id=id)
        return
python
# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: proto/configuration.proto from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='proto/configuration.proto', package='slog.internal', syntax='proto3', serialized_options=None, serialized_pb=b'\n\x19proto/configuration.proto\x12\rslog.internal\"\x1c\n\x07Replica\x12\x11\n\taddresses\x18\x01 \x03(\x0c\"U\n\x1aReplicationDelayExperiment\x12\x1b\n\x13\x62\x61tch_delay_percent\x18\x01 \x01(\r\x12\x1a\n\x12\x62\x61tch_delay_amount\x18\x02 \x01(\r\"3\n\x10HashPartitioning\x12\x1f\n\x17partition_key_num_bytes\x18\x01 \x01(\r\"D\n\x12SimplePartitioning\x12\x13\n\x0bnum_records\x18\x01 \x01(\x04\x12\x19\n\x11record_size_bytes\x18\x02 \x01(\r\"\x90\x03\n\rConfiguration\x12\x10\n\x08protocol\x18\x01 \x01(\x0c\x12(\n\x08replicas\x18\x02 \x03(\x0b\x32\x16.slog.internal.Replica\x12\x13\n\x0b\x62roker_port\x18\x03 \x01(\r\x12\x13\n\x0bserver_port\x18\x04 \x01(\r\x12\x16\n\x0enum_partitions\x18\x05 \x01(\r\x12<\n\x11hash_partitioning\x18\x06 \x01(\x0b\x32\x1f.slog.internal.HashPartitioningH\x00\x12@\n\x13simple_partitioning\x18\x07 \x01(\x0b\x32!.slog.internal.SimplePartitioningH\x00\x12\x13\n\x0bnum_workers\x18\x08 \x01(\r\x12\x16\n\x0e\x62\x61tch_duration\x18\t \x01(\x04\x12\x44\n\x11replication_delay\x18\n \x01(\x0b\x32).slog.internal.ReplicationDelayExperimentB\x0e\n\x0cpartitioningb\x06proto3' ) _REPLICA = _descriptor.Descriptor( name='Replica', full_name='slog.internal.Replica', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='addresses', full_name='slog.internal.Replica.addresses', index=0, number=1, type=12, cpp_type=9, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=44, serialized_end=72, ) _REPLICATIONDELAYEXPERIMENT = _descriptor.Descriptor( name='ReplicationDelayExperiment', full_name='slog.internal.ReplicationDelayExperiment', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='batch_delay_percent', full_name='slog.internal.ReplicationDelayExperiment.batch_delay_percent', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='batch_delay_amount', full_name='slog.internal.ReplicationDelayExperiment.batch_delay_amount', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=74, serialized_end=159, ) _HASHPARTITIONING = _descriptor.Descriptor( name='HashPartitioning', 
full_name='slog.internal.HashPartitioning', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='partition_key_num_bytes', full_name='slog.internal.HashPartitioning.partition_key_num_bytes', index=0, number=1, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=161, serialized_end=212, ) _SIMPLEPARTITIONING = _descriptor.Descriptor( name='SimplePartitioning', full_name='slog.internal.SimplePartitioning', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='num_records', full_name='slog.internal.SimplePartitioning.num_records', index=0, number=1, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='record_size_bytes', full_name='slog.internal.SimplePartitioning.record_size_bytes', index=1, number=2, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=214, serialized_end=282, ) _CONFIGURATION = _descriptor.Descriptor( name='Configuration', full_name='slog.internal.Configuration', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='protocol', full_name='slog.internal.Configuration.protocol', index=0, number=1, type=12, cpp_type=9, label=1, has_default_value=False, default_value=b"", message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='replicas', full_name='slog.internal.Configuration.replicas', index=1, number=2, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='broker_port', full_name='slog.internal.Configuration.broker_port', index=2, number=3, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='server_port', full_name='slog.internal.Configuration.server_port', index=3, number=4, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='num_partitions', full_name='slog.internal.Configuration.num_partitions', index=4, number=5, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, 
file=DESCRIPTOR), _descriptor.FieldDescriptor( name='hash_partitioning', full_name='slog.internal.Configuration.hash_partitioning', index=5, number=6, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='simple_partitioning', full_name='slog.internal.Configuration.simple_partitioning', index=6, number=7, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='num_workers', full_name='slog.internal.Configuration.num_workers', index=7, number=8, type=13, cpp_type=3, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='batch_duration', full_name='slog.internal.Configuration.batch_duration', index=8, number=9, type=4, cpp_type=4, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='replication_delay', full_name='slog.internal.Configuration.replication_delay', index=9, number=10, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ _descriptor.OneofDescriptor( name='partitioning', full_name='slog.internal.Configuration.partitioning', index=0, containing_type=None, fields=[]), ], serialized_start=285, serialized_end=685, ) _CONFIGURATION.fields_by_name['replicas'].message_type = _REPLICA _CONFIGURATION.fields_by_name['hash_partitioning'].message_type = _HASHPARTITIONING _CONFIGURATION.fields_by_name['simple_partitioning'].message_type = _SIMPLEPARTITIONING _CONFIGURATION.fields_by_name['replication_delay'].message_type = _REPLICATIONDELAYEXPERIMENT _CONFIGURATION.oneofs_by_name['partitioning'].fields.append( _CONFIGURATION.fields_by_name['hash_partitioning']) _CONFIGURATION.fields_by_name['hash_partitioning'].containing_oneof = _CONFIGURATION.oneofs_by_name['partitioning'] _CONFIGURATION.oneofs_by_name['partitioning'].fields.append( _CONFIGURATION.fields_by_name['simple_partitioning']) _CONFIGURATION.fields_by_name['simple_partitioning'].containing_oneof = _CONFIGURATION.oneofs_by_name['partitioning'] DESCRIPTOR.message_types_by_name['Replica'] = _REPLICA DESCRIPTOR.message_types_by_name['ReplicationDelayExperiment'] = _REPLICATIONDELAYEXPERIMENT DESCRIPTOR.message_types_by_name['HashPartitioning'] = _HASHPARTITIONING DESCRIPTOR.message_types_by_name['SimplePartitioning'] = _SIMPLEPARTITIONING DESCRIPTOR.message_types_by_name['Configuration'] = _CONFIGURATION _sym_db.RegisterFileDescriptor(DESCRIPTOR) Replica = _reflection.GeneratedProtocolMessageType('Replica', (_message.Message,), { 'DESCRIPTOR' : _REPLICA, '__module__' : 'proto.configuration_pb2' # @@protoc_insertion_point(class_scope:slog.internal.Replica) }) _sym_db.RegisterMessage(Replica) 
ReplicationDelayExperiment = _reflection.GeneratedProtocolMessageType('ReplicationDelayExperiment', (_message.Message,), { 'DESCRIPTOR' : _REPLICATIONDELAYEXPERIMENT, '__module__' : 'proto.configuration_pb2' # @@protoc_insertion_point(class_scope:slog.internal.ReplicationDelayExperiment) }) _sym_db.RegisterMessage(ReplicationDelayExperiment) HashPartitioning = _reflection.GeneratedProtocolMessageType('HashPartitioning', (_message.Message,), { 'DESCRIPTOR' : _HASHPARTITIONING, '__module__' : 'proto.configuration_pb2' # @@protoc_insertion_point(class_scope:slog.internal.HashPartitioning) }) _sym_db.RegisterMessage(HashPartitioning) SimplePartitioning = _reflection.GeneratedProtocolMessageType('SimplePartitioning', (_message.Message,), { 'DESCRIPTOR' : _SIMPLEPARTITIONING, '__module__' : 'proto.configuration_pb2' # @@protoc_insertion_point(class_scope:slog.internal.SimplePartitioning) }) _sym_db.RegisterMessage(SimplePartitioning) Configuration = _reflection.GeneratedProtocolMessageType('Configuration', (_message.Message,), { 'DESCRIPTOR' : _CONFIGURATION, '__module__' : 'proto.configuration_pb2' # @@protoc_insertion_point(class_scope:slog.internal.Configuration) }) _sym_db.RegisterMessage(Configuration) # @@protoc_insertion_point(module_scope)
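# --- Illustration (not part of the generated file) ---
# A minimal sketch of building a Configuration message using the fields declared in
# the descriptor above; the import path is an assumption based on the generated
# package layout ('proto/configuration.proto').
#
#     from proto import configuration_pb2
#
#     config = configuration_pb2.Configuration(protocol=b"tcp", num_partitions=4)
#     config.simple_partitioning.num_records = 1000   # selects the 'partitioning' oneof
#     data = config.SerializeToString()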
python
import pandas as pd
import numpy as np

from misc import data_io

DATA_DIR = 'data/ut-interaction/'

""" Folder structure
<'set1' or 'set2'>/keypoints
    <video_name>/
        <video_name>_<frame_num>_keypoints.json
        ...
Ex: DATA_DIR + 'set1/keypoints/0_1_4/0_1_4_000000000042_keypoints.json'
"""

VIDEOS = [
    ['0_1_4','1_1_2','2_1_1','3_1_3','4_1_0','5_1_5','6_2_4','7_2_5','8_2_0',
     '9_2_2','10_2_1','11_2_3','12_3_4','13_3_2','14_3_1','15_3_3','16_3_5',
     '17_3_0','18_4_4','19_4_1','20_4_2','21_4_0','22_4_3','23_4_5','24_5_0',
     '25_5_4','26_5_2','27_5_1','28_5_3','29_5_5','30_6_2','31_6_5','32_6_1',
     '33_6_3','34_6_0','35_7_0','36_7_5','37_7_4','38_7_2','39_7_3','40_7_1',
     '41_8_0','42_8_2','43_8_4','44_8_4','45_8_5','46_8_3','47_8_1','48_9_3',
     '49_9_5','50_9_2','51_9_4','52_9_0','53_9_1','54_10_0','55_10_4','56_10_5',
     '57_10_3','58_10_1','59_10_2'],  # set1
    ['0_11_4','1_11_2','2_11_5','3_11_0','4_11_3','5_11_1','6_12_0','7_12_3',
     '8_12_5','9_12_1','10_12_4','11_12_2','12_13_4','13_13_2','14_13_1',
     '15_13_3','16_13_5','17_13_0','18_14_0','19_14_1','20_14_5','21_14_3',
     '22_14_4','23_14_2','24_15_1','25_15_0','26_15_4','27_15_2','28_15_3',
     '29_15_5','30_16_3','31_16_0','32_16_1','33_16_4','34_16_2','35_16_5',
     '36_17_1','37_17_0','38_17_3','39_17_5','40_17_4','41_17_2','42_18_2',
     '43_18_4','44_18_1','45_18_3','46_18_5','47_18_0','48_19_0','49_19_1',
     '50_19_4','51_19_3','52_19_5','53_19_2','54_20_1','55_20_0','56_20_5',
     '57_20_3','58_20_4','59_20_2']  # set2
]

ACTIONS = ['Hand Shaking','Hugging','Kicking','Pointing','Punching','Pushing']


def get_ground_truth(data_dir=DATA_DIR):
    video_lst, setid_lst, seq_lst, path_lst, action_lst = [], [], [], [], []
    for set_id, set_videos in enumerate(VIDEOS):
        video_lst = video_lst + set_videos
        setid_lst = setid_lst + len(set_videos)*[set_id+1]
        for video in set_videos:
            num, seq, action = video.split('_')
            seq_lst.append(int(seq))
            action_lst.append(int(action))
            path = '{}set{}/keypoints/{}/'.format(data_dir, set_id+1, video)
            path_lst.append(path)

    dataframe_dict = {'video_id': video_lst,
                      'setid': setid_lst,
                      'seq': seq_lst,
                      'path': path_lst,
                      'action': action_lst}
    ground_truth = pd.DataFrame(dataframe_dict).set_index('video_id')

    return ground_truth


def get_folds(setid):
    if setid == 1:
        folds = np.arange(10)
    elif setid == 2:
        folds = np.arange(10, 20)
    else:
        raise ValueError("setid must be 1 or 2, value entered: "+str(setid))

    return folds


def get_train_gt(fold_num):
    if fold_num < 0 or fold_num > 19:
        raise ValueError("fold_num must be within 0 and 19, value entered: "+str(fold_num))

    if fold_num < 10:
        setid = 1
        sequences = np.arange(10)
        fold_sequences = sequences[sequences != fold_num] + 1
    else:
        setid = 2
        sequences = np.arange(10, 20)
        fold_sequences = sequences[sequences != fold_num] + 1

    ground_truth = get_ground_truth()
    gt_split = ground_truth[ground_truth.setid == setid]
    gt_split = gt_split[gt_split.seq.isin(fold_sequences)]

    return gt_split


def get_val_gt(fold_num):
    if fold_num < 0 or fold_num > 19:
        raise ValueError("fold_num must be within 0 and 19, value entered: "+str(fold_num))

    if fold_num < 10:
        setid = 1
        sequences = np.arange(10)
        fold_sequences = sequences[sequences == fold_num] + 1
    else:
        setid = 2
        sequences = np.arange(10, 20)
        fold_sequences = sequences[sequences == fold_num] + 1

    ground_truth = get_ground_truth()
    gt_split = ground_truth[ground_truth.setid == setid]
    gt_split = gt_split[gt_split.seq.isin(fold_sequences)]

    return gt_split


def get_train(fold_num, **kwargs):
    if fold_num < 0 or fold_num > 19:
        raise ValueError("fold_num must be within 0 and 19, value entered: "+str(fold_num))

    if fold_num < 10:
        setid = 1
        sequences = np.arange(10)
        fold_sequences = sequences[sequences != fold_num] + 1
    else:
        setid = 2
        sequences = np.arange(10, 20)
        fold_sequences = sequences[sequences != fold_num] + 1

    return get_seqs(setid, fold_sequences, **kwargs)


def get_val(fold_num, **kwargs):
    if fold_num < 0 or fold_num > 19:
        raise ValueError("fold_num must be within 0 and 19, value entered: "+str(fold_num))

    if fold_num < 10:
        setid = 1
        sequences = np.arange(10)
        fold_sequences = sequences[sequences == fold_num] + 1
    else:
        setid = 2
        sequences = np.arange(10, 20)
        fold_sequences = sequences[sequences == fold_num] + 1

    return get_seqs(setid, fold_sequences, **kwargs)


def get_seqs(setid, selected_sequences, **kwargs):
    if setid < 1 or setid > 2:
        raise ValueError("setid must be 1 or 2, value entered: "+str(setid))

    ground_truth = get_ground_truth()
    gt_split = ground_truth[ground_truth.setid == setid]
    gt_split = gt_split[gt_split.seq.isin(selected_sequences)]

    X, Y = data_io.get_data(gt_split, pose_style='OpenPose', **kwargs)

    return X, Y
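if __name__ == '__main__':
    # Quick sanity check of the leave-one-sequence-out split used above
    # (illustrative only; it does not touch the keypoint files on disk).
    fold_num = 3                                      # a fold from set 1
    sequences = np.arange(10)
    train_seqs = sequences[sequences != fold_num] + 1
    val_seqs = sequences[sequences == fold_num] + 1
    print('train sequences:', train_seqs)             # [ 1  2  3  5  6  7  8  9 10]
    print('val sequence:   ', val_seqs)               # [4]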
python
from flask import Flask, render_template

app = Flask(__name__)


@app.route('/')
def home():
    return render_template('index.html')


@app.route('/projects')
def projects():
    return render_template('projects.html')


@app.route('/about')
def about():
    return render_template('about.html')


if __name__ == '__main__':
    # Only start the development server when the module is run directly.
    app.run(debug=True)
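# --- Illustration (not part of the original file) ---
# The routes can be exercised without starting the server by using Flask's built-in
# test client (assumes index.html, projects.html and about.html exist under ./templates):
#
#     with app.test_client() as client:
#         assert client.get('/').status_code == 200
#         assert client.get('/projects').status_code == 200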
python
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')

import atmos as atm
import merra
from merra import calc_fluxes

scratchdir = '/net/eady/data1/jwalker/datastore/scratch/'


def filename(varname, datestr):
    savedir = '/net/eady/data1/jwalker/datastore/merra/monthly/'
    filen = savedir + varname + datestr
    print('Saving to ' + filen)
    return filen


year = 1979
month = 3
datestr = '_%d%02d.nc' % (year, month)

ds = calc_fluxes(year, month, scratchdir=scratchdir)
ds.to_netcdf(filename('fluxes', datestr))
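# --- Illustration (not part of the original script) ---
# For the values above, datestr evaluates to '_197903.nc', so the fluxes file is
# written to:
#     /net/eady/data1/jwalker/datastore/merra/monthly/fluxes_197903.nc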
python
#!/usr/bin/env python

# Copyright 2016 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys, re


def read_yy():
    # Extract the grammar rules between the %% markers and strip the C actions.
    with open('was_parser.yy', 'r') as f:
        read_data = f.read()
    grammar_content = re.compile(r"%%\n([\s\S]*)%%", re.M)
    m = grammar_content.search(read_data)
    remove_c_code = re.compile(r"\s+{\s[^}]*[^\n]*", re.M)
    no_code = re.sub(remove_c_code, "", m.group(1))
    return no_code


def read_l():
    # Extract the lexer definitions and token rules, dropping C code blocks,
    # flex options, and trivial single-character rules.
    with open('was_lexer.l', 'r') as f:
        read_data = f.read()
    remove_c_code = re.compile(r"%\{((?!%\})[\s\S])*%\}", re.M)
    remove_c_header = re.compile(r"/\*((?!\*/)[\s\S])*\*/\s*", re.M)
    no_code = re.sub(remove_c_code, "", re.sub(remove_c_header, "", read_data))
    remove_options = re.compile(r"^%\w[^\n]*\n", re.M)
    no_options = re.sub(remove_options, "", no_code)
    lexer_content = re.compile(r"\n*([\s\S]*)%%\n([\s\S]*)%%", re.M)
    m = lexer_content.search(no_options)
    sequences = m.group(1)
    tokens = m.group(2)
    simplify_tokens = re.compile(r"(\s+)\{.*?return\s+token::([^;]+);\s+\}", re.M)
    simplified_tokens = re.sub(simplify_tokens, r"\1\2", tokens)
    removed_trivial = re.sub(r"\n\x22([^\x22]+)\x22\s+\{.*?return\('\1'\)[^\n]+", "", simplified_tokens)
    removed_stats = re.sub(r"(\s+)\{\s+BEGIN\(([^\)]+)\);\s+\}", r"\1STATE:\2", removed_trivial)
    removed_code = re.sub(r"(\s+)\{[^\}]+\}[^\n]*", "", removed_stats)
    return sequences + removed_code


print("# Grammar Rules")
print()
print(read_yy())
print()
print("# Scanner/Lexer")
print()
print(read_l())
print()
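# --- Illustration (not part of the original script) ---
# What the C-action stripping in read_yy() does to a single (hypothetical) rule:
#
#     >>> re.sub(r"\s+{\s[^}]*[^\n]*", "", "expr : expr PLUS term { $$ = $1 + $3; }")
#     'expr : expr PLUS term'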
python
'''
author: eleclike
date:
'''
python
# -*- coding: utf-8 -*-

# Part of the masterfile package: https://github.com/uwmadison-chm/masterfile
# Copyright (c) 2020 Board of Regents of the University of Wisconsin System
# Written by Nate Vack <[email protected]> at the Center for Healthy Minds
# at the University of Wisconsin-Madison.
# Released under MIT licence; see LICENSE at the package root.

"""
The masterfile annotator.

This class takes a dictionary and a masterfile, and can "annotate" dataframes'
series with the metadata from the dictionary, so you can do things like:

df = mf.dataframe
df.t1_ourMeasure.contact
{
    'ourMeasure': 'Jordan Smith'
}
"""

from __future__ import absolute_import, unicode_literals

from collections import defaultdict

import attr


def annotate_masterfile(mf):
    ann = Annotator.from_masterfile(mf)
    ann.annotate_masterfile()


@attr.s
class Annotator(object):

    masterfile = attr.ib()
    dictionary = attr.ib()
    error_list = attr.ib(default=attr.Factory(list))

    @classmethod
    def from_masterfile(klass, mf):
        return klass(masterfile=mf, dictionary=mf.dictionary)

    def annotate_masterfile(self):
        self.annotate_dataframe(self.masterfile.dataframe)

    def annotate_dataframe(self, df):
        df._metadata = list(self.dictionary.columns)
        # DataFrame.items() replaces the long-deprecated iteritems().
        for series_name, series in df.items():
            self.annotate_series(series)

    def make_series_annotations(self, series_name):
        """
        Create a dictionary of annotations for a series, of the format:
        {
            dictionaryColumn: {componentName_componentValue: dictionaryValue}
            ...
        }
        So if your dictionary has a timepoint t1 with the long_name "Time 1",
        you'll get:
        {
            'long_name': {'timepoint_t1': 'Time 1'}
            ...
        }

        I'm not very happy with this code, it's ugly as hell, but I don't have
        a clear way to clean it up.
        """
        d = defaultdict(dict)
        for comp, val in self.masterfile.column_components(series_name):
            label = '{}_{}'.format(comp, val)
            component_annotations = self.dictionary.annotations_for(comp, val)
            for ann_label, ann_value in component_annotations.items():
                d[ann_label][label] = ann_value
        return d

    def annotate_series(self, series):
        annotations = self.make_series_annotations(series.name)
        for attribute, values in annotations.items():
            setattr(series, attribute, values)
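# --- Illustration (not part of the original module) ---
# Putting the two docstrings together: if the dictionary annotates the component
# ('timepoint', 't1') with long_name "Time 1", then make_series_annotations('t1_ourMeasure')
# returns something like {'long_name': {'timepoint_t1': 'Time 1'}, ...}, and
# annotate_series() sets each of those inner dicts as an attribute on the pandas
# Series, which is what makes lookups such as df.t1_ourMeasure.contact possible.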
python